/*
 * Back-end of the driver for virtual network devices. This portion of the
 * driver exports a 'unified' network-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/net/xen-netfront.c
 *
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/kthread.h>
#include <linux/if_vlan.h>
#include <linux/udp.h>

#include <xen/events.h>
#include <xen/interface/memory.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>
struct pending_tx_info {
	struct xen_netif_tx_request req;
	struct xenvif *vif;
};

typedef unsigned int pending_ring_idx_t;
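/*
 * Each in-flight guest transmit request occupies one pending slot.  The
 * pending_tx_info entry keeps a copy of the frontend's tx request (and the
 * vif it came from) so a response can be generated once the grant copy for
 * that slot has completed.
 */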
struct netbk_rx_meta {
	int id;
	int size;
	int gso_size;
};

#define MAX_PENDING_REQS 256
/* Discriminate from any valid pending_idx value. */
#define INVALID_PENDING_IDX 0xFFFF

#define MAX_BUFFER_OFFSET PAGE_SIZE
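/*
 * Each receive-side buffer offered by the frontend is a single page, so a
 * copy destination can hold at most PAGE_SIZE bytes before a new request
 * slot has to be consumed.
 */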
/* extra field used in struct page */
union page_ext {
	struct {
#if BITS_PER_LONG < 64
#define IDX_WIDTH   8
#define GROUP_WIDTH (BITS_PER_LONG - IDX_WIDTH)
		unsigned int group:GROUP_WIDTH;
		unsigned int idx:IDX_WIDTH;
#else
		unsigned int group, idx;
#endif
	} e;
	void *mapping;
};
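/*
 * The (group, idx) pair is stashed in page->mapping for pages that netback
 * allocated for pending tx requests.  The group is stored biased by one so
 * that a zero mapping never looks like a valid reference; get_page_ext()
 * undoes the bias and validates both indices.
 */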
struct xen_netbk {
	wait_queue_head_t wq;
	struct task_struct *task;

	struct sk_buff_head rx_queue;
	struct sk_buff_head tx_queue;

	struct timer_list net_timer;

	struct page *mmap_pages[MAX_PENDING_REQS];

	pending_ring_idx_t pending_prod;
	pending_ring_idx_t pending_cons;
	struct list_head net_schedule_list;

	/* Protect the net_schedule_list in netif. */
	spinlock_t net_schedule_list_lock;

	atomic_t netfront_count;

	struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
	struct gnttab_copy tx_copy_ops[MAX_PENDING_REQS];

	u16 pending_ring[MAX_PENDING_REQS];

	/*
	 * Given MAX_BUFFER_OFFSET of 4096 the worst case is that each
	 * head/fragment page uses 2 copy operations because it
	 * straddles two buffers in the frontend.
	 */
	struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE];
	struct netbk_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE];
};
static struct xen_netbk *xen_netbk;
static int xen_netbk_group_nr;
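/*
 * netback_init() sizes this array with one xen_netbk group per online CPU;
 * each group runs its own "netback/%u" kernel thread and new vifs are
 * assigned to the least loaded group in xen_netbk_add_xenvif().
 */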
void xen_netbk_add_xenvif(struct xenvif *vif)
{
	int i;
	int min_netfront_count;
	int min_group = 0;
	struct xen_netbk *netbk;

	min_netfront_count = atomic_read(&xen_netbk[0].netfront_count);
	for (i = 0; i < xen_netbk_group_nr; i++) {
		int netfront_count = atomic_read(&xen_netbk[i].netfront_count);
		if (netfront_count < min_netfront_count) {
			min_group = i;
			min_netfront_count = netfront_count;
		}
	}

	netbk = &xen_netbk[min_group];

	vif->netbk = netbk;
	atomic_inc(&netbk->netfront_count);
}
void xen_netbk_remove_xenvif(struct xenvif *vif)
{
	struct xen_netbk *netbk = vif->netbk;
	vif->netbk = NULL;
	atomic_dec(&netbk->netfront_count);
}
static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx);
static void make_tx_response(struct xenvif *vif,
			     struct xen_netif_tx_request *txp,
			     s8 st);
static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
						       u16 id,
						       s8 st,
						       u16 offset,
						       u16 size,
						       u16 flags);
static inline unsigned long idx_to_pfn(struct xen_netbk *netbk,
				       u16 idx)
{
	return page_to_pfn(netbk->mmap_pages[idx]);
}

static inline unsigned long idx_to_kaddr(struct xen_netbk *netbk,
					 u16 idx)
{
	return (unsigned long)pfn_to_kaddr(idx_to_pfn(netbk, idx));
}
/* extra field used in struct page */
static inline void set_page_ext(struct page *pg, struct xen_netbk *netbk,
				unsigned int idx)
{
	unsigned int group = netbk - xen_netbk;
	union page_ext ext = { .e = { .group = group + 1, .idx = idx } };

	BUILD_BUG_ON(sizeof(ext) > sizeof(ext.mapping));
	pg->mapping = ext.mapping;
}
static int get_page_ext(struct page *pg,
			unsigned int *pgroup, unsigned int *pidx)
{
	union page_ext ext = { .mapping = pg->mapping };
	struct xen_netbk *netbk;
	unsigned int group, idx;

	group = ext.e.group - 1;

	if (group < 0 || group >= xen_netbk_group_nr)
		return 0;

	netbk = &xen_netbk[group];

	idx = ext.e.idx;

	if ((idx < 0) || (idx >= MAX_PENDING_REQS))
		return 0;

	if (netbk->mmap_pages[idx] != pg)
		return 0;

	*pgroup = group;
	*pidx = idx;

	return 1;
}
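/*
 * get_page_ext() succeeds only for pages that this driver allocated (and
 * tagged with set_page_ext) for a pending tx slot.  The receive path uses
 * this to recognise "foreign" data that still lives in a page backing a
 * guest tx request, in which case the grant copy must name that guest's
 * grant reference rather than a local frame number.
 */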
/*
 * This is the amount of packet we copy rather than map, so that the
 * guest can't fiddle with the contents of the headers while we do
 * packet processing on them (netfilter, routing, etc).
 */
#define PKT_PROT_LEN    (ETH_HLEN + \
			 VLAN_HLEN + \
			 sizeof(struct iphdr) + MAX_IPOPTLEN + \
			 sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE)
static u16 frag_get_pending_idx(skb_frag_t *frag)
{
	return (u16)frag->page_offset;
}

static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
{
	frag->page_offset = pending_idx;
}
static inline pending_ring_idx_t pending_index(unsigned i)
{
	return i & (MAX_PENDING_REQS-1);
}

static inline pending_ring_idx_t nr_pending_reqs(struct xen_netbk *netbk)
{
	return MAX_PENDING_REQS -
		netbk->pending_prod + netbk->pending_cons;
}
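/*
 * MAX_PENDING_REQS is a power of two, so pending_index() reduces a free
 * running counter with a simple mask.  pending_prod starts at
 * MAX_PENDING_REQS with every slot on the free ring, so nr_pending_reqs()
 * is the number of slots currently in use: 0 when the ring is idle and
 * MAX_PENDING_REQS when every slot has an outstanding request.
 */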
static void xen_netbk_kick_thread(struct xen_netbk *netbk)
{
	wake_up(&netbk->wq);
}
static int max_required_rx_slots(struct xenvif *vif)
{
	int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);

	if (vif->can_sg || vif->gso || vif->gso_prefix)
		max += MAX_SKB_FRAGS + 1; /* extra_info + frags */

	return max;
}
int xen_netbk_rx_ring_full(struct xenvif *vif)
{
	RING_IDX peek   = vif->rx_req_cons_peek;
	RING_IDX needed = max_required_rx_slots(vif);

	return ((vif->rx.sring->req_prod - peek) < needed) ||
	       ((vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) < needed);
}
int xen_netbk_must_stop_queue(struct xenvif *vif)
{
	if (!xen_netbk_rx_ring_full(vif))
		return 0;

	vif->rx.sring->req_event = vif->rx_req_cons_peek +
		max_required_rx_slots(vif);
	mb(); /* request notification /then/ check the queue */

	return xen_netbk_rx_ring_full(vif);
}
/*
 * Returns true if we should start a new receive buffer instead of
 * adding 'size' bytes to a buffer which currently contains 'offset'
 * bytes.
 */
static bool start_new_rx_buffer(int offset, unsigned long size, int head)
{
	/* simple case: we have completely filled the current buffer. */
	if (offset == MAX_BUFFER_OFFSET)
		return true;

	/*
	 * complex case: start a fresh buffer if the current frag
	 * would overflow the current buffer but only if:
	 *     (i)   this frag would fit completely in the next buffer
	 * and (ii)  there is already some data in the current buffer
	 * and (iii) this is not the head buffer.
	 *
	 * Where:
	 * - (i) stops us splitting a frag into two copies
	 *   unless the frag is too large for a single buffer.
	 * - (ii) stops us from leaving a buffer pointlessly empty.
	 * - (iii) stops us leaving the first buffer
	 *   empty. Strictly speaking this is already covered
	 *   by (ii) but is explicitly checked because
	 *   netfront relies on the first buffer being
	 *   non-empty and can crash otherwise.
	 *
	 * This means we will effectively linearise small
	 * frags but do not needlessly split large buffers
	 * into multiple copies, and tend to give large frags
	 * their own buffers as before.
	 */
	if ((offset + size > MAX_BUFFER_OFFSET) &&
	    (size <= MAX_BUFFER_OFFSET) && offset && !head)
		return true;

	return false;
}
/*
 * Figure out how many ring slots we're going to need to send @skb to
 * the guest. This function is essentially a dry run of
 * netbk_gop_frag_copy.
 */
unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
{
	unsigned int count;
	int i, copy_off;

	count = DIV_ROUND_UP(skb_headlen(skb), PAGE_SIZE);

	copy_off = skb_headlen(skb) % PAGE_SIZE;

	if (skb_shinfo(skb)->gso_size)
		count++;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
		unsigned long offset = skb_shinfo(skb)->frags[i].page_offset;
		unsigned long bytes;

		offset &= ~PAGE_MASK;

		while (size > 0) {
			BUG_ON(offset >= PAGE_SIZE);
			BUG_ON(copy_off > MAX_BUFFER_OFFSET);

			bytes = PAGE_SIZE - offset;

			if (bytes > size)
				bytes = size;

			if (start_new_rx_buffer(copy_off, bytes, 0)) {
				count++;
				copy_off = 0;
			}

			if (copy_off + bytes > MAX_BUFFER_OFFSET)
				bytes = MAX_BUFFER_OFFSET - copy_off;

			copy_off += bytes;

			offset += bytes;
			size -= bytes;

			if (offset == PAGE_SIZE)
				offset = 0;
		}
	}
	return count;
}
struct netrx_pending_operations {
	unsigned copy_prod, copy_cons;
	unsigned meta_prod, meta_cons;
	struct gnttab_copy *copy;
	struct netbk_rx_meta *meta;
	int copy_off;
	grant_ref_t copy_gref;
};
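/*
 * netrx_pending_operations tracks one batch of guest-receive work: copy_prod
 * and meta_prod index into the per-group grant_copy_op and meta arrays while
 * the batch is being built, and copy_cons/meta_cons walk the same arrays when
 * the completed copies are checked and turned into ring responses.
 */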
static struct netbk_rx_meta *get_next_rx_buffer(struct xenvif *vif,
						struct netrx_pending_operations *npo)
{
	struct netbk_rx_meta *meta;
	struct xen_netif_rx_request *req;

	req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);

	meta = npo->meta + npo->meta_prod++;
	meta->gso_size = 0;
	meta->size = 0;
	meta->id = req->id;

	npo->copy_off = 0;
	npo->copy_gref = req->gref;

	return meta;
}
/*
 * Set up the grant operations for this fragment. If it's a flipping
 * interface, we also set up the unmap request from here.
 */
static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
				struct netrx_pending_operations *npo,
				struct page *page, unsigned long size,
				unsigned long offset, int *head)
{
	struct gnttab_copy *copy_gop;
	struct netbk_rx_meta *meta;
	/*
	 * These variables are used iff get_page_ext returns true,
	 * in which case they are guaranteed to be initialized.
	 */
	unsigned int uninitialized_var(group), uninitialized_var(idx);
	int foreign = get_page_ext(page, &group, &idx);
	unsigned long bytes;

	/* Data must not cross a page boundary. */
	BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));

	meta = npo->meta + npo->meta_prod - 1;

	/* Skip unused frames from start of page */
	page += offset >> PAGE_SHIFT;
	offset &= ~PAGE_MASK;

	while (size > 0) {
		BUG_ON(offset >= PAGE_SIZE);
		BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);

		bytes = PAGE_SIZE - offset;

		if (bytes > size)
			bytes = size;

		if (start_new_rx_buffer(npo->copy_off, bytes, *head)) {
			/*
			 * Netfront requires there to be some data in the head
			 * buffer.
			 */
			BUG_ON(*head);

			meta = get_next_rx_buffer(vif, npo);
		}

		if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
			bytes = MAX_BUFFER_OFFSET - npo->copy_off;

		copy_gop = npo->copy + npo->copy_prod++;
		copy_gop->flags = GNTCOPY_dest_gref;
		if (foreign) {
			struct xen_netbk *netbk = &xen_netbk[group];
			struct pending_tx_info *src_pend;

			src_pend = &netbk->pending_tx_info[idx];

			copy_gop->source.domid = src_pend->vif->domid;
			copy_gop->source.u.ref = src_pend->req.gref;
			copy_gop->flags |= GNTCOPY_source_gref;
		} else {
			void *vaddr = page_address(page);
			copy_gop->source.domid = DOMID_SELF;
			copy_gop->source.u.gmfn = virt_to_mfn(vaddr);
		}
		copy_gop->source.offset = offset;
		copy_gop->dest.domid = vif->domid;

		copy_gop->dest.offset = npo->copy_off;
		copy_gop->dest.u.ref = npo->copy_gref;
		copy_gop->len = bytes;

		npo->copy_off += bytes;
		meta->size += bytes;

		offset += bytes;
		size -= bytes;

		/* Next frame */
		if (offset == PAGE_SIZE && size) {
			BUG_ON(!PageCompound(page));
			page++;
			offset = 0;
		}

		/* Leave a gap for the GSO descriptor. */
		if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix)
			vif->rx.req_cons++;

		*head = 0; /* There must be something in this buffer now. */
	}
}
/*
 * Prepare an SKB to be transmitted to the frontend.
 *
 * This function is responsible for allocating grant operations, meta
 * structures, etc.
 *
 * It returns the number of meta structures consumed. The number of
 * ring slots used is always equal to the number of meta slots used
 * plus the number of GSO descriptors used. Currently, we use either
 * zero GSO descriptors (for non-GSO packets) or one descriptor (for
 * frontend-side LRO).
 */
static int netbk_gop_skb(struct sk_buff *skb,
			 struct netrx_pending_operations *npo)
{
	struct xenvif *vif = netdev_priv(skb->dev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int i;
	struct xen_netif_rx_request *req;
	struct netbk_rx_meta *meta;
	unsigned char *data;
	int head = 1;
	int old_meta_prod;

	old_meta_prod = npo->meta_prod;

	/* Set up a GSO prefix descriptor, if necessary */
	if (skb_shinfo(skb)->gso_size && vif->gso_prefix) {
		req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
		meta = npo->meta + npo->meta_prod++;
		meta->gso_size = skb_shinfo(skb)->gso_size;
		meta->size = 0;
		meta->id = req->id;
	}

	req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
	meta = npo->meta + npo->meta_prod++;

	if (!vif->gso_prefix)
		meta->gso_size = skb_shinfo(skb)->gso_size;
	else
		meta->gso_size = 0;

	meta->size = 0;
	meta->id = req->id;
	npo->copy_off = 0;
	npo->copy_gref = req->gref;

	data = skb->data;
	while (data < skb_tail_pointer(skb)) {
		unsigned int offset = offset_in_page(data);
		unsigned int len = PAGE_SIZE - offset;

		if (data + len > skb_tail_pointer(skb))
			len = skb_tail_pointer(skb) - data;

		netbk_gop_frag_copy(vif, skb, npo,
				    virt_to_page(data), len, offset, &head);
		data += len;
	}

	for (i = 0; i < nr_frags; i++) {
		netbk_gop_frag_copy(vif, skb, npo,
				    skb_frag_page(&skb_shinfo(skb)->frags[i]),
				    skb_frag_size(&skb_shinfo(skb)->frags[i]),
				    skb_shinfo(skb)->frags[i].page_offset,
				    &head);
	}

	return npo->meta_prod - old_meta_prod;
}
/*
 * This is a twin to netbk_gop_skb.  Assume that netbk_gop_skb was
 * used to set up the operations on the top of
 * netrx_pending_operations, which have since been done.  Check that
 * they didn't give any errors and advance over them.
 */
static int netbk_check_gop(struct xenvif *vif, int nr_meta_slots,
			   struct netrx_pending_operations *npo)
{
	struct gnttab_copy *copy_op;
	int status = XEN_NETIF_RSP_OKAY;
	int i;

	for (i = 0; i < nr_meta_slots; i++) {
		copy_op = npo->copy + npo->copy_cons++;
		if (copy_op->status != GNTST_okay) {
			netdev_dbg(vif->dev,
				   "Bad status %d from copy to DOM%d.\n",
				   copy_op->status, vif->domid);
			status = XEN_NETIF_RSP_ERROR;
		}
	}

	return status;
}
static void netbk_add_frag_responses(struct xenvif *vif, int status,
				     struct netbk_rx_meta *meta,
				     int nr_meta_slots)
{
	int i;
	unsigned long offset;

	/* No fragments used */
	if (nr_meta_slots <= 1)
		return;

	nr_meta_slots--;

	for (i = 0; i < nr_meta_slots; i++) {
		int flags;
		if (i == nr_meta_slots - 1)
			flags = 0;
		else
			flags = XEN_NETRXF_more_data;

		offset = 0;
		make_rx_response(vif, meta[i].id, status, offset,
				 meta[i].size, flags);
	}
}
struct skb_cb_overlay {
	int meta_slots_used;
};
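/*
 * The number of meta slots a packet consumed is remembered in skb->cb
 * between the gop-building pass and the response pass of
 * xen_netbk_rx_action().
 */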
static void xen_netbk_rx_action(struct xen_netbk *netbk)
{
	struct xenvif *vif = NULL, *tmp;
	s8 status;
	u16 flags;
	struct xen_netif_rx_response *resp;
	struct sk_buff_head rxq;
	struct sk_buff *skb;
	LIST_HEAD(notify);
	int ret;
	int nr_frags;
	int count;
	unsigned long offset;
	struct skb_cb_overlay *sco;

	struct netrx_pending_operations npo = {
		.copy  = netbk->grant_copy_op,
		.meta  = netbk->meta,
	};

	skb_queue_head_init(&rxq);

	count = 0;

	while ((skb = skb_dequeue(&netbk->rx_queue)) != NULL) {
		vif = netdev_priv(skb->dev);
		nr_frags = skb_shinfo(skb)->nr_frags;

		sco = (struct skb_cb_overlay *)skb->cb;
		sco->meta_slots_used = netbk_gop_skb(skb, &npo);

		count += nr_frags + 1;

		__skb_queue_tail(&rxq, skb);

		/* Filled the batch queue? */
		if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE)
			break;
	}

	BUG_ON(npo.meta_prod > ARRAY_SIZE(netbk->meta));

	if (!npo.copy_prod)
		return;

	BUG_ON(npo.copy_prod > ARRAY_SIZE(netbk->grant_copy_op));
	gnttab_batch_copy(netbk->grant_copy_op, npo.copy_prod);

	while ((skb = __skb_dequeue(&rxq)) != NULL) {
		sco = (struct skb_cb_overlay *)skb->cb;

		vif = netdev_priv(skb->dev);

		if (netbk->meta[npo.meta_cons].gso_size && vif->gso_prefix) {
			resp = RING_GET_RESPONSE(&vif->rx,
						 vif->rx.rsp_prod_pvt++);

			resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;

			resp->offset = netbk->meta[npo.meta_cons].gso_size;
			resp->id = netbk->meta[npo.meta_cons].id;
			resp->status = sco->meta_slots_used;

			npo.meta_cons++;
			sco->meta_slots_used--;
		}

		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;

		status = netbk_check_gop(vif, sco->meta_slots_used, &npo);

		if (sco->meta_slots_used == 1)
			flags = 0;
		else
			flags = XEN_NETRXF_more_data;

		if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
			flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
		else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
			/* remote but checksummed. */
			flags |= XEN_NETRXF_data_validated;

		offset = 0;
		resp = make_rx_response(vif, netbk->meta[npo.meta_cons].id,
					status, offset,
					netbk->meta[npo.meta_cons].size,
					flags);

		if (netbk->meta[npo.meta_cons].gso_size && !vif->gso_prefix) {
			struct xen_netif_extra_info *gso =
				(struct xen_netif_extra_info *)
				RING_GET_RESPONSE(&vif->rx,
						  vif->rx.rsp_prod_pvt++);

			resp->flags |= XEN_NETRXF_extra_info;

			gso->u.gso.size = netbk->meta[npo.meta_cons].gso_size;
			gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
			gso->u.gso.features = 0;

			gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
			gso->flags = 0;
		}

		netbk_add_frag_responses(vif, status,
					 netbk->meta + npo.meta_cons + 1,
					 sco->meta_slots_used);

		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);

		if (ret && list_empty(&vif->notify_list))
			list_add_tail(&vif->notify_list, &notify);

		xenvif_notify_tx_completion(vif);

		npo.meta_cons += sco->meta_slots_used;
	}

	list_for_each_entry_safe(vif, tmp, &notify, notify_list) {
		notify_remote_via_irq(vif->irq);
		list_del_init(&vif->notify_list);
	}

	/* More work to do? */
	if (!skb_queue_empty(&netbk->rx_queue) &&
	    !timer_pending(&netbk->net_timer))
		xen_netbk_kick_thread(netbk);
}
void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb)
{
	struct xen_netbk *netbk = vif->netbk;

	skb_queue_tail(&netbk->rx_queue, skb);

	xen_netbk_kick_thread(netbk);
}
static void xen_netbk_alarm(unsigned long data)
{
	struct xen_netbk *netbk = (struct xen_netbk *)data;
	xen_netbk_kick_thread(netbk);
}
static int __on_net_schedule_list(struct xenvif *vif)
{
	return !list_empty(&vif->schedule_list);
}
/* Must be called with net_schedule_list_lock held */
static void remove_from_net_schedule_list(struct xenvif *vif)
{
	if (likely(__on_net_schedule_list(vif))) {
		list_del_init(&vif->schedule_list);
	}
}
static struct xenvif *poll_net_schedule_list(struct xen_netbk *netbk)
{
	struct xenvif *vif = NULL;

	spin_lock_irq(&netbk->net_schedule_list_lock);
	if (list_empty(&netbk->net_schedule_list))
		goto out;

	vif = list_first_entry(&netbk->net_schedule_list,
			       struct xenvif, schedule_list);

	remove_from_net_schedule_list(vif);
out:
	spin_unlock_irq(&netbk->net_schedule_list_lock);
	return vif;
}
void xen_netbk_schedule_xenvif(struct xenvif *vif)
{
	unsigned long flags;
	struct xen_netbk *netbk = vif->netbk;

	if (__on_net_schedule_list(vif))
		goto kick;

	spin_lock_irqsave(&netbk->net_schedule_list_lock, flags);
	if (!__on_net_schedule_list(vif) &&
	    likely(xenvif_schedulable(vif))) {
		list_add_tail(&vif->schedule_list, &netbk->net_schedule_list);
	}
	spin_unlock_irqrestore(&netbk->net_schedule_list_lock, flags);

kick:
	if ((nr_pending_reqs(netbk) < (MAX_PENDING_REQS/2)) &&
	    !list_empty(&netbk->net_schedule_list))
		xen_netbk_kick_thread(netbk);
}
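/*
 * vifs with pending transmit work sit on their group's net_schedule_list;
 * the group's kthread pops them one at a time via poll_net_schedule_list()
 * in xen_netbk_tx_build_gops(), which gives a simple round-robin between
 * interfaces sharing a group.
 */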
void xen_netbk_deschedule_xenvif(struct xenvif *vif)
{
	struct xen_netbk *netbk = vif->netbk;
	spin_lock_irq(&netbk->net_schedule_list_lock);
	remove_from_net_schedule_list(vif);
	spin_unlock_irq(&netbk->net_schedule_list_lock);
}
void xen_netbk_check_rx_xenvif(struct xenvif *vif)
{
	int more_to_do;

	RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);

	if (more_to_do)
		xen_netbk_schedule_xenvif(vif);
}
static void tx_add_credit(struct xenvif *vif)
{
	unsigned long max_burst, max_credit;

	/*
	 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
	 * Otherwise the interface can seize up due to insufficient credit.
	 */
	max_burst = RING_GET_REQUEST(&vif->tx, vif->tx.req_cons)->size;
	max_burst = min(max_burst, 131072UL);
	max_burst = max(max_burst, vif->credit_bytes);

	/* Take care that adding a new chunk of credit doesn't wrap to zero. */
	max_credit = vif->remaining_credit + vif->credit_bytes;
	if (max_credit < vif->remaining_credit)
		max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */

	vif->remaining_credit = min(max_credit, max_burst);
}
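/*
 * Credit scheduling: a vif may send up to credit_bytes per credit_usec
 * interval.  tx_add_credit() tops the allowance back up (capped so that a
 * single jumbo packet of up to 128kB can always get through), and
 * tx_credit_exceeded() below decides whether a request must wait for the
 * replenish timer instead of being processed now.
 */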
static void tx_credit_callback(unsigned long data)
{
	struct xenvif *vif = (struct xenvif *)data;
	tx_add_credit(vif);
	xen_netbk_check_rx_xenvif(vif);
}
static void netbk_tx_err(struct xenvif *vif,
			 struct xen_netif_tx_request *txp, RING_IDX end)
{
	RING_IDX cons = vif->tx.req_cons;

	do {
		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
		if (cons >= end)
			break;
		txp = RING_GET_REQUEST(&vif->tx, cons++);
	} while (1);
	vif->tx.req_cons = cons;
	xen_netbk_check_rx_xenvif(vif);
}
static int netbk_count_requests(struct xenvif *vif,
				struct xen_netif_tx_request *first,
				struct xen_netif_tx_request *txp,
				int work_to_do)
{
	RING_IDX cons = vif->tx.req_cons;
	int frags = 0;

	if (!(first->flags & XEN_NETTXF_more_data))
		return 0;

	do {
		if (frags >= work_to_do) {
			netdev_dbg(vif->dev, "Need more frags\n");
			return -frags;
		}

		if (unlikely(frags >= MAX_SKB_FRAGS)) {
			netdev_dbg(vif->dev, "Too many frags\n");
			return -frags;
		}

		memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
		       sizeof(*txp));
		if (txp->size > first->size) {
			netdev_dbg(vif->dev, "Frags galore\n");
			return -frags;
		}

		first->size -= txp->size;
		frags++;

		if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
			netdev_dbg(vif->dev, "txp->offset: %x, size: %u\n",
				   txp->offset, txp->size);
			return -frags;
		}
	} while ((txp++)->flags & XEN_NETTXF_more_data);

	return frags;
}
static struct page *xen_netbk_alloc_page(struct xen_netbk *netbk,
					 struct sk_buff *skb,
					 u16 pending_idx)
{
	struct page *page;
	page = alloc_page(GFP_KERNEL|__GFP_COLD);
	if (!page)
		return NULL;
	set_page_ext(page, netbk, pending_idx);
	netbk->mmap_pages[pending_idx] = page;
	return page;
}
static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
						  struct xenvif *vif,
						  struct sk_buff *skb,
						  struct xen_netif_tx_request *txp,
						  struct gnttab_copy *gop)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	skb_frag_t *frags = shinfo->frags;
	u16 pending_idx = *((u16 *)skb->data);
	int i, start;

	/* Skip first skb fragment if it is on same page as header fragment. */
	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);

	for (i = start; i < shinfo->nr_frags; i++, txp++) {
		struct page *page;
		pending_ring_idx_t index;
		struct pending_tx_info *pending_tx_info =
			netbk->pending_tx_info;

		index = pending_index(netbk->pending_cons++);
		pending_idx = netbk->pending_ring[index];
		page = xen_netbk_alloc_page(netbk, skb, pending_idx);
		if (!page)
			return NULL;

		gop->source.u.ref = txp->gref;
		gop->source.domid = vif->domid;
		gop->source.offset = txp->offset;

		gop->dest.u.gmfn = virt_to_mfn(page_address(page));
		gop->dest.domid = DOMID_SELF;
		gop->dest.offset = txp->offset;

		gop->len = txp->size;
		gop->flags = GNTCOPY_source_gref;

		gop++;

		memcpy(&pending_tx_info[pending_idx].req, txp, sizeof(*txp));
		pending_tx_info[pending_idx].vif = vif;
		frag_set_pending_idx(&frags[i], pending_idx);
	}

	return gop;
}
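/*
 * Note: in this copying-mode backend the guest's tx data is not mapped;
 * every fragment is grant-copied into a freshly allocated backend page.
 * The gnttab_copy entries filled in above (and in xen_netbk_tx_build_gops)
 * are only queued here; they are executed later in a single batched
 * gnttab_batch_copy() call before the skbs are submitted.
 */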
static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
				  struct sk_buff *skb,
				  struct gnttab_copy **gopp)
{
	struct gnttab_copy *gop = *gopp;
	u16 pending_idx = *((u16 *)skb->data);
	struct pending_tx_info *pending_tx_info = netbk->pending_tx_info;
	struct xenvif *vif = pending_tx_info[pending_idx].vif;
	struct xen_netif_tx_request *txp;
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;
	int i, err, start;

	/* Check status of header. */
	err = gop->status;
	if (unlikely(err)) {
		pending_ring_idx_t index;
		index = pending_index(netbk->pending_prod++);
		txp = &pending_tx_info[pending_idx].req;
		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
		netbk->pending_ring[index] = pending_idx;
	}

	/* Skip first skb fragment if it is on same page as header fragment. */
	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);

	for (i = start; i < nr_frags; i++) {
		int j, newerr;
		pending_ring_idx_t index;

		pending_idx = frag_get_pending_idx(&shinfo->frags[i]);

		/* Check error status: if okay then remember grant handle. */
		newerr = (++gop)->status;
		if (likely(!newerr)) {
			/* Had a previous error? Invalidate this fragment. */
			if (unlikely(err))
				xen_netbk_idx_release(netbk, pending_idx);
			continue;
		}

		/* Error on this fragment: respond to client with an error. */
		txp = &netbk->pending_tx_info[pending_idx].req;
		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
		index = pending_index(netbk->pending_prod++);
		netbk->pending_ring[index] = pending_idx;

		/* Not the first error? Preceding frags already invalidated. */
		if (err)
			continue;

		/* First error: invalidate header and preceding fragments. */
		pending_idx = *((u16 *)skb->data);
		xen_netbk_idx_release(netbk, pending_idx);
		for (j = start; j < i; j++) {
			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
			xen_netbk_idx_release(netbk, pending_idx);
		}

		/* Remember the error: invalidate all subsequent fragments. */
		err = newerr;
	}

	*gopp = gop + 1;
	return err;
}
static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;
	int i;

	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = shinfo->frags + i;
		struct xen_netif_tx_request *txp;
		struct page *page;
		u16 pending_idx;

		pending_idx = frag_get_pending_idx(frag);

		txp = &netbk->pending_tx_info[pending_idx].req;
		page = virt_to_page(idx_to_kaddr(netbk, pending_idx));
		__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
		skb->len += txp->size;
		skb->data_len += txp->size;
		skb->truesize += txp->size;

		/* Take an extra reference to offset xen_netbk_idx_release */
		get_page(netbk->mmap_pages[pending_idx]);
		xen_netbk_idx_release(netbk, pending_idx);
	}
}
static int xen_netbk_get_extras(struct xenvif *vif,
				struct xen_netif_extra_info *extras,
				int work_to_do)
{
	struct xen_netif_extra_info extra;
	RING_IDX cons = vif->tx.req_cons;

	do {
		if (unlikely(work_to_do-- <= 0)) {
			netdev_dbg(vif->dev, "Missing extra info\n");
			return -EBADR;
		}

		memcpy(&extra, RING_GET_REQUEST(&vif->tx, cons),
		       sizeof(extra));
		if (unlikely(!extra.type ||
			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			vif->tx.req_cons = ++cons;
			netdev_dbg(vif->dev,
				   "Invalid extra type: %d\n", extra.type);
			return -EINVAL;
		}

		memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
		vif->tx.req_cons = ++cons;
	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);

	return work_to_do;
}
static int netbk_set_skb_gso(struct xenvif *vif,
			     struct sk_buff *skb,
			     struct xen_netif_extra_info *gso)
{
	if (!gso->u.gso.size) {
		netdev_dbg(vif->dev, "GSO size must not be zero.\n");
		return -EINVAL;
	}

	/* Currently only TCPv4 S.O. is supported. */
	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
		netdev_dbg(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
		return -EINVAL;
	}

	skb_shinfo(skb)->gso_size = gso->u.gso.size;
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	/* Header must be checked, and gso_segs computed. */
	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
	skb_shinfo(skb)->gso_segs = 0;

	return 0;
}
static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
{
	struct iphdr *iph;
	unsigned char *th;
	int err = -EPROTO;
	int recalculate_partial_csum = 0;

	/*
	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
	 * peers can fail to set NETRXF_csum_blank when sending a GSO
	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
	 * recalculate the partial checksum.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
		vif->rx_gso_checksum_fixup++;
		skb->ip_summed = CHECKSUM_PARTIAL;
		recalculate_partial_csum = 1;
	}

	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (skb->protocol != htons(ETH_P_IP))
		goto out;

	iph = (void *)skb->data;
	th = skb->data + 4 * iph->ihl;
	if (th >= skb_tail_pointer(skb))
		goto out;

	skb->csum_start = th - skb->head;
	switch (iph->protocol) {
	case IPPROTO_TCP:
		skb->csum_offset = offsetof(struct tcphdr, check);

		if (recalculate_partial_csum) {
			struct tcphdr *tcph = (struct tcphdr *)th;
			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 skb->len - iph->ihl*4,
							 IPPROTO_TCP, 0);
		}
		break;
	case IPPROTO_UDP:
		skb->csum_offset = offsetof(struct udphdr, check);

		if (recalculate_partial_csum) {
			struct udphdr *udph = (struct udphdr *)th;
			udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 skb->len - iph->ihl*4,
							 IPPROTO_UDP, 0);
		}
		break;
	default:
		if (net_ratelimit())
			netdev_err(vif->dev,
				   "Attempting to checksum a non-TCP/UDP packet, dropping a protocol %d packet\n",
				   iph->protocol);
		goto out;
	}

	if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb))
		goto out;

	err = 0;

out:
	return err;
}
static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
{
	unsigned long now = jiffies;
	unsigned long next_credit =
		vif->credit_timeout.expires +
		msecs_to_jiffies(vif->credit_usec / 1000);

	/* Timer could already be pending in rare cases. */
	if (timer_pending(&vif->credit_timeout))
		return true;

	/* Passed the point where we can replenish credit? */
	if (time_after_eq(now, next_credit)) {
		vif->credit_timeout.expires = now;
		tx_add_credit(vif);
	}

	/* Still too big to send right now? Set a callback. */
	if (size > vif->remaining_credit) {
		vif->credit_timeout.data     =
			(unsigned long)vif;
		vif->credit_timeout.function =
			tx_credit_callback;
		mod_timer(&vif->credit_timeout,
			  next_credit);

		return true;
	}

	return false;
}
static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
{
	struct gnttab_copy *gop = netbk->tx_copy_ops, *request_gop;
	struct sk_buff *skb;
	int ret;

	while (((nr_pending_reqs(netbk) + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
		!list_empty(&netbk->net_schedule_list)) {
		struct xenvif *vif;
		struct xen_netif_tx_request txreq;
		struct xen_netif_tx_request txfrags[MAX_SKB_FRAGS];
		struct page *page;
		struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
		u16 pending_idx;
		RING_IDX idx;
		int work_to_do;
		unsigned int data_len;
		pending_ring_idx_t index;

		/* Get a netif from the list with work to do. */
		vif = poll_net_schedule_list(netbk);
		if (!vif)
			continue;

		RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
		if (!work_to_do)
			continue;

		idx = vif->tx.req_cons;
		rmb(); /* Ensure that we see the request before we copy it. */
		memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq));

		/* Credit-based scheduling. */
		if (txreq.size > vif->remaining_credit &&
		    tx_credit_exceeded(vif, txreq.size))
			continue;

		vif->remaining_credit -= txreq.size;

		work_to_do--;
		vif->tx.req_cons = ++idx;

		memset(extras, 0, sizeof(extras));
		if (txreq.flags & XEN_NETTXF_extra_info) {
			work_to_do = xen_netbk_get_extras(vif, extras,
							  work_to_do);
			idx = vif->tx.req_cons;
			if (unlikely(work_to_do < 0)) {
				netbk_tx_err(vif, &txreq, idx);
				continue;
			}
		}

		ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
		if (unlikely(ret < 0)) {
			netbk_tx_err(vif, &txreq, idx - ret);
			continue;
		}
		idx += ret;

		if (unlikely(txreq.size < ETH_HLEN)) {
			netdev_dbg(vif->dev,
				   "Bad packet size: %d\n", txreq.size);
			netbk_tx_err(vif, &txreq, idx);
			continue;
		}

		/* No crossing a page as the payload mustn't fragment. */
		if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
			netdev_dbg(vif->dev,
				   "txreq.offset: %x, size: %u, end: %lu\n",
				   txreq.offset, txreq.size,
				   (txreq.offset&~PAGE_MASK) + txreq.size);
			netbk_tx_err(vif, &txreq, idx);
			continue;
		}

		index = pending_index(netbk->pending_cons);
		pending_idx = netbk->pending_ring[index];

		data_len = (txreq.size > PKT_PROT_LEN &&
			    ret < MAX_SKB_FRAGS) ?
			PKT_PROT_LEN : txreq.size;

		skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
				GFP_ATOMIC | __GFP_NOWARN);
		if (unlikely(skb == NULL)) {
			netdev_dbg(vif->dev,
				   "Can't allocate a skb in start_xmit.\n");
			netbk_tx_err(vif, &txreq, idx);
			break;
		}

		/* Packets passed to netif_rx() must have some headroom. */
		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
			struct xen_netif_extra_info *gso;
			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

			if (netbk_set_skb_gso(vif, skb, gso)) {
				kfree_skb(skb);
				netbk_tx_err(vif, &txreq, idx);
				continue;
			}
		}

		/* XXX could copy straight to head */
		page = xen_netbk_alloc_page(netbk, skb, pending_idx);
		if (!page) {
			kfree_skb(skb);
			netbk_tx_err(vif, &txreq, idx);
			continue;
		}

		gop->source.u.ref = txreq.gref;
		gop->source.domid = vif->domid;
		gop->source.offset = txreq.offset;

		gop->dest.u.gmfn = virt_to_mfn(page_address(page));
		gop->dest.domid = DOMID_SELF;
		gop->dest.offset = txreq.offset;

		gop->len = txreq.size;
		gop->flags = GNTCOPY_source_gref;

		gop++;

		memcpy(&netbk->pending_tx_info[pending_idx].req,
		       &txreq, sizeof(txreq));
		netbk->pending_tx_info[pending_idx].vif = vif;
		*((u16 *)skb->data) = pending_idx;

		__skb_put(skb, data_len);

		skb_shinfo(skb)->nr_frags = ret;
		if (data_len < txreq.size) {
			skb_shinfo(skb)->nr_frags++;
			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
					     pending_idx);
		} else {
			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
					     INVALID_PENDING_IDX);
		}

		netbk->pending_cons++;

		request_gop = xen_netbk_get_requests(netbk, vif,
						     skb, txfrags, gop);
		if (request_gop == NULL) {
			kfree_skb(skb);
			netbk_tx_err(vif, &txreq, idx);
			continue;
		}
		gop = request_gop;

		__skb_queue_tail(&netbk->tx_queue, skb);

		vif->tx.req_cons = idx;
		xen_netbk_check_rx_xenvif(vif);

		if ((gop-netbk->tx_copy_ops) >= ARRAY_SIZE(netbk->tx_copy_ops))
			break;
	}

	return gop - netbk->tx_copy_ops;
}
static void xen_netbk_tx_submit(struct xen_netbk *netbk)
{
	struct gnttab_copy *gop = netbk->tx_copy_ops;
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&netbk->tx_queue)) != NULL) {
		struct xen_netif_tx_request *txp;
		struct xenvif *vif;
		u16 pending_idx;
		unsigned data_len;

		pending_idx = *((u16 *)skb->data);
		vif = netbk->pending_tx_info[pending_idx].vif;
		txp = &netbk->pending_tx_info[pending_idx].req;

		/* Check the remap error code. */
		if (unlikely(xen_netbk_tx_check_gop(netbk, skb, &gop))) {
			netdev_dbg(vif->dev, "netback grant failed.\n");
			skb_shinfo(skb)->nr_frags = 0;
			kfree_skb(skb);
			continue;
		}

		data_len = skb->len;
		memcpy(skb->data,
		       (void *)(idx_to_kaddr(netbk, pending_idx)|txp->offset),
		       data_len);
		if (data_len < txp->size) {
			/* Append the packet payload as a fragment. */
			txp->offset += data_len;
			txp->size -= data_len;
		} else {
			/* Schedule a response immediately. */
			xen_netbk_idx_release(netbk, pending_idx);
		}

		if (txp->flags & XEN_NETTXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (txp->flags & XEN_NETTXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		xen_netbk_fill_frags(netbk, skb);

		/*
		 * If the initial fragment was < PKT_PROT_LEN then
		 * pull through some bytes from the other fragments to
		 * increase the linear region to PKT_PROT_LEN bytes.
		 */
		if (skb_headlen(skb) < PKT_PROT_LEN && skb_is_nonlinear(skb)) {
			int target = min_t(int, skb->len, PKT_PROT_LEN);
			__pskb_pull_tail(skb, target - skb_headlen(skb));
		}

		skb->dev      = vif->dev;
		skb->protocol = eth_type_trans(skb, skb->dev);

		if (checksum_setup(vif, skb)) {
			netdev_dbg(vif->dev,
				   "Can't setup checksum in net_tx_action\n");
			kfree_skb(skb);
			continue;
		}

		vif->dev->stats.rx_bytes += skb->len;
		vif->dev->stats.rx_packets++;

		xenvif_receive_skb(vif, skb);
	}
}
/* Called after netfront has transmitted */
static void xen_netbk_tx_action(struct xen_netbk *netbk)
{
	unsigned nr_gops;

	nr_gops = xen_netbk_tx_build_gops(netbk);

	if (nr_gops == 0)
		return;

	gnttab_batch_copy(netbk->tx_copy_ops, nr_gops);

	xen_netbk_tx_submit(netbk);
}
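/*
 * Guest transmit is therefore a three step pipeline: build the gnttab_copy
 * batch from the frontends' tx rings, perform the whole batch with a single
 * gnttab_batch_copy() call, then hand the assembled skbs to the network
 * stack in xen_netbk_tx_submit().
 */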
static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
{
	struct xenvif *vif;
	struct pending_tx_info *pending_tx_info;
	pending_ring_idx_t index;

	/* Already complete? */
	if (netbk->mmap_pages[pending_idx] == NULL)
		return;

	pending_tx_info = &netbk->pending_tx_info[pending_idx];

	vif = pending_tx_info->vif;

	make_tx_response(vif, &pending_tx_info->req, XEN_NETIF_RSP_OKAY);

	index = pending_index(netbk->pending_prod++);
	netbk->pending_ring[index] = pending_idx;

	netbk->mmap_pages[pending_idx]->mapping = 0;
	put_page(netbk->mmap_pages[pending_idx]);
	netbk->mmap_pages[pending_idx] = NULL;
}
static void make_tx_response(struct xenvif *vif,
			     struct xen_netif_tx_request *txp,
			     s8 st)
{
	RING_IDX i = vif->tx.rsp_prod_pvt;
	struct xen_netif_tx_response *resp;
	int notify;

	resp = RING_GET_RESPONSE(&vif->tx, i);
	resp->id     = txp->id;
	resp->status = st;

	if (txp->flags & XEN_NETTXF_extra_info)
		RING_GET_RESPONSE(&vif->tx, ++i)->status = XEN_NETIF_RSP_NULL;

	vif->tx.rsp_prod_pvt = ++i;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify);
	if (notify)
		notify_remote_via_irq(vif->irq);
}
static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
						       u16 id,
						       s8 st,
						       u16 offset,
						       u16 size,
						       u16 flags)
{
	RING_IDX i = vif->rx.rsp_prod_pvt;
	struct xen_netif_rx_response *resp;

	resp = RING_GET_RESPONSE(&vif->rx, i);
	resp->offset = offset;
	resp->flags  = flags;
	resp->id     = id;
	resp->status = (s16)size;
	if (st < 0)
		resp->status = (s16)st;

	vif->rx.rsp_prod_pvt = ++i;

	return resp;
}
static inline int rx_work_todo(struct xen_netbk *netbk)
{
	return !skb_queue_empty(&netbk->rx_queue);
}

static inline int tx_work_todo(struct xen_netbk *netbk)
{
	if (((nr_pending_reqs(netbk) + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
	    !list_empty(&netbk->net_schedule_list))
		return 1;

	return 0;
}
static int xen_netbk_kthread(void *data)
{
	struct xen_netbk *netbk = data;

	while (!kthread_should_stop()) {
		wait_event_interruptible(netbk->wq,
					 rx_work_todo(netbk) ||
					 tx_work_todo(netbk) ||
					 kthread_should_stop());

		if (kthread_should_stop())
			break;

		if (rx_work_todo(netbk))
			xen_netbk_rx_action(netbk);

		if (tx_work_todo(netbk))
			xen_netbk_tx_action(netbk);
	}

	return 0;
}
void xen_netbk_unmap_frontend_rings(struct xenvif *vif)
{
	if (vif->tx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
					vif->tx.sring);
	if (vif->rx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
					vif->rx.sring);
}
int xen_netbk_map_frontend_rings(struct xenvif *vif,
				 grant_ref_t tx_ring_ref,
				 grant_ref_t rx_ring_ref)
{
	void *addr;
	struct xen_netif_tx_sring *txs;
	struct xen_netif_rx_sring *rxs;

	int err = -ENOMEM;

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
				     tx_ring_ref, &addr);
	if (err)
		goto err;

	txs = (struct xen_netif_tx_sring *)addr;
	BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE);

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
				     rx_ring_ref, &addr);
	if (err)
		goto err;

	rxs = (struct xen_netif_rx_sring *)addr;
	BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);

	vif->rx_req_cons_peek = 0;

	return 0;

err:
	xen_netbk_unmap_frontend_rings(vif);
	return err;
}
netback_init(void)
1665 xen_netbk_group_nr
= num_online_cpus();
1666 xen_netbk
= vzalloc(sizeof(struct xen_netbk
) * xen_netbk_group_nr
);
1670 for (group
= 0; group
< xen_netbk_group_nr
; group
++) {
1671 struct xen_netbk
*netbk
= &xen_netbk
[group
];
1672 skb_queue_head_init(&netbk
->rx_queue
);
1673 skb_queue_head_init(&netbk
->tx_queue
);
1675 init_timer(&netbk
->net_timer
);
1676 netbk
->net_timer
.data
= (unsigned long)netbk
;
1677 netbk
->net_timer
.function
= xen_netbk_alarm
;
1679 netbk
->pending_cons
= 0;
1680 netbk
->pending_prod
= MAX_PENDING_REQS
;
1681 for (i
= 0; i
< MAX_PENDING_REQS
; i
++)
1682 netbk
->pending_ring
[i
] = i
;
1684 init_waitqueue_head(&netbk
->wq
);
1685 netbk
->task
= kthread_create(xen_netbk_kthread
,
1687 "netback/%u", group
);
1689 if (IS_ERR(netbk
->task
)) {
1690 printk(KERN_ALERT
"kthread_create() fails at netback\n");
1691 del_timer(&netbk
->net_timer
);
1692 rc
= PTR_ERR(netbk
->task
);
1696 kthread_bind(netbk
->task
, group
);
1698 INIT_LIST_HEAD(&netbk
->net_schedule_list
);
1700 spin_lock_init(&netbk
->net_schedule_list_lock
);
1702 atomic_set(&netbk
->netfront_count
, 0);
1704 wake_up_process(netbk
->task
);
1707 rc
= xenvif_xenbus_init();
1714 while (--group
>= 0) {
1715 struct xen_netbk
*netbk
= &xen_netbk
[group
];
1716 for (i
= 0; i
< MAX_PENDING_REQS
; i
++) {
1717 if (netbk
->mmap_pages
[i
])
1718 __free_page(netbk
->mmap_pages
[i
]);
1720 del_timer(&netbk
->net_timer
);
1721 kthread_stop(netbk
->task
);
1728 module_init(netback_init
);
1730 MODULE_LICENSE("Dual BSD/GPL");
1731 MODULE_ALIAS("xen-backend:vif");