/*
 * Virtual network driver for conversing with remote driver backends.
 *
 * Copyright (c) 2002-2005, K A Fraser
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/udp.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>

#include <asm/xen/page.h>
#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/platform_pci.h>
#include <xen/grant_table.h>

#include <xen/interface/io/netif.h>
#include <xen/interface/memory.h>
#include <xen/interface/grant_table.h>
/* Module parameters */
static unsigned int xennet_max_queues;
module_param_named(max_queues, xennet_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
		 "Maximum number of queues per virtual interface");
static const struct ethtool_ops xennet_ethtool_ops;
#define NETFRONT_SKB_CB(skb)	((struct netfront_cb *)((skb)->cb))

#define RX_COPY_THRESHOLD 256

#define GRANT_INVALID_REF	0

#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
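/*
 * Sizing note (added commentary): __CONST_RING_SIZE() rounds the number
 * of request/response slots that fit in one shared page down to a power
 * of two; with 4 KiB pages both rings work out to 256 slots, which is
 * why index helpers below can simply mask with (ring size - 1).
 */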
/* Minimum number of Rx slots (includes slot for GSO metadata). */
#define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1)

/* Queue name is interface name with "-qNNN" appended */
#define QUEUE_NAME_SIZE (IFNAMSIZ + 6)

/* IRQ name is queue name with "-tx" or "-rx" appended */
#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
struct netfront_stats {
	u64			rx_packets;
	u64			tx_packets;
	u64			rx_bytes;
	u64			tx_bytes;
	struct u64_stats_sync	syncp;
};
struct netfront_queue {
	unsigned int id; /* Queue ID, 0-based */
	char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
	struct netfront_info *info;

	struct napi_struct napi;

	/* Split event channels support, tx_* == rx_* when using
	 * single event channel.
	 */
	unsigned int tx_evtchn, rx_evtchn;
	unsigned int tx_irq, rx_irq;
	/* Only used when split event channels support is enabled */
	char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
	char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */

	spinlock_t tx_lock;
	struct xen_netif_tx_front_ring tx;
	int tx_ring_ref;

	/*
	 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
	 * are linked from tx_skb_freelist through skb_entry.link.
	 *
	 * NB. Freelist index entries are always going to be less than
	 * PAGE_OFFSET, whereas pointers to skbs will always be equal or
	 * greater than PAGE_OFFSET: we use this property to distinguish
	 * them.
	 */
	union skb_entry {
		struct sk_buff *skb;
		unsigned long link;
	} tx_skbs[NET_TX_RING_SIZE];
	grant_ref_t gref_tx_head;
	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
	struct page *grant_tx_page[NET_TX_RING_SIZE];
	unsigned tx_skb_freelist;

	spinlock_t rx_lock ____cacheline_aligned_in_smp;
	struct xen_netif_rx_front_ring rx;
	int rx_ring_ref;

	struct timer_list rx_refill_timer;

	struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
	grant_ref_t gref_rx_head;
	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];

	unsigned long rx_pfn_array[NET_RX_RING_SIZE];
	struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
	struct mmu_update rx_mmu[NET_RX_RING_SIZE];
};
struct netfront_info {
	struct list_head list;
	struct net_device *netdev;

	struct xenbus_device *xbdev;

	/* Multi-queue support */
	struct netfront_queue *queues;

	struct netfront_stats __percpu *stats;

	atomic_t rx_gso_checksum_fixup;
};
struct netfront_rx_info {
	struct xen_netif_rx_response rx;
	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
};
static void skb_entry_set_link(union skb_entry *list, unsigned short id)
{
	list->link = id;
}

static int skb_entry_is_link(const union skb_entry *list)
{
	BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
	return (unsigned long)list->skb < PAGE_OFFSET;
}
/*
 * Access macros for acquiring/freeing slots in tx_skbs[].
 */

static void add_id_to_freelist(unsigned *head, union skb_entry *list,
			       unsigned short id)
{
	skb_entry_set_link(&list[id], *head);
	*head = id;
}

static unsigned short get_id_from_freelist(unsigned *head,
					   union skb_entry *list)
{
	unsigned int id = *head;
	*head = list[id].link;
	return id;
}
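/*
 * Illustrative walk-through (added commentary): with a freshly
 * initialised freelist 0 -> 1 -> 2 -> ...,
 *
 *	id = get_id_from_freelist(&head, list);	// pops 0, head == 1
 *	list[id].skb = skb;			// slot now holds a pointer
 *	add_id_to_freelist(&head, list, id);	// pushes 0, head == 0
 *
 * Since freelist indices are small integers and skb pointers are kernel
 * virtual addresses at or above PAGE_OFFSET, skb_entry_is_link() can
 * tell the two apart with a single comparison.
 */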
static int xennet_rxidx(RING_IDX idx)
{
	return idx & (NET_RX_RING_SIZE - 1);
}
static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue,
					 RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	struct sk_buff *skb = queue->rx_skbs[i];
	queue->rx_skbs[i] = NULL;
	return skb;
}
static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
				     RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	grant_ref_t ref = queue->grant_rx_ref[i];
	queue->grant_rx_ref[i] = GRANT_INVALID_REF;
	return ref;
}
#ifdef CONFIG_SYSFS
static int xennet_sysfs_addif(struct net_device *netdev);
static void xennet_sysfs_delif(struct net_device *netdev);
#else /* !CONFIG_SYSFS */
#define xennet_sysfs_addif(dev) (0)
#define xennet_sysfs_delif(dev) do { } while (0)
#endif
static bool xennet_can_sg(struct net_device *dev)
{
	return dev->features & NETIF_F_SG;
}
static void rx_refill_timeout(unsigned long data)
{
	struct netfront_queue *queue = (struct netfront_queue *)data;
	napi_schedule(&queue->napi);
}
static int netfront_tx_slot_available(struct netfront_queue *queue)
{
	return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
		(NET_TX_RING_SIZE - MAX_SKB_FRAGS - 2);
}
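/*
 * Added note: the "- MAX_SKB_FRAGS - 2" headroom reserves enough free
 * slots for one worst-case packet (every fragment in its own slot, plus
 * the linear header and an extra-info slot), so a queue reported as
 * available can always accept the next skb without overflowing the
 * ring.
 */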
static void xennet_maybe_wake_tx(struct netfront_queue *queue)
{
	struct net_device *dev = queue->info->netdev;
	struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id);

	if (unlikely(netif_tx_queue_stopped(dev_queue)) &&
	    netfront_tx_slot_available(queue) &&
	    likely(netif_running(dev)))
		netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id));
}
static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
{
	struct sk_buff *skb;
	struct page *page;

	skb = __netdev_alloc_skb(queue->info->netdev,
				 RX_COPY_THRESHOLD + NET_IP_ALIGN,
				 GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
	if (!page) {
		kfree_skb(skb);
		return NULL;
	}
	skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);

	/* Align the IP header to a 16-byte boundary */
	skb_reserve(skb, NET_IP_ALIGN);
	skb->dev = queue->info->netdev;

	return skb;
}
static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
{
	RING_IDX req_prod = queue->rx.req_prod_pvt;
	int notify;

	if (unlikely(!netif_carrier_ok(queue->info->netdev)))
		return;

	for (req_prod = queue->rx.req_prod_pvt;
	     req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE;
	     req_prod++) {
		struct sk_buff *skb;
		unsigned short id;
		grant_ref_t ref;
		unsigned long pfn;
		struct xen_netif_rx_request *req;

		skb = xennet_alloc_one_rx_buffer(queue);
		if (!skb)
			break;

		id = xennet_rxidx(req_prod);

		BUG_ON(queue->rx_skbs[id]);
		queue->rx_skbs[id] = skb;

		ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
		BUG_ON((signed short)ref < 0);
		queue->grant_rx_ref[id] = ref;

		pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));

		req = RING_GET_REQUEST(&queue->rx, req_prod);
		gnttab_grant_foreign_access_ref(ref,
						queue->info->xbdev->otherend_id,
						pfn_to_mfn(pfn),
						0);
		req->id = id;
		req->gref = ref;
	}

	queue->rx.req_prod_pvt = req_prod;

	/* Not enough requests? Try again later. */
	if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN) {
		mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
		return;
	}

	wmb(); /* barrier so backend sees requests */

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
	if (notify)
		notify_remote_via_irq(queue->rx_irq);
}
static int xennet_open(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	unsigned int num_queues = dev->real_num_tx_queues;
	unsigned int i;
	struct netfront_queue *queue = NULL;

	for (i = 0; i < num_queues; ++i) {
		queue = &np->queues[i];
		napi_enable(&queue->napi);

		spin_lock_bh(&queue->rx_lock);
		if (netif_carrier_ok(dev)) {
			xennet_alloc_rx_buffers(queue);
			queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1;
			if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))
				napi_schedule(&queue->napi);
		}
		spin_unlock_bh(&queue->rx_lock);
	}

	netif_tx_start_all_queues(dev);

	return 0;
}
static void xennet_tx_buf_gc(struct netfront_queue *queue)
{
	RING_IDX cons, prod;
	unsigned short id;
	struct sk_buff *skb;

	BUG_ON(!netif_carrier_ok(queue->info->netdev));

	do {
		prod = queue->tx.sring->rsp_prod;
		rmb(); /* Ensure we see responses up to 'prod'. */

		for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
			struct xen_netif_tx_response *txrsp;

			txrsp = RING_GET_RESPONSE(&queue->tx, cons);
			if (txrsp->status == XEN_NETIF_RSP_NULL)
				continue;

			id  = txrsp->id;
			skb = queue->tx_skbs[id].skb;
			if (unlikely(gnttab_query_foreign_access(
				queue->grant_tx_ref[id]) != 0)) {
				pr_alert("%s: warning -- grant still in use by backend domain\n",
					 __func__);
				BUG();
			}
			gnttab_end_foreign_access_ref(
				queue->grant_tx_ref[id], GNTMAP_readonly);
			gnttab_release_grant_reference(
				&queue->gref_tx_head, queue->grant_tx_ref[id]);
			queue->grant_tx_ref[id] = GRANT_INVALID_REF;
			queue->grant_tx_page[id] = NULL;
			add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, id);
			dev_kfree_skb_irq(skb);
		}

		queue->tx.rsp_cons = prod;

		/*
		 * Set a new event, then check for race with update of tx_cons.
		 * Note that it is essential to schedule a callback, no matter
		 * how few buffers are pending. Even if there is space in the
		 * transmit ring, higher layers may be blocked because too much
		 * data is outstanding: in such cases notification from Xen is
		 * likely to be the only kick that we'll get.
		 */
		queue->tx.sring->rsp_event =
			prod + ((queue->tx.sring->req_prod - prod) >> 1) + 1;
		mb(); /* update shared area */
	} while ((cons == prod) && (prod != queue->tx.sring->rsp_prod));

	xennet_maybe_wake_tx(queue);
}
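/*
 * Worked example (added commentary): with prod == 10 and
 * sring->req_prod == 14, rsp_event becomes 10 + (4 >> 1) + 1 == 13, so
 * the backend interrupts us again only after consuming about half of
 * the outstanding requests, batching Tx completions rather than raising
 * one event per packet.
 */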
static void xennet_make_frags(struct sk_buff *skb, struct netfront_queue *queue,
			      struct xen_netif_tx_request *tx)
{
	char *data = skb->data;
	unsigned long mfn;
	RING_IDX prod = queue->tx.req_prod_pvt;
	int frags = skb_shinfo(skb)->nr_frags;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	unsigned short id;
	grant_ref_t ref;
	int i;

	/* While the header overlaps a page boundary (including being
	   larger than a page), split it into page-sized chunks. */
	while (len > PAGE_SIZE - offset) {
		tx->size = PAGE_SIZE - offset;
		tx->flags |= XEN_NETTXF_more_data;
		len -= tx->size;
		data += tx->size;
		offset = 0;

		id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
		queue->tx_skbs[id].skb = skb_get(skb);
		tx = RING_GET_REQUEST(&queue->tx, prod++);
		tx->id = id;
		ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
		BUG_ON((signed short)ref < 0);

		mfn = virt_to_mfn(data);
		gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
						mfn, GNTMAP_readonly);

		queue->grant_tx_page[id] = virt_to_page(data);
		tx->gref = queue->grant_tx_ref[id] = ref;
		tx->offset = offset;
		tx->size = len;
		tx->flags = 0;
	}

	/* Grant backend access to each skb fragment page. */
	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		struct page *page = skb_frag_page(frag);

		len = skb_frag_size(frag);
		offset = frag->page_offset;

		/* Data must not cross a page boundary. */
		BUG_ON(len + offset > PAGE_SIZE << compound_order(page));

		/* Skip unused frames from start of page */
		page += offset >> PAGE_SHIFT;
		offset &= ~PAGE_MASK;

		while (len > 0) {
			unsigned long bytes;

			BUG_ON(offset >= PAGE_SIZE);

			bytes = PAGE_SIZE - offset;
			if (bytes > len)
				bytes = len;

			tx->flags |= XEN_NETTXF_more_data;

			id = get_id_from_freelist(&queue->tx_skb_freelist,
						  queue->tx_skbs);
			queue->tx_skbs[id].skb = skb_get(skb);
			tx = RING_GET_REQUEST(&queue->tx, prod++);
			tx->id = id;
			ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
			BUG_ON((signed short)ref < 0);

			mfn = pfn_to_mfn(page_to_pfn(page));
			gnttab_grant_foreign_access_ref(ref,
							queue->info->xbdev->otherend_id,
							mfn, GNTMAP_readonly);

			queue->grant_tx_page[id] = page;
			tx->gref = queue->grant_tx_ref[id] = ref;
			tx->offset = offset;
			tx->size = bytes;
			tx->flags = 0;

			offset += bytes;
			len -= bytes;

			/* Next frame */
			if (offset == PAGE_SIZE && len) {
				BUG_ON(!PageCompound(page));
				page++;
				offset = 0;
			}
		}
	}

	queue->tx.req_prod_pvt = prod;
}
/*
 * Count how many ring slots are required to send the frags of this
 * skb. Each frag might be a compound page.
 */
static int xennet_count_skb_frag_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int pages = 0;

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = frag->page_offset;

		/* Skip unused frames from start of page */
		offset &= ~PAGE_MASK;

		pages += PFN_UP(offset + size);
	}

	return pages;
}
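/*
 * Worked example (added commentary): on a 4 KiB-page system, a fragment
 * of 8100 bytes starting at in-page offset 100 covers bytes 100..8199,
 * i.e. three pages, and PFN_UP(100 + 8100) == 3 counts exactly those
 * three ring slots.
 */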
static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
			       void *accel_priv, select_queue_fallback_t fallback)
{
	unsigned int num_queues = dev->real_num_tx_queues;
	u32 hash;
	u16 queue_idx;

	/* First, check if there is only one queue */
	if (num_queues == 1) {
		queue_idx = 0;
	} else {
		hash = skb_get_hash(skb);
		queue_idx = hash % num_queues;
	}

	return queue_idx;
}
static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct netfront_stats *stats = this_cpu_ptr(np->stats);
	struct xen_netif_tx_request *tx;
	char *data = skb->data;
	RING_IDX i;
	grant_ref_t ref;
	unsigned long mfn;
	int notify;
	int slots;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	unsigned long flags;
	struct netfront_queue *queue = NULL;
	unsigned int num_queues = dev->real_num_tx_queues;
	u16 queue_index;

	/* Drop the packet if no queues are set up */
	if (num_queues < 1)
		goto drop;
	/* Determine which queue to transmit this SKB on */
	queue_index = skb_get_queue_mapping(skb);
	queue = &np->queues[queue_index];

	/* If skb->len is too big for wire format, drop skb and alert
	 * user about misconfiguration.
	 */
	if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
		net_alert_ratelimited(
			"xennet: skb->len = %u, too big for wire format\n",
			skb->len);
		goto drop;
	}

	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE) +
		xennet_count_skb_frag_slots(skb);
	if (unlikely(slots > MAX_SKB_FRAGS + 1)) {
		net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n",
				    slots, skb->len);
		if (skb_linearize(skb))
			goto drop;
	}

	spin_lock_irqsave(&queue->tx_lock, flags);

	if (unlikely(!netif_carrier_ok(dev) ||
		     (slots > 1 && !xennet_can_sg(dev)) ||
		     netif_needs_gso(dev, skb, netif_skb_features(skb)))) {
		spin_unlock_irqrestore(&queue->tx_lock, flags);
		goto drop;
	}

	i = queue->tx.req_prod_pvt;

	id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
	queue->tx_skbs[id].skb = skb;

	tx = RING_GET_REQUEST(&queue->tx, i);

	tx->id = id;
	ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
	BUG_ON((signed short)ref < 0);
	mfn = virt_to_mfn(data);
	gnttab_grant_foreign_access_ref(
		ref, queue->info->xbdev->otherend_id, mfn, GNTMAP_readonly);
	queue->grant_tx_page[id] = virt_to_page(data);
	tx->gref = queue->grant_tx_ref[id] = ref;
	tx->offset = offset;
	tx->size = len;

	tx->flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		/* local packet? */
		tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		/* remote but checksummed. */
		tx->flags |= XEN_NETTXF_data_validated;

	if (skb_shinfo(skb)->gso_size) {
		struct xen_netif_extra_info *gso;

		gso = (struct xen_netif_extra_info *)
			RING_GET_REQUEST(&queue->tx, ++i);

		tx->flags |= XEN_NETTXF_extra_info;

		gso->u.gso.size = skb_shinfo(skb)->gso_size;
		gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
			XEN_NETIF_GSO_TYPE_TCPV6 :
			XEN_NETIF_GSO_TYPE_TCPV4;
		gso->u.gso.pad = 0;
		gso->u.gso.features = 0;

		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
		gso->flags = 0;
	}

	queue->tx.req_prod_pvt = i + 1;

	xennet_make_frags(skb, queue, tx);
	tx->size = skb->len;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
	if (notify)
		notify_remote_via_irq(queue->tx_irq);

	u64_stats_update_begin(&stats->syncp);
	stats->tx_bytes += skb->len;
	stats->tx_packets++;
	u64_stats_update_end(&stats->syncp);

	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
	xennet_tx_buf_gc(queue);

	if (!netfront_tx_slot_available(queue))
		netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));

	spin_unlock_irqrestore(&queue->tx_lock, flags);

	return NETDEV_TX_OK;

 drop:
	dev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
static int xennet_close(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	unsigned int num_queues = dev->real_num_tx_queues;
	unsigned int i;
	struct netfront_queue *queue;
	netif_tx_stop_all_queues(np->netdev);
	for (i = 0; i < num_queues; ++i) {
		queue = &np->queues[i];
		napi_disable(&queue->napi);
	}
	return 0;
}
static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
				grant_ref_t ref)
{
	int new = xennet_rxidx(queue->rx.req_prod_pvt);

	BUG_ON(queue->rx_skbs[new]);
	queue->rx_skbs[new] = skb;
	queue->grant_rx_ref[new] = ref;
	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new;
	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref;
	queue->rx.req_prod_pvt++;
}
static int xennet_get_extras(struct netfront_queue *queue,
			     struct xen_netif_extra_info *extras,
			     RING_IDX rp)
{
	struct xen_netif_extra_info *extra;
	struct device *dev = &queue->info->netdev->dev;
	RING_IDX cons = queue->rx.rsp_cons;
	int err = 0;

	do {
		struct sk_buff *skb;
		grant_ref_t ref;

		if (unlikely(cons + 1 == rp)) {
			if (net_ratelimit())
				dev_warn(dev, "Missing extra info\n");
			err = -EBADR;
			break;
		}

		extra = (struct xen_netif_extra_info *)
			RING_GET_RESPONSE(&queue->rx, ++cons);

		if (unlikely(!extra->type ||
			     extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			if (net_ratelimit())
				dev_warn(dev, "Invalid extra type: %d\n",
					 extra->type);
			err = -EINVAL;
		} else {
			memcpy(&extras[extra->type - 1], extra,
			       sizeof(*extra));
		}

		skb = xennet_get_rx_skb(queue, cons);
		ref = xennet_get_rx_ref(queue, cons);
		xennet_move_rx_slot(queue, skb, ref);
	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);

	queue->rx.rsp_cons = cons;
	return err;
}
static int xennet_get_responses(struct netfront_queue *queue,
				struct netfront_rx_info *rinfo, RING_IDX rp,
				struct sk_buff_head *list)
{
	struct xen_netif_rx_response *rx = &rinfo->rx;
	struct xen_netif_extra_info *extras = rinfo->extras;
	struct device *dev = &queue->info->netdev->dev;
	RING_IDX cons = queue->rx.rsp_cons;
	struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
	grant_ref_t ref = xennet_get_rx_ref(queue, cons);
	int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
	int slots = 1;
	int err = 0;
	unsigned long ret;

	if (rx->flags & XEN_NETRXF_extra_info) {
		err = xennet_get_extras(queue, extras, rp);
		cons = queue->rx.rsp_cons;
	}

	for (;;) {
		if (unlikely(rx->status < 0 ||
			     rx->offset + rx->status > PAGE_SIZE)) {
			if (net_ratelimit())
				dev_warn(dev, "rx->offset: %x, size: %u\n",
					 rx->offset, rx->status);
			xennet_move_rx_slot(queue, skb, ref);
			err = -EINVAL;
			goto next;
		}

		/*
		 * This definitely indicates a bug, either in this driver or in
		 * the backend driver. In future this should flag the bad
		 * situation to the system controller to reboot the backend.
		 */
		if (ref == GRANT_INVALID_REF) {
			if (net_ratelimit())
				dev_warn(dev, "Bad rx response id %d.\n",
					 rx->id);
			err = -EINVAL;
			goto next;
		}

		ret = gnttab_end_foreign_access_ref(ref, 0);
		BUG_ON(!ret);

		gnttab_release_grant_reference(&queue->gref_rx_head, ref);

		__skb_queue_tail(list, skb);

next:
		if (!(rx->flags & XEN_NETRXF_more_data))
			break;

		if (cons + slots == rp) {
			if (net_ratelimit())
				dev_warn(dev, "Need more slots\n");
			err = -ENOENT;
			break;
		}

		rx = RING_GET_RESPONSE(&queue->rx, cons + slots);
		skb = xennet_get_rx_skb(queue, cons + slots);
		ref = xennet_get_rx_ref(queue, cons + slots);
		slots++;
	}

	if (unlikely(slots > max)) {
		if (net_ratelimit())
			dev_warn(dev, "Too many slots\n");
		err = -E2BIG;
	}

	if (unlikely(err))
		queue->rx.rsp_cons = cons + slots;

	return err;
}
static int xennet_set_skb_gso(struct sk_buff *skb,
			      struct xen_netif_extra_info *gso)
{
	if (!gso->u.gso.size) {
		if (net_ratelimit())
			pr_warn("GSO size must not be zero\n");
		return -EINVAL;
	}

	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 &&
	    gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) {
		if (net_ratelimit())
			pr_warn("Bad GSO type %d\n", gso->u.gso.type);
		return -EINVAL;
	}

	skb_shinfo(skb)->gso_size = gso->u.gso.size;
	skb_shinfo(skb)->gso_type =
		(gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4) ?
		SKB_GSO_TCPV4 :
		SKB_GSO_TCPV6;

	/* Header must be checked, and gso_segs computed. */
	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
	skb_shinfo(skb)->gso_segs = 0;

	return 0;
}
static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
				  struct sk_buff *skb,
				  struct sk_buff_head *list)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	RING_IDX cons = queue->rx.rsp_cons;
	struct sk_buff *nskb;

	while ((nskb = __skb_dequeue(list))) {
		struct xen_netif_rx_response *rx =
			RING_GET_RESPONSE(&queue->rx, ++cons);
		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];

		if (shinfo->nr_frags == MAX_SKB_FRAGS) {
			unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;

			BUG_ON(pull_to <= skb_headlen(skb));
			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
		}
		BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);

		skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
				rx->offset, rx->status, PAGE_SIZE);

		skb_shinfo(nskb)->nr_frags = 0;
		kfree_skb(nskb);
	}

	return cons;
}
static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
{
	bool recalculate_partial_csum = false;

	/*
	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
	 * peers can fail to set NETRXF_csum_blank when sending a GSO
	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
	 * recalculate the partial checksum.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
		struct netfront_info *np = netdev_priv(dev);
		atomic_inc(&np->rx_gso_checksum_fixup);
		skb->ip_summed = CHECKSUM_PARTIAL;
		recalculate_partial_csum = true;
	}

	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	return skb_checksum_setup(skb, recalculate_partial_csum);
}
static int handle_incoming_queue(struct netfront_queue *queue,
				 struct sk_buff_head *rxq)
{
	struct netfront_stats *stats = this_cpu_ptr(queue->info->stats);
	int packets_dropped = 0;
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(rxq)) != NULL) {
		int pull_to = NETFRONT_SKB_CB(skb)->pull_to;

		if (pull_to > skb_headlen(skb))
			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));

		/* Ethernet work: Delayed to here as it peeks the header. */
		skb->protocol = eth_type_trans(skb, queue->info->netdev);
		skb_reset_network_header(skb);

		if (checksum_setup(queue->info->netdev, skb)) {
			kfree_skb(skb);
			packets_dropped++;
			queue->info->netdev->stats.rx_errors++;
			continue;
		}

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets++;
		stats->rx_bytes += skb->len;
		u64_stats_update_end(&stats->syncp);

		napi_gro_receive(&queue->napi, skb);
	}

	return packets_dropped;
}
static int xennet_poll(struct napi_struct *napi, int budget)
{
	struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi);
	struct net_device *dev = queue->info->netdev;
	struct sk_buff *skb;
	struct netfront_rx_info rinfo;
	struct xen_netif_rx_response *rx = &rinfo.rx;
	struct xen_netif_extra_info *extras = rinfo.extras;
	RING_IDX i, rp;
	int work_done = 0;
	struct sk_buff_head rxq;
	struct sk_buff_head errq;
	struct sk_buff_head tmpq;
	unsigned long flags;
	int err;

	spin_lock(&queue->rx_lock);

	skb_queue_head_init(&rxq);
	skb_queue_head_init(&errq);
	skb_queue_head_init(&tmpq);

	rp = queue->rx.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	i = queue->rx.rsp_cons;
	while ((i != rp) && (work_done < budget)) {
		memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx));
		memset(extras, 0, sizeof(rinfo.extras));

		err = xennet_get_responses(queue, &rinfo, rp, &tmpq);

		if (unlikely(err)) {
err:
			while ((skb = __skb_dequeue(&tmpq)))
				__skb_queue_tail(&errq, skb);
			dev->stats.rx_errors++;
			i = queue->rx.rsp_cons;
			continue;
		}

		skb = __skb_dequeue(&tmpq);

		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
			struct xen_netif_extra_info *gso;
			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

			if (unlikely(xennet_set_skb_gso(skb, gso))) {
				__skb_queue_head(&tmpq, skb);
				queue->rx.rsp_cons += skb_queue_len(&tmpq);
				goto err;
			}
		}

		NETFRONT_SKB_CB(skb)->pull_to = rx->status;
		if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
			NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;

		skb_shinfo(skb)->frags[0].page_offset = rx->offset;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
		skb->data_len = rx->status;
		skb->len += rx->status;

		i = xennet_fill_frags(queue, skb, &tmpq);

		if (rx->flags & XEN_NETRXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (rx->flags & XEN_NETRXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		__skb_queue_tail(&rxq, skb);

		queue->rx.rsp_cons = ++i;
		work_done++;
	}

	__skb_queue_purge(&errq);

	work_done -= handle_incoming_queue(queue, &rxq);

	xennet_alloc_rx_buffers(queue);

	if (work_done < budget) {
		int more_to_do = 0;

		napi_gro_flush(napi, false);

		local_irq_save(flags);

		RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
		if (!more_to_do)
			__napi_complete(napi);

		local_irq_restore(flags);
	}

	spin_unlock(&queue->rx_lock);

	return work_done;
}
static int xennet_change_mtu(struct net_device *dev, int mtu)
{
	int max = xennet_can_sg(dev) ?
		XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER : ETH_DATA_LEN;

	if (mtu > max)
		return -EINVAL;
	dev->mtu = mtu;
	return 0;
}
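/*
 * Added note: XEN_NETIF_MAX_TX_SIZE is the 16-bit wire-format limit
 * (0xffff), so with SG the MTU ceiling is 65535 - MAX_TCP_HEADER, while
 * without SG it falls back to the standard 1500-byte ETH_DATA_LEN.
 */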
static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
						    struct rtnl_link_stats64 *tot)
{
	struct netfront_info *np = netdev_priv(dev);
	int cpu;

	for_each_possible_cpu(cpu) {
		struct netfront_stats *stats = per_cpu_ptr(np->stats, cpu);
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);

			rx_packets = stats->rx_packets;
			tx_packets = stats->tx_packets;
			rx_bytes = stats->rx_bytes;
			tx_bytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		tot->rx_packets += rx_packets;
		tot->tx_packets += tx_packets;
		tot->rx_bytes   += rx_bytes;
		tot->tx_bytes   += tx_bytes;
	}

	tot->rx_errors  = dev->stats.rx_errors;
	tot->tx_dropped = dev->stats.tx_dropped;

	return tot;
}
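/*
 * Added note: the fetch_begin/fetch_retry pair is a lock-free seqcount
 * read loop -- if the per-CPU counters are updated while they are being
 * copied, the retry re-reads until a consistent snapshot is obtained,
 * keeping the hot Tx/Rx paths free of locks.
 */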
static void xennet_release_tx_bufs(struct netfront_queue *queue)
{
	struct sk_buff *skb;
	int i;

	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		/* Skip over entries which are actually freelist references */
		if (skb_entry_is_link(&queue->tx_skbs[i]))
			continue;

		skb = queue->tx_skbs[i].skb;
		get_page(queue->grant_tx_page[i]);
		gnttab_end_foreign_access(queue->grant_tx_ref[i],
					  GNTMAP_readonly,
					  (unsigned long)page_address(queue->grant_tx_page[i]));
		queue->grant_tx_page[i] = NULL;
		queue->grant_tx_ref[i] = GRANT_INVALID_REF;
		add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, i);
		dev_kfree_skb_irq(skb);
	}
}
static void xennet_release_rx_bufs(struct netfront_queue *queue)
{
	int id, ref;

	spin_lock_bh(&queue->rx_lock);

	for (id = 0; id < NET_RX_RING_SIZE; id++) {
		struct sk_buff *skb;
		struct page *page;

		skb = queue->rx_skbs[id];
		if (!skb)
			continue;

		ref = queue->grant_rx_ref[id];
		if (ref == GRANT_INVALID_REF)
			continue;

		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);

		/* gnttab_end_foreign_access() needs a page ref until
		 * foreign access is ended (which may be deferred).
		 */
		get_page(page);
		gnttab_end_foreign_access(ref, 0,
					  (unsigned long)page_address(page));
		queue->grant_rx_ref[id] = GRANT_INVALID_REF;

		kfree_skb(skb);
	}

	spin_unlock_bh(&queue->rx_lock);
}
static netdev_features_t xennet_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct netfront_info *np = netdev_priv(dev);
	int val;

	if (features & NETIF_F_SG) {
		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
				 "%d", &val) < 0)
			val = 0;

		if (!val)
			features &= ~NETIF_F_SG;
	}

	if (features & NETIF_F_IPV6_CSUM) {
		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
				 "feature-ipv6-csum-offload", "%d", &val) < 0)
			val = 0;

		if (!val)
			features &= ~NETIF_F_IPV6_CSUM;
	}

	if (features & NETIF_F_TSO) {
		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
				 "feature-gso-tcpv4", "%d", &val) < 0)
			val = 0;

		if (!val)
			features &= ~NETIF_F_TSO;
	}

	if (features & NETIF_F_TSO6) {
		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
				 "feature-gso-tcpv6", "%d", &val) < 0)
			val = 0;

		if (!val)
			features &= ~NETIF_F_TSO6;
	}

	return features;
}
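/*
 * Illustrative negotiation (added commentary, paths are examples): the
 * backend advertises its capabilities as XenStore nodes under its own
 * directory, e.g.
 *
 *	.../backend/vif/<domid>/<handle>/feature-sg = "1"
 *	.../backend/vif/<domid>/<handle>/feature-gso-tcpv4 = "1"
 *
 * and xennet_fix_features() masks off every netdev feature the backend
 * did not advertise before the stack starts relying on it.
 */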
static int xennet_set_features(struct net_device *dev,
			       netdev_features_t features)
{
	if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
		netdev_info(dev, "Reducing MTU because no SG offload");
		dev->mtu = ETH_DATA_LEN;
	}

	return 0;
}
static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
{
	struct netfront_queue *queue = dev_id;
	unsigned long flags;

	spin_lock_irqsave(&queue->tx_lock, flags);
	xennet_tx_buf_gc(queue);
	spin_unlock_irqrestore(&queue->tx_lock, flags);

	return IRQ_HANDLED;
}
static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
{
	struct netfront_queue *queue = dev_id;
	struct net_device *dev = queue->info->netdev;

	if (likely(netif_carrier_ok(dev) &&
		   RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
		napi_schedule(&queue->napi);

	return IRQ_HANDLED;
}
static irqreturn_t xennet_interrupt(int irq, void *dev_id)
{
	xennet_tx_interrupt(irq, dev_id);
	xennet_rx_interrupt(irq, dev_id);

	return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void xennet_poll_controller(struct net_device *dev)
{
	/* Poll each queue */
	struct netfront_info *info = netdev_priv(dev);
	unsigned int num_queues = dev->real_num_tx_queues;
	unsigned int i;

	for (i = 0; i < num_queues; ++i)
		xennet_interrupt(0, &info->queues[i]);
}
#endif
static const struct net_device_ops xennet_netdev_ops = {
	.ndo_open            = xennet_open,
	.ndo_stop            = xennet_close,
	.ndo_start_xmit      = xennet_start_xmit,
	.ndo_change_mtu	     = xennet_change_mtu,
	.ndo_get_stats64     = xennet_get_stats64,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr   = eth_validate_addr,
	.ndo_fix_features    = xennet_fix_features,
	.ndo_set_features    = xennet_set_features,
	.ndo_select_queue    = xennet_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = xennet_poll_controller,
#endif
};
static struct net_device *xennet_create_dev(struct xenbus_device *dev)
{
	int err;
	struct net_device *netdev;
	struct netfront_info *np;

	netdev = alloc_etherdev_mq(sizeof(struct netfront_info), xennet_max_queues);
	if (!netdev)
		return ERR_PTR(-ENOMEM);

	np = netdev_priv(netdev);
	np->xbdev = dev;

	/* No need to use rtnl_lock() before the call below as it
	 * happens before register_netdev().
	 */
	netif_set_real_num_tx_queues(netdev, 0);
	np->queues = NULL;

	err = -ENOMEM;
	np->stats = netdev_alloc_pcpu_stats(struct netfront_stats);
	if (np->stats == NULL)
		goto exit;

	netdev->netdev_ops	= &xennet_netdev_ops;

	netdev->features        = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
				  NETIF_F_GSO_ROBUST;
	netdev->hw_features	= NETIF_F_SG |
				  NETIF_F_IPV6_CSUM |
				  NETIF_F_TSO | NETIF_F_TSO6;

	/*
	 * Assume that all hw features are available for now. This set
	 * will be adjusted by the call to netdev_update_features() in
	 * xennet_connect() which is the earliest point where we can
	 * negotiate with the backend regarding supported features.
	 */
	netdev->features |= netdev->hw_features;

	netdev->ethtool_ops = &xennet_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	netif_set_gso_max_size(netdev, XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER);

	np->netdev = netdev;

	netif_carrier_off(netdev);

	return netdev;

 exit:
	free_netdev(netdev);
	return ERR_PTR(err);
}
/*
 * Entry point to this code when a new device is created. Allocate the basic
 * structures and the ring buffers for communication with the backend, and
 * inform the backend of the appropriate details for those.
 */
static int netfront_probe(struct xenbus_device *dev,
			  const struct xenbus_device_id *id)
{
	int err;
	struct net_device *netdev;
	struct netfront_info *info;

	netdev = xennet_create_dev(dev);
	if (IS_ERR(netdev)) {
		err = PTR_ERR(netdev);
		xenbus_dev_fatal(dev, err, "creating netdev");
		return err;
	}

	info = netdev_priv(netdev);
	dev_set_drvdata(&dev->dev, info);

	err = register_netdev(info->netdev);
	if (err) {
		pr_warn("%s: register_netdev err=%d\n", __func__, err);
		goto fail;
	}

	err = xennet_sysfs_addif(info->netdev);
	if (err) {
		unregister_netdev(info->netdev);
		pr_warn("%s: add sysfs failed err=%d\n", __func__, err);
		goto fail;
	}

	return 0;

 fail:
	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);
	return err;
}
static void xennet_end_access(int ref, void *page)
{
	/* This frees the page as a side-effect */
	if (ref != GRANT_INVALID_REF)
		gnttab_end_foreign_access(ref, 0, (unsigned long)page);
}
static void xennet_disconnect_backend(struct netfront_info *info)
{
	unsigned int i;
	unsigned int num_queues = info->netdev->real_num_tx_queues;

	netif_carrier_off(info->netdev);

	for (i = 0; i < num_queues; ++i) {
		struct netfront_queue *queue = &info->queues[i];

		if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
			unbind_from_irqhandler(queue->tx_irq, queue);
		if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
			unbind_from_irqhandler(queue->tx_irq, queue);
			unbind_from_irqhandler(queue->rx_irq, queue);
		}
		queue->tx_evtchn = queue->rx_evtchn = 0;
		queue->tx_irq = queue->rx_irq = 0;

		napi_synchronize(&queue->napi);

		xennet_release_tx_bufs(queue);
		xennet_release_rx_bufs(queue);
		gnttab_free_grant_references(queue->gref_tx_head);
		gnttab_free_grant_references(queue->gref_rx_head);

		/* End access and free the pages */
		xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
		xennet_end_access(queue->rx_ring_ref, queue->rx.sring);

		queue->tx_ring_ref = GRANT_INVALID_REF;
		queue->rx_ring_ref = GRANT_INVALID_REF;
		queue->tx.sring = NULL;
		queue->rx.sring = NULL;
	}
}
/*
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart. We tear down our netif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to the
 * rest of the kernel.
 */
static int netfront_resume(struct xenbus_device *dev)
{
	struct netfront_info *info = dev_get_drvdata(&dev->dev);

	dev_dbg(&dev->dev, "%s\n", dev->nodename);

	xennet_disconnect_backend(info);
	return 0;
}
static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
{
	char *s, *e, *macstr;
	int i;

	macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
	if (IS_ERR(macstr))
		return PTR_ERR(macstr);

	for (i = 0; i < ETH_ALEN; i++) {
		mac[i] = simple_strtoul(s, &e, 16);
		if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
			kfree(macstr);
			return -ENOENT;
		}
		s = e+1;
	}

	kfree(macstr);
	return 0;
}
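/*
 * Example (added commentary): the "mac" node holds a string such as
 * "00:16:3e:12:34:56" (00:16:3e being the OUI assigned to Xen); each
 * loop iteration consumes one hex octet and requires a ':' separator
 * after every octet except the last, which must terminate the string.
 */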
static int setup_netfront_single(struct netfront_queue *queue)
{
	int err;

	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
	if (err < 0)
		goto fail;

	err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
					xennet_interrupt,
					0, queue->info->netdev->name, queue);
	if (err < 0)
		goto bind_fail;
	queue->rx_evtchn = queue->tx_evtchn;
	queue->rx_irq = queue->tx_irq = err;

	return 0;

bind_fail:
	xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
	queue->tx_evtchn = 0;
fail:
	return err;
}
static int setup_netfront_split(struct netfront_queue *queue)
{
	int err;

	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
	if (err < 0)
		goto fail;
	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn);
	if (err < 0)
		goto alloc_rx_evtchn_fail;

	snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
		 "%s-tx", queue->name);
	err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
					xennet_tx_interrupt,
					0, queue->tx_irq_name, queue);
	if (err < 0)
		goto bind_tx_fail;
	queue->tx_irq = err;

	snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
		 "%s-rx", queue->name);
	err = bind_evtchn_to_irqhandler(queue->rx_evtchn,
					xennet_rx_interrupt,
					0, queue->rx_irq_name, queue);
	if (err < 0)
		goto bind_rx_fail;
	queue->rx_irq = err;

	return 0;

bind_rx_fail:
	unbind_from_irqhandler(queue->tx_irq, queue);
	queue->tx_irq = 0;
bind_tx_fail:
	xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn);
	queue->rx_evtchn = 0;
alloc_rx_evtchn_fail:
	xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
	queue->tx_evtchn = 0;
fail:
	return err;
}
static int setup_netfront(struct xenbus_device *dev,
			  struct netfront_queue *queue, unsigned int feature_split_evtchn)
{
	struct xen_netif_tx_sring *txs;
	struct xen_netif_rx_sring *rxs;
	int err;

	queue->tx_ring_ref = GRANT_INVALID_REF;
	queue->rx_ring_ref = GRANT_INVALID_REF;
	queue->rx.sring = NULL;
	queue->tx.sring = NULL;

	txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
	if (!txs) {
		err = -ENOMEM;
		xenbus_dev_fatal(dev, err, "allocating tx ring page");
		goto fail;
	}
	SHARED_RING_INIT(txs);
	FRONT_RING_INIT(&queue->tx, txs, PAGE_SIZE);

	err = xenbus_grant_ring(dev, virt_to_mfn(txs));
	if (err < 0)
		goto grant_tx_ring_fail;
	queue->tx_ring_ref = err;

	rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
	if (!rxs) {
		err = -ENOMEM;
		xenbus_dev_fatal(dev, err, "allocating rx ring page");
		goto alloc_rx_ring_fail;
	}
	SHARED_RING_INIT(rxs);
	FRONT_RING_INIT(&queue->rx, rxs, PAGE_SIZE);

	err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
	if (err < 0)
		goto grant_rx_ring_fail;
	queue->rx_ring_ref = err;

	if (feature_split_evtchn)
		err = setup_netfront_split(queue);
	/* setup a single event channel if
	 *  a) feature-split-event-channels == 0, or
	 *  b) feature-split-event-channels == 1 but the split setup failed
	 */
	if (!feature_split_evtchn || (feature_split_evtchn && err))
		err = setup_netfront_single(queue);

	if (err)
		goto alloc_evtchn_fail;

	return 0;

	/* If we fail to set up netfront, it is safe to just revoke access to
	 * granted pages because the backend is not accessing them at this
	 * point.
	 */
alloc_evtchn_fail:
	gnttab_end_foreign_access_ref(queue->rx_ring_ref, 0);
grant_rx_ring_fail:
	free_page((unsigned long)rxs);
alloc_rx_ring_fail:
	gnttab_end_foreign_access_ref(queue->tx_ring_ref, 0);
grant_tx_ring_fail:
	free_page((unsigned long)txs);
fail:
	return err;
}
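/*
 * Added note: each ring is a single zeroed page shared with the
 * backend.  SHARED_RING_INIT() initialises the producer/consumer
 * indices inside the page, FRONT_RING_INIT() sets up the frontend's
 * private view of it, and xenbus_grant_ring() returns the grant
 * reference that is later published to XenStore as {tx,rx}-ring-ref.
 */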
/* Queue-specific initialisation
 * This used to be done in xennet_create_dev() but must now
 * be run per-queue.
 */
static int xennet_init_queue(struct netfront_queue *queue)
{
	unsigned short i;
	int err = 0;

	spin_lock_init(&queue->tx_lock);
	spin_lock_init(&queue->rx_lock);

	init_timer(&queue->rx_refill_timer);
	queue->rx_refill_timer.data = (unsigned long)queue;
	queue->rx_refill_timer.function = rx_refill_timeout;

	snprintf(queue->name, sizeof(queue->name), "%s-q%u",
		 queue->info->netdev->name, queue->id);

	/* Initialise tx_skbs as a free chain containing every entry. */
	queue->tx_skb_freelist = 0;
	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		skb_entry_set_link(&queue->tx_skbs[i], i+1);
		queue->grant_tx_ref[i] = GRANT_INVALID_REF;
		queue->grant_tx_page[i] = NULL;
	}

	/* Clear out rx_skbs */
	for (i = 0; i < NET_RX_RING_SIZE; i++) {
		queue->rx_skbs[i] = NULL;
		queue->grant_rx_ref[i] = GRANT_INVALID_REF;
	}

	/* A grant for every tx ring slot */
	if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
					  &queue->gref_tx_head) < 0) {
		pr_alert("can't alloc tx grant refs\n");
		err = -ENOMEM;
		goto exit;
	}

	/* A grant for every rx ring slot */
	if (gnttab_alloc_grant_references(NET_RX_RING_SIZE,
					  &queue->gref_rx_head) < 0) {
		pr_alert("can't alloc rx grant refs\n");
		err = -ENOMEM;
		goto exit_free_tx;
	}

	return 0;

 exit_free_tx:
	gnttab_free_grant_references(queue->gref_tx_head);
 exit:
	return err;
}
static int write_queue_xenstore_keys(struct netfront_queue *queue,
				     struct xenbus_transaction *xbt, int write_hierarchical)
{
	/* Write the queue-specific keys into XenStore in the traditional
	 * way for a single queue, or in queue subkeys for multiple
	 * queues.
	 */
	struct xenbus_device *dev = queue->info->xbdev;
	int err;
	const char *message;
	char *path;
	size_t pathsize;

	/* Choose the correct place to write the keys */
	if (write_hierarchical) {
		pathsize = strlen(dev->nodename) + 10;
		path = kzalloc(pathsize, GFP_KERNEL);
		if (!path) {
			err = -ENOMEM;
			message = "out of memory while writing ring references";
			goto error;
		}
		snprintf(path, pathsize, "%s/queue-%u",
			 dev->nodename, queue->id);
	} else {
		path = (char *)dev->nodename;
	}

	/* Write ring references */
	err = xenbus_printf(*xbt, path, "tx-ring-ref", "%u",
			    queue->tx_ring_ref);
	if (err) {
		message = "writing tx-ring-ref";
		goto error;
	}

	err = xenbus_printf(*xbt, path, "rx-ring-ref", "%u",
			    queue->rx_ring_ref);
	if (err) {
		message = "writing rx-ring-ref";
		goto error;
	}

	/* Write event channels; taking into account both shared
	 * and split event channel scenarios.
	 */
	if (queue->tx_evtchn == queue->rx_evtchn) {
		/* Shared event channel */
		err = xenbus_printf(*xbt, path,
				    "event-channel", "%u", queue->tx_evtchn);
		if (err) {
			message = "writing event-channel";
			goto error;
		}
	} else {
		/* Split event channels */
		err = xenbus_printf(*xbt, path,
				    "event-channel-tx", "%u", queue->tx_evtchn);
		if (err) {
			message = "writing event-channel-tx";
			goto error;
		}

		err = xenbus_printf(*xbt, path,
				    "event-channel-rx", "%u", queue->rx_evtchn);
		if (err) {
			message = "writing event-channel-rx";
			goto error;
		}
	}

	if (write_hierarchical)
		kfree(path);
	return 0;

error:
	if (write_hierarchical)
		kfree(path);
	xenbus_dev_fatal(dev, err, "%s", message);
	return err;
}
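/*
 * Illustrative XenStore layout (added commentary, node names are
 * examples): with a single queue the keys live directly under the
 * device node,
 *
 *	device/vif/0/tx-ring-ref, device/vif/0/event-channel, ...
 *
 * while with multiple queues each queue gets its own subtree,
 *
 *	device/vif/0/queue-0/tx-ring-ref, device/vif/0/queue-1/..., ...
 */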
static void xennet_destroy_queues(struct netfront_info *info)
{
	unsigned int i;

	for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
		struct netfront_queue *queue = &info->queues[i];

		if (netif_running(info->netdev))
			napi_disable(&queue->napi);
		netif_napi_del(&queue->napi);
	}

	kfree(info->queues);
	info->queues = NULL;
}
static int xennet_create_queues(struct netfront_info *info,
				unsigned int num_queues)
{
	unsigned int i;
	int ret;

	info->queues = kcalloc(num_queues, sizeof(struct netfront_queue),
			       GFP_KERNEL);
	if (!info->queues)
		return -ENOMEM;

	for (i = 0; i < num_queues; i++) {
		struct netfront_queue *queue = &info->queues[i];

		queue->id = i;
		queue->info = info;

		ret = xennet_init_queue(queue);
		if (ret < 0) {
			dev_warn(&info->netdev->dev,
				 "only created %d queues\n", i);
			num_queues = i;
			break;
		}

		netif_napi_add(queue->info->netdev, &queue->napi,
			       xennet_poll, 64);
		if (netif_running(info->netdev))
			napi_enable(&queue->napi);
	}

	netif_set_real_num_tx_queues(info->netdev, num_queues);

	if (num_queues == 0) {
		dev_err(&info->netdev->dev, "no queues\n");
		return -EINVAL;
	}
	return 0;
}
/* Common code used when first setting up, and when resuming. */
static int talk_to_netback(struct xenbus_device *dev,
			   struct netfront_info *info)
{
	const char *message;
	struct xenbus_transaction xbt;
	int err;
	unsigned int feature_split_evtchn;
	unsigned int i;
	unsigned int max_queues = 0;
	struct netfront_queue *queue = NULL;
	unsigned int num_queues = 1;

	info->netdev->irq = 0;

	/* Check if backend supports multiple queues */
	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
			   "multi-queue-max-queues", "%u", &max_queues);
	if (err < 0)
		max_queues = 1;
	num_queues = min(max_queues, xennet_max_queues);

	/* Check feature-split-event-channels */
	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
			   "feature-split-event-channels", "%u",
			   &feature_split_evtchn);
	if (err < 0)
		feature_split_evtchn = 0;

	/* Read mac addr. */
	err = xen_net_read_mac(dev, info->netdev->dev_addr);
	if (err) {
		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
		goto out;
	}

	if (info->queues)
		xennet_destroy_queues(info);

	err = xennet_create_queues(info, num_queues);
	if (err < 0)
		goto destroy_ring;

	/* Create shared ring, alloc event channel -- for each queue */
	for (i = 0; i < num_queues; ++i) {
		queue = &info->queues[i];
		err = setup_netfront(dev, queue, feature_split_evtchn);
		if (err) {
			/* setup_netfront() will tidy up the current
			 * queue on error, but we need to clean up
			 * those already allocated.
			 */
			netif_set_real_num_tx_queues(info->netdev, i);
			goto destroy_ring;
		}
	}

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_ring;
	}

	if (num_queues == 1) {
		err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0); /* flat */
		if (err)
			goto abort_transaction_no_dev_fatal;
	} else {
		/* Write the number of queues */
		err = xenbus_printf(xbt, dev->nodename, "multi-queue-num-queues",
				    "%u", num_queues);
		if (err) {
			message = "writing multi-queue-num-queues";
			goto abort_transaction_no_dev_fatal;
		}

		/* Write the keys for each queue */
		for (i = 0; i < num_queues; ++i) {
			queue = &info->queues[i];
			err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */
			if (err)
				goto abort_transaction_no_dev_fatal;
		}
	}

	/* The remaining keys are not queue-specific */
	err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
			    1);
	if (err) {
		message = "writing request-rx-copy";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
	if (err) {
		message = "writing feature-rx-notify";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
	if (err) {
		message = "writing feature-sg";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
	if (err) {
		message = "writing feature-gso-tcpv4";
		goto abort_transaction;
	}

	err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1");
	if (err) {
		message = "writing feature-gso-tcpv6";
		goto abort_transaction;
	}

	err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload",
			   "1");
	if (err) {
		message = "writing feature-ipv6-csum-offload";
		goto abort_transaction;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_ring;
	}

	return 0;

 abort_transaction:
	xenbus_dev_fatal(dev, err, "%s", message);
 abort_transaction_no_dev_fatal:
	xenbus_transaction_end(xbt, 1);
 destroy_ring:
	xennet_disconnect_backend(info);
	kfree(info->queues);
	info->queues = NULL;
	netif_set_real_num_tx_queues(info->netdev, 0);
 out:
	return err;
}
static int xennet_connect(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	unsigned int num_queues = 0;
	int err;
	unsigned int feature_rx_copy;
	unsigned int j;
	struct netfront_queue *queue = NULL;

	err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
			   "feature-rx-copy", "%u", &feature_rx_copy);
	if (err != 1)
		feature_rx_copy = 0;

	if (!feature_rx_copy) {
		dev_info(&dev->dev,
			 "backend does not support copying receive path\n");
		return -ENODEV;
	}

	err = talk_to_netback(np->xbdev, np);
	if (err)
		return err;

	/* talk_to_netback() sets the correct number of queues */
	num_queues = dev->real_num_tx_queues;

	netdev_update_features(dev);

	/*
	 * All public and private state should now be sane.  Get
	 * ready to start sending and receiving packets and give the driver
	 * domain a kick because we've probably just requeued some
	 * packets.
	 */
	netif_carrier_on(np->netdev);
	for (j = 0; j < num_queues; ++j) {
		queue = &np->queues[j];

		notify_remote_via_irq(queue->tx_irq);
		if (queue->tx_irq != queue->rx_irq)
			notify_remote_via_irq(queue->rx_irq);

		spin_lock_irq(&queue->tx_lock);
		xennet_tx_buf_gc(queue);
		spin_unlock_irq(&queue->tx_lock);

		spin_lock_bh(&queue->rx_lock);
		xennet_alloc_rx_buffers(queue);
		spin_unlock_bh(&queue->rx_lock);
	}

	return 0;
}
/*
 * Callback received when the backend's state changes.
 */
static void netback_changed(struct xenbus_device *dev,
			    enum xenbus_state backend_state)
{
	struct netfront_info *np = dev_get_drvdata(&dev->dev);
	struct net_device *netdev = np->netdev;

	dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));

	switch (backend_state) {
	case XenbusStateInitialising:
	case XenbusStateInitialised:
	case XenbusStateReconfiguring:
	case XenbusStateReconfigured:
	case XenbusStateUnknown:
		break;

	case XenbusStateInitWait:
		if (dev->state != XenbusStateInitialising)
			break;
		if (xennet_connect(netdev) != 0)
			break;
		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateConnected:
		netdev_notify_peers(netdev);
		break;

	case XenbusStateClosed:
		if (dev->state == XenbusStateClosed)
			break;
		/* Missed the backend's CLOSING state -- fallthrough */
	case XenbusStateClosing:
		xenbus_frontend_closed(dev);
		break;
	}
}
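/*
 * Added note: a typical bring-up walks backend Initialising ->
 * InitWait (the frontend connects its rings and switches itself to
 * Connected) -> Connected.  Because the InitWait arm above is where
 * xennet_connect() runs, a backend restart that passes through InitWait
 * again transparently reconnects the device.
 */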
static const struct xennet_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} xennet_stats[] = {
	{
		"rx_gso_checksum_fixup",
		offsetof(struct netfront_info, rx_gso_checksum_fixup)
	},
};
static int xennet_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(xennet_stats);
	default:
		return -EINVAL;
	}
}
static void xennet_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	void *np = netdev_priv(dev);
	int i;

	for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
		data[i] = atomic_read((atomic_t *)(np + xennet_stats[i].offset));
}
static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       xennet_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}
static const struct ethtool_ops xennet_ethtool_ops =
{
	.get_link = ethtool_op_get_link,

	.get_sset_count = xennet_get_sset_count,
	.get_ethtool_stats = xennet_get_ethtool_stats,
	.get_strings = xennet_get_strings,
};
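/*
 * Illustrative usage (added commentary): the single driver-private
 * statistic is exposed through the standard ethtool interface, e.g.
 *
 *	$ ethtool -S eth0
 *	NIC statistics:
 *	     rx_gso_checksum_fixup: 0
 */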
#ifdef CONFIG_SYSFS
static ssize_t show_rxbuf(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", NET_RX_RING_SIZE);
}
static ssize_t store_rxbuf(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t len)
{
	char *endp;
	unsigned long target;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	target = simple_strtoul(buf, &endp, 0);
	if (endp == buf)
		return -EBADMSG;

	/* rxbuf_min and rxbuf_max are no longer configurable. */

	return len;
}
static struct device_attribute xennet_attrs[] = {
	__ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf),
	__ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf),
	__ATTR(rxbuf_cur, S_IRUGO, show_rxbuf, NULL),
};
static int xennet_sysfs_addif(struct net_device *netdev)
{
	int i;
	int err;

	for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
		err = device_create_file(&netdev->dev,
					 &xennet_attrs[i]);
		if (err)
			goto fail;
	}
	return 0;

 fail:
	while (--i >= 0)
		device_remove_file(&netdev->dev, &xennet_attrs[i]);
	return err;
}
static void xennet_sysfs_delif(struct net_device *netdev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++)
		device_remove_file(&netdev->dev, &xennet_attrs[i]);
}

#endif /* CONFIG_SYSFS */
static int xennet_remove(struct xenbus_device *dev)
{
	struct netfront_info *info = dev_get_drvdata(&dev->dev);
	unsigned int num_queues = info->netdev->real_num_tx_queues;
	struct netfront_queue *queue = NULL;
	unsigned int i;

	dev_dbg(&dev->dev, "%s\n", dev->nodename);

	xennet_disconnect_backend(info);

	xennet_sysfs_delif(info->netdev);

	unregister_netdev(info->netdev);

	for (i = 0; i < num_queues; ++i) {
		queue = &info->queues[i];
		del_timer_sync(&queue->rx_refill_timer);
	}

	kfree(info->queues);
	info->queues = NULL;

	free_percpu(info->stats);

	free_netdev(info->netdev);

	return 0;
}
static const struct xenbus_device_id netfront_ids[] = {
	{ "vif" },
	{ "" }
};
static struct xenbus_driver netfront_driver = {
	.ids = netfront_ids,
	.probe = netfront_probe,
	.remove = xennet_remove,
	.resume = netfront_resume,
	.otherend_changed = netback_changed,
};
static int __init netif_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	if (!xen_has_pv_nic_devices())
		return -ENODEV;

	pr_info("Initialising Xen virtual ethernet driver\n");

	/* Allow as many queues as there are CPUs, by default */
	xennet_max_queues = num_online_cpus();

	return xenbus_register_frontend(&netfront_driver);
}
module_init(netif_init);
static void __exit netif_exit(void)
{
	xenbus_unregister_driver(&netfront_driver);
}
module_exit(netif_exit);
MODULE_DESCRIPTION("Xen virtual network device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS("xen:vif");
MODULE_ALIAS("xennet");