drivers/net/xen-netfront.c
1/*
2 * Virtual network driver for conversing with remote driver backends.
3 *
4 * Copyright (c) 2002-2005, K A Fraser
5 * Copyright (c) 2005, XenSource Ltd
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version 2
9 * as published by the Free Software Foundation; or, when distributed
10 * separately from the Linux kernel or incorporated into other
11 * software packages, subject to the following license:
12 *
13 * Permission is hereby granted, free of charge, to any person obtaining a copy
14 * of this source file (the "Software"), to deal in the Software without
15 * restriction, including without limitation the rights to use, copy, modify,
16 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
17 * and to permit persons to whom the Software is furnished to do so, subject to
18 * the following conditions:
19 *
20 * The above copyright notice and this permission notice shall be included in
21 * all copies or substantial portions of the Software.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
26 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
29 * IN THE SOFTWARE.
30 */
31
32#include <linux/module.h>
33#include <linux/kernel.h>
34#include <linux/netdevice.h>
35#include <linux/etherdevice.h>
36#include <linux/skbuff.h>
37#include <linux/ethtool.h>
38#include <linux/if_ether.h>
39#include <linux/tcp.h>
40#include <linux/udp.h>
41#include <linux/moduleparam.h>
42#include <linux/mm.h>
43#include <net/ip.h>
44
45#include <xen/xenbus.h>
46#include <xen/events.h>
47#include <xen/page.h>
48#include <xen/grant_table.h>
49
50#include <xen/interface/io/netif.h>
51#include <xen/interface/memory.h>
52#include <xen/interface/grant_table.h>
53
54static struct ethtool_ops xennet_ethtool_ops;
55
56struct netfront_cb {
57 struct page *page;
58 unsigned offset;
59};
60
61#define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb))
62
63#define RX_COPY_THRESHOLD 256
64
65#define GRANT_INVALID_REF 0
66
67#define NET_TX_RING_SIZE __RING_SIZE((struct xen_netif_tx_sring *)0, PAGE_SIZE)
68#define NET_RX_RING_SIZE __RING_SIZE((struct xen_netif_rx_sring *)0, PAGE_SIZE)
69#define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
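/*
 * For illustration (assuming 4 KiB pages): __RING_SIZE() rounds the
 * number of request/response slots that fit in one shared page down to
 * a power of two, which for the netif wire format works out to 256
 * entries for both the tx and rx rings.  The index masking done by
 * xennet_rxidx() below relies on this power-of-two property.
 */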
70
71struct netfront_info {
72 struct list_head list;
73 struct net_device *netdev;
74
75 struct napi_struct napi;
76
77 unsigned int evtchn;
78 struct xenbus_device *xbdev;
79
80 spinlock_t tx_lock;
81 struct xen_netif_tx_front_ring tx;
82 int tx_ring_ref;
83
84 /*
85 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
86 * are linked from tx_skb_freelist through skb_entry.link.
87 *
88 * NB. Freelist index entries are always going to be less than
89 * PAGE_OFFSET, whereas pointers to skbs will always be equal or
90 * greater than PAGE_OFFSET: we use this property to distinguish
91 * them.
92 */
93 union skb_entry {
94 struct sk_buff *skb;
95 unsigned link;
96 } tx_skbs[NET_TX_RING_SIZE];
97 grant_ref_t gref_tx_head;
98 grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
99 unsigned tx_skb_freelist;
100
101 spinlock_t rx_lock ____cacheline_aligned_in_smp;
102 struct xen_netif_rx_front_ring rx;
103 int rx_ring_ref;
104
105 /* Receive-ring batched refills. */
106#define RX_MIN_TARGET 8
107#define RX_DFL_MIN_TARGET 64
108#define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
109 unsigned rx_min_target, rx_max_target, rx_target;
110 struct sk_buff_head rx_batch;
111
112 struct timer_list rx_refill_timer;
113
114 struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
115 grant_ref_t gref_rx_head;
116 grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
117
118 unsigned long rx_pfn_array[NET_RX_RING_SIZE];
119 struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
120 struct mmu_update rx_mmu[NET_RX_RING_SIZE];
121};
122
123struct netfront_rx_info {
124 struct xen_netif_rx_response rx;
125 struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
126};
127
128/*
129 * Access macros for acquiring/freeing slots in tx_skbs[].
130 */
131
132static void add_id_to_freelist(unsigned *head, union skb_entry *list,
133 unsigned short id)
134{
135 list[id].link = *head;
136 *head = id;
137}
138
139static unsigned short get_id_from_freelist(unsigned *head,
140 union skb_entry *list)
141{
142 unsigned int id = *head;
143 *head = list[id].link;
144 return id;
145}
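/*
 * Illustration of the freelist (not part of the original source): with
 * entries 0..2 free and tx_skb_freelist == 0, the chain looks like
 *
 *     tx_skb_freelist -> 0 -> 1 -> 2 -> ...
 *
 * get_id_from_freelist() pops id 0 and advances the head to 1;
 * add_id_to_freelist(0) pushes it back on the front.  Because the
 * stored links are small indices (< PAGE_OFFSET), they can never be
 * mistaken for the sk_buff pointers sharing the same union.
 */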
146
147static int xennet_rxidx(RING_IDX idx)
148{
149 return idx & (NET_RX_RING_SIZE - 1);
150}
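/*
 * xennet_rxidx() is a simple wrap-around: e.g. with a 256-entry ring,
 * ring indices 0, 256, 512, ... all map to slot 0.  This only works
 * because NET_RX_RING_SIZE is a power of two.
 */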
151
152static struct sk_buff *xennet_get_rx_skb(struct netfront_info *np,
153 RING_IDX ri)
154{
155 int i = xennet_rxidx(ri);
156 struct sk_buff *skb = np->rx_skbs[i];
157 np->rx_skbs[i] = NULL;
158 return skb;
159}
160
161static grant_ref_t xennet_get_rx_ref(struct netfront_info *np,
162 RING_IDX ri)
163{
164 int i = xennet_rxidx(ri);
165 grant_ref_t ref = np->grant_rx_ref[i];
166 np->grant_rx_ref[i] = GRANT_INVALID_REF;
167 return ref;
168}
169
170#ifdef CONFIG_SYSFS
171static int xennet_sysfs_addif(struct net_device *netdev);
172static void xennet_sysfs_delif(struct net_device *netdev);
173#else /* !CONFIG_SYSFS */
174#define xennet_sysfs_addif(dev) (0)
175#define xennet_sysfs_delif(dev) do { } while (0)
176#endif
177
178static int xennet_can_sg(struct net_device *dev)
179{
180 return dev->features & NETIF_F_SG;
181}
182
183
184static void rx_refill_timeout(unsigned long data)
185{
186 struct net_device *dev = (struct net_device *)data;
187 struct netfront_info *np = netdev_priv(dev);
188 netif_rx_schedule(dev, &np->napi);
189}
190
191static int netfront_tx_slot_available(struct netfront_info *np)
192{
193 return ((np->tx.req_prod_pvt - np->tx.rsp_cons) <
194 (TX_MAX_TARGET - MAX_SKB_FRAGS - 2));
195}
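/*
 * One reading of the check above (an interpretation, not original
 * text): treat the ring as full once fewer than MAX_SKB_FRAGS + 2 of
 * the TX_MAX_TARGET slots remain, so a worst-case skb (every fragment
 * in its own slot, plus the linear header and an extra-info slot) can
 * still be queued before the netdev queue has to be stopped.
 */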
196
197static void xennet_maybe_wake_tx(struct net_device *dev)
198{
199 struct netfront_info *np = netdev_priv(dev);
200
201 if (unlikely(netif_queue_stopped(dev)) &&
202 netfront_tx_slot_available(np) &&
203 likely(netif_running(dev)))
204 netif_wake_queue(dev);
205}
206
207static void xennet_alloc_rx_buffers(struct net_device *dev)
208{
209 unsigned short id;
210 struct netfront_info *np = netdev_priv(dev);
211 struct sk_buff *skb;
212 struct page *page;
213 int i, batch_target, notify;
214 RING_IDX req_prod = np->rx.req_prod_pvt;
215 grant_ref_t ref;
216 unsigned long pfn;
217 void *vaddr;
218 struct xen_netif_rx_request *req;
219
220 if (unlikely(!netif_carrier_ok(dev)))
221 return;
222
223 /*
224 * Allocate skbuffs greedily, even though we batch updates to the
225 * receive ring. This creates a less bursty demand on the memory
226 * allocator, so should reduce the chance of failed allocation requests
227 * both for ourselves and for other kernel subsystems.
228 */
229 batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
230 for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
231 skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD,
232 GFP_ATOMIC | __GFP_NOWARN);
233 if (unlikely(!skb))
234 goto no_skb;
235
236 page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
237 if (!page) {
238 kfree_skb(skb);
239no_skb:
240 /* Any skbuffs queued for refill? Force them out. */
241 if (i != 0)
242 goto refill;
243 /* Could not allocate any skbuffs. Try again later. */
244 mod_timer(&np->rx_refill_timer,
245 jiffies + (HZ/10));
246 break;
247 }
248
249 skb_shinfo(skb)->frags[0].page = page;
250 skb_shinfo(skb)->nr_frags = 1;
251 __skb_queue_tail(&np->rx_batch, skb);
252 }
253
254 /* Is the batch large enough to be worthwhile? */
255 if (i < (np->rx_target/2)) {
256 if (req_prod > np->rx.sring->req_prod)
257 goto push;
258 return;
259 }
260
261 /* Adjust our fill target if we risked running out of buffers. */
262 if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
263 ((np->rx_target *= 2) > np->rx_max_target))
264 np->rx_target = np->rx_max_target;
265
266 refill:
267 for (i = 0; ; i++) {
268 skb = __skb_dequeue(&np->rx_batch);
269 if (skb == NULL)
270 break;
271
272 skb->dev = dev;
273
274 id = xennet_rxidx(req_prod + i);
275
276 BUG_ON(np->rx_skbs[id]);
277 np->rx_skbs[id] = skb;
278
279 ref = gnttab_claim_grant_reference(&np->gref_rx_head);
280 BUG_ON((signed short)ref < 0);
281 np->grant_rx_ref[id] = ref;
282
283 pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page);
284 vaddr = page_address(skb_shinfo(skb)->frags[0].page);
285
286 req = RING_GET_REQUEST(&np->rx, req_prod + i);
287 gnttab_grant_foreign_access_ref(ref,
288 np->xbdev->otherend_id,
289 pfn_to_mfn(pfn),
290 0);
291
292 req->id = id;
293 req->gref = ref;
294 }
295
296 wmb(); /* barrier so backend sees requests */
297
298 /* Above is a suitable barrier to ensure backend will see requests. */
299 np->rx.req_prod_pvt = req_prod + i;
300 push:
301 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
302 if (notify)
303 notify_remote_via_irq(np->netdev->irq);
304}
305
306static int xennet_open(struct net_device *dev)
307{
308 struct netfront_info *np = netdev_priv(dev);
309
310 napi_enable(&np->napi);
311
312 spin_lock_bh(&np->rx_lock);
313 if (netif_carrier_ok(dev)) {
314 xennet_alloc_rx_buffers(dev);
315 np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
316 if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
317 netif_rx_schedule(dev, &np->napi);
318 }
319 spin_unlock_bh(&np->rx_lock);
320
321 xennet_maybe_wake_tx(dev);
322
323 return 0;
324}
325
326static void xennet_tx_buf_gc(struct net_device *dev)
327{
328 RING_IDX cons, prod;
329 unsigned short id;
330 struct netfront_info *np = netdev_priv(dev);
331 struct sk_buff *skb;
332
333 BUG_ON(!netif_carrier_ok(dev));
334
335 do {
336 prod = np->tx.sring->rsp_prod;
337 rmb(); /* Ensure we see responses up to 'rp'. */
338
339 for (cons = np->tx.rsp_cons; cons != prod; cons++) {
340 struct xen_netif_tx_response *txrsp;
341
342 txrsp = RING_GET_RESPONSE(&np->tx, cons);
343 if (txrsp->status == NETIF_RSP_NULL)
344 continue;
345
346 id = txrsp->id;
347 skb = np->tx_skbs[id].skb;
348 if (unlikely(gnttab_query_foreign_access(
349 np->grant_tx_ref[id]) != 0)) {
350 printk(KERN_ALERT "xennet_tx_buf_gc: warning "
351 "-- grant still in use by backend "
352 "domain.\n");
353 BUG();
354 }
355 gnttab_end_foreign_access_ref(
356 np->grant_tx_ref[id], GNTMAP_readonly);
357 gnttab_release_grant_reference(
358 &np->gref_tx_head, np->grant_tx_ref[id]);
359 np->grant_tx_ref[id] = GRANT_INVALID_REF;
360 add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
361 dev_kfree_skb_irq(skb);
362 }
363
364 np->tx.rsp_cons = prod;
365
366 /*
367 * Set a new event, then check for race with update of tx_cons.
368 * Note that it is essential to schedule a callback, no matter
369 * how few buffers are pending. Even if there is space in the
370 * transmit ring, higher layers may be blocked because too much
371 * data is outstanding: in such cases notification from Xen is
372 * likely to be the only kick that we'll get.
373 */
374 np->tx.sring->rsp_event =
375 prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
376 mb(); /* update shared area */
377 } while ((cons == prod) && (prod != np->tx.sring->rsp_prod));
378
379 xennet_maybe_wake_tx(dev);
380}
381
382static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
383 struct xen_netif_tx_request *tx)
384{
385 struct netfront_info *np = netdev_priv(dev);
386 char *data = skb->data;
387 unsigned long mfn;
388 RING_IDX prod = np->tx.req_prod_pvt;
389 int frags = skb_shinfo(skb)->nr_frags;
390 unsigned int offset = offset_in_page(data);
391 unsigned int len = skb_headlen(skb);
392 unsigned int id;
393 grant_ref_t ref;
394 int i;
395
396 /* While the header overlaps a page boundary (including being
397 larger than a page), split it into page-sized chunks. */
398 while (len > PAGE_SIZE - offset) {
399 tx->size = PAGE_SIZE - offset;
400 tx->flags |= NETTXF_more_data;
401 len -= tx->size;
402 data += tx->size;
403 offset = 0;
404
405 id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
406 np->tx_skbs[id].skb = skb_get(skb);
407 tx = RING_GET_REQUEST(&np->tx, prod++);
408 tx->id = id;
409 ref = gnttab_claim_grant_reference(&np->gref_tx_head);
410 BUG_ON((signed short)ref < 0);
411
412 mfn = virt_to_mfn(data);
413 gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
414 mfn, GNTMAP_readonly);
415
416 tx->gref = np->grant_tx_ref[id] = ref;
417 tx->offset = offset;
418 tx->size = len;
419 tx->flags = 0;
420 }
421
422 /* Grant backend access to each skb fragment page. */
423 for (i = 0; i < frags; i++) {
424 skb_frag_t *frag = skb_shinfo(skb)->frags + i;
425
426 tx->flags |= NETTXF_more_data;
427
428 id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
429 np->tx_skbs[id].skb = skb_get(skb);
430 tx = RING_GET_REQUEST(&np->tx, prod++);
431 tx->id = id;
432 ref = gnttab_claim_grant_reference(&np->gref_tx_head);
433 BUG_ON((signed short)ref < 0);
434
435 mfn = pfn_to_mfn(page_to_pfn(frag->page));
436 gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
437 mfn, GNTMAP_readonly);
438
439 tx->gref = np->grant_tx_ref[id] = ref;
440 tx->offset = frag->page_offset;
441 tx->size = frag->size;
442 tx->flags = 0;
443 }
444
445 np->tx.req_prod_pvt = prod;
446}
447
448static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
449{
450 unsigned short id;
451 struct netfront_info *np = netdev_priv(dev);
452 struct xen_netif_tx_request *tx;
453 struct xen_netif_extra_info *extra;
454 char *data = skb->data;
455 RING_IDX i;
456 grant_ref_t ref;
457 unsigned long mfn;
458 int notify;
459 int frags = skb_shinfo(skb)->nr_frags;
460 unsigned int offset = offset_in_page(data);
461 unsigned int len = skb_headlen(skb);
462
463 frags += (offset + len + PAGE_SIZE - 1) / PAGE_SIZE;
464 if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
465 printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n",
466 frags);
467 dump_stack();
468 goto drop;
469 }
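/*
 * Worked example of the bound above (illustrative): MAX_SKB_FRAGS + 1
 * is the most slots xennet_start_xmit()/xennet_make_frags() can fill
 * for one skb whose linear header fits in a single page.  A header
 * that straddles additional pages pushes "frags" past that limit, so
 * such an skb is dropped here rather than overrun the tx ring.
 */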
470
471 spin_lock_irq(&np->tx_lock);
472
473 if (unlikely(!netif_carrier_ok(dev) ||
474 (frags > 1 && !xennet_can_sg(dev)) ||
475 netif_needs_gso(dev, skb))) {
476 spin_unlock_irq(&np->tx_lock);
477 goto drop;
478 }
479
480 i = np->tx.req_prod_pvt;
481
482 id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
483 np->tx_skbs[id].skb = skb;
484
485 tx = RING_GET_REQUEST(&np->tx, i);
486
487 tx->id = id;
488 ref = gnttab_claim_grant_reference(&np->gref_tx_head);
489 BUG_ON((signed short)ref < 0);
490 mfn = virt_to_mfn(data);
491 gnttab_grant_foreign_access_ref(
492 ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
493 tx->gref = np->grant_tx_ref[id] = ref;
494 tx->offset = offset;
495 tx->size = len;
496 extra = NULL;
497
498 tx->flags = 0;
499 if (skb->ip_summed == CHECKSUM_PARTIAL)
500 /* local packet? */
501 tx->flags |= NETTXF_csum_blank | NETTXF_data_validated;
502 else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
503 /* remote but checksummed. */
504 tx->flags |= NETTXF_data_validated;
505
506 if (skb_shinfo(skb)->gso_size) {
507 struct xen_netif_extra_info *gso;
508
509 gso = (struct xen_netif_extra_info *)
510 RING_GET_REQUEST(&np->tx, ++i);
511
512 if (extra)
513 extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
514 else
515 tx->flags |= NETTXF_extra_info;
516
517 gso->u.gso.size = skb_shinfo(skb)->gso_size;
518 gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
519 gso->u.gso.pad = 0;
520 gso->u.gso.features = 0;
521
522 gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
523 gso->flags = 0;
524 extra = gso;
525 }
526
527 np->tx.req_prod_pvt = i + 1;
528
529 xennet_make_frags(skb, dev, tx);
530 tx->size = skb->len;
531
532 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
533 if (notify)
534 notify_remote_via_irq(np->netdev->irq);
535
536 dev->stats.tx_bytes += skb->len;
537 dev->stats.tx_packets++;
538
539 /* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
540 xennet_tx_buf_gc(dev);
541
542 if (!netfront_tx_slot_available(np))
543 netif_stop_queue(dev);
544
545 spin_unlock_irq(&np->tx_lock);
546
547 return 0;
548
549 drop:
550 dev->stats.tx_dropped++;
551 dev_kfree_skb(skb);
552 return 0;
553}
554
555static int xennet_close(struct net_device *dev)
556{
557 struct netfront_info *np = netdev_priv(dev);
558 netif_stop_queue(np->netdev);
559 napi_disable(&np->napi);
560 return 0;
561}
562
563static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb,
564 grant_ref_t ref)
565{
566 int new = xennet_rxidx(np->rx.req_prod_pvt);
567
568 BUG_ON(np->rx_skbs[new]);
569 np->rx_skbs[new] = skb;
570 np->grant_rx_ref[new] = ref;
571 RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
572 RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
573 np->rx.req_prod_pvt++;
574}
575
576static int xennet_get_extras(struct netfront_info *np,
577 struct xen_netif_extra_info *extras,
578 RING_IDX rp)
579
580{
581 struct xen_netif_extra_info *extra;
582 struct device *dev = &np->netdev->dev;
583 RING_IDX cons = np->rx.rsp_cons;
584 int err = 0;
585
586 do {
587 struct sk_buff *skb;
588 grant_ref_t ref;
589
590 if (unlikely(cons + 1 == rp)) {
591 if (net_ratelimit())
592 dev_warn(dev, "Missing extra info\n");
593 err = -EBADR;
594 break;
595 }
596
597 extra = (struct xen_netif_extra_info *)
598 RING_GET_RESPONSE(&np->rx, ++cons);
599
600 if (unlikely(!extra->type ||
601 extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
602 if (net_ratelimit())
603 dev_warn(dev, "Invalid extra type: %d\n",
604 extra->type);
605 err = -EINVAL;
606 } else {
607 memcpy(&extras[extra->type - 1], extra,
608 sizeof(*extra));
609 }
610
611 skb = xennet_get_rx_skb(np, cons);
612 ref = xennet_get_rx_ref(np, cons);
613 xennet_move_rx_slot(np, skb, ref);
614 } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
615
616 np->rx.rsp_cons = cons;
617 return err;
618}
619
620static int xennet_get_responses(struct netfront_info *np,
621 struct netfront_rx_info *rinfo, RING_IDX rp,
622 struct sk_buff_head *list)
623{
624 struct xen_netif_rx_response *rx = &rinfo->rx;
625 struct xen_netif_extra_info *extras = rinfo->extras;
626 struct device *dev = &np->netdev->dev;
627 RING_IDX cons = np->rx.rsp_cons;
628 struct sk_buff *skb = xennet_get_rx_skb(np, cons);
629 grant_ref_t ref = xennet_get_rx_ref(np, cons);
630 int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
631 int frags = 1;
632 int err = 0;
633 unsigned long ret;
634
635 if (rx->flags & NETRXF_extra_info) {
636 err = xennet_get_extras(np, extras, rp);
637 cons = np->rx.rsp_cons;
638 }
639
640 for (;;) {
641 if (unlikely(rx->status < 0 ||
642 rx->offset + rx->status > PAGE_SIZE)) {
643 if (net_ratelimit())
644 dev_warn(dev, "rx->offset: %x, size: %u\n",
645 rx->offset, rx->status);
646 xennet_move_rx_slot(np, skb, ref);
647 err = -EINVAL;
648 goto next;
649 }
650
651 /*
652 * This definitely indicates a bug, either in this driver or in
653 * the backend driver. In future this should flag the bad
654 * situation to the system controller to reboot the backend.
655 */
656 if (ref == GRANT_INVALID_REF) {
657 if (net_ratelimit())
658 dev_warn(dev, "Bad rx response id %d.\n",
659 rx->id);
660 err = -EINVAL;
661 goto next;
662 }
663
664 ret = gnttab_end_foreign_access_ref(ref, 0);
665 BUG_ON(!ret);
666
667 gnttab_release_grant_reference(&np->gref_rx_head, ref);
668
669 __skb_queue_tail(list, skb);
670
671next:
672 if (!(rx->flags & NETRXF_more_data))
673 break;
674
675 if (cons + frags == rp) {
676 if (net_ratelimit())
677 dev_warn(dev, "Need more frags\n");
678 err = -ENOENT;
679 break;
680 }
681
682 rx = RING_GET_RESPONSE(&np->rx, cons + frags);
683 skb = xennet_get_rx_skb(np, cons + frags);
684 ref = xennet_get_rx_ref(np, cons + frags);
685 frags++;
686 }
687
688 if (unlikely(frags > max)) {
689 if (net_ratelimit())
690 dev_warn(dev, "Too many frags\n");
691 err = -E2BIG;
692 }
693
694 if (unlikely(err))
695 np->rx.rsp_cons = cons + frags;
696
697 return err;
698}
699
700static int xennet_set_skb_gso(struct sk_buff *skb,
701 struct xen_netif_extra_info *gso)
702{
703 if (!gso->u.gso.size) {
704 if (net_ratelimit())
705 printk(KERN_WARNING "GSO size must not be zero.\n");
706 return -EINVAL;
707 }
708
709 /* Currently only TCPv4 S.O. is supported. */
710 if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
711 if (net_ratelimit())
712 printk(KERN_WARNING "Bad GSO type %d.\n", gso->u.gso.type);
713 return -EINVAL;
714 }
715
716 skb_shinfo(skb)->gso_size = gso->u.gso.size;
717 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
718
719 /* Header must be checked, and gso_segs computed. */
720 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
721 skb_shinfo(skb)->gso_segs = 0;
722
723 return 0;
724}
725
726static RING_IDX xennet_fill_frags(struct netfront_info *np,
727 struct sk_buff *skb,
728 struct sk_buff_head *list)
729{
730 struct skb_shared_info *shinfo = skb_shinfo(skb);
731 int nr_frags = shinfo->nr_frags;
732 RING_IDX cons = np->rx.rsp_cons;
733 skb_frag_t *frag = shinfo->frags + nr_frags;
734 struct sk_buff *nskb;
735
736 while ((nskb = __skb_dequeue(list))) {
737 struct xen_netif_rx_response *rx =
738 RING_GET_RESPONSE(&np->rx, ++cons);
739
740 frag->page = skb_shinfo(nskb)->frags[0].page;
741 frag->page_offset = rx->offset;
742 frag->size = rx->status;
743
744 skb->data_len += rx->status;
745
746 skb_shinfo(nskb)->nr_frags = 0;
747 kfree_skb(nskb);
748
749 frag++;
750 nr_frags++;
751 }
752
753 shinfo->nr_frags = nr_frags;
754 return cons;
755}
756
757static int skb_checksum_setup(struct sk_buff *skb)
758{
759 struct iphdr *iph;
760 unsigned char *th;
761 int err = -EPROTO;
762
763 if (skb->protocol != htons(ETH_P_IP))
764 goto out;
765
766 iph = (void *)skb->data;
767 th = skb->data + 4 * iph->ihl;
768 if (th >= skb_tail_pointer(skb))
769 goto out;
770
771 skb->csum_start = th - skb->head;
772 switch (iph->protocol) {
773 case IPPROTO_TCP:
774 skb->csum_offset = offsetof(struct tcphdr, check);
775 break;
776 case IPPROTO_UDP:
777 skb->csum_offset = offsetof(struct udphdr, check);
778 break;
779 default:
780 if (net_ratelimit())
781 printk(KERN_ERR "Attempting to checksum a non-"
782 "TCP/UDP packet, dropping a protocol"
783 " %d packet", iph->protocol);
784 goto out;
785 }
786
787 if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb))
788 goto out;
789
790 err = 0;
791
792out:
793 return err;
794}
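/*
 * Example of what skb_checksum_setup() computes (illustrative values):
 * for a TCP packet with a 20-byte IP header, csum_start points at the
 * start of the TCP header (skb->data + 20 relative to skb->head) and
 * csum_offset is offsetof(struct tcphdr, check) == 16, telling the
 * stack where to write the checksum for this CHECKSUM_PARTIAL skb.
 */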
795
796static int handle_incoming_queue(struct net_device *dev,
797 struct sk_buff_head *rxq)
798{
799 int packets_dropped = 0;
800 struct sk_buff *skb;
801
802 while ((skb = __skb_dequeue(rxq)) != NULL) {
803 struct page *page = NETFRONT_SKB_CB(skb)->page;
804 void *vaddr = page_address(page);
805 unsigned offset = NETFRONT_SKB_CB(skb)->offset;
806
807 memcpy(skb->data, vaddr + offset,
808 skb_headlen(skb));
809
810 if (page != skb_shinfo(skb)->frags[0].page)
811 __free_page(page);
812
813 /* Ethernet work: Delayed to here as it peeks the header. */
814 skb->protocol = eth_type_trans(skb, dev);
815
816 if (skb->ip_summed == CHECKSUM_PARTIAL) {
817 if (skb_checksum_setup(skb)) {
818 kfree_skb(skb);
819 packets_dropped++;
820 dev->stats.rx_errors++;
821 continue;
822 }
823 }
824
825 dev->stats.rx_packets++;
826 dev->stats.rx_bytes += skb->len;
827
828 /* Pass it up. */
829 netif_receive_skb(skb);
830 dev->last_rx = jiffies;
831 }
832
833 return packets_dropped;
834}
835
836static int xennet_poll(struct napi_struct *napi, int budget)
837{
838 struct netfront_info *np = container_of(napi, struct netfront_info, napi);
839 struct net_device *dev = np->netdev;
840 struct sk_buff *skb;
841 struct netfront_rx_info rinfo;
842 struct xen_netif_rx_response *rx = &rinfo.rx;
843 struct xen_netif_extra_info *extras = rinfo.extras;
844 RING_IDX i, rp;
845 int work_done;
846 struct sk_buff_head rxq;
847 struct sk_buff_head errq;
848 struct sk_buff_head tmpq;
849 unsigned long flags;
850 unsigned int len;
851 int err;
852
853 spin_lock(&np->rx_lock);
854
855 if (unlikely(!netif_carrier_ok(dev))) {
856 spin_unlock(&np->rx_lock);
857 return 0;
858 }
859
860 skb_queue_head_init(&rxq);
861 skb_queue_head_init(&errq);
862 skb_queue_head_init(&tmpq);
863
864 rp = np->rx.sring->rsp_prod;
865 rmb(); /* Ensure we see queued responses up to 'rp'. */
866
867 i = np->rx.rsp_cons;
868 work_done = 0;
869 while ((i != rp) && (work_done < budget)) {
870 memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
871 memset(extras, 0, sizeof(rinfo.extras));
872
873 err = xennet_get_responses(np, &rinfo, rp, &tmpq);
874
875 if (unlikely(err)) {
876err:
877 while ((skb = __skb_dequeue(&tmpq)))
878 __skb_queue_tail(&errq, skb);
879 dev->stats.rx_errors++;
880 i = np->rx.rsp_cons;
881 continue;
882 }
883
884 skb = __skb_dequeue(&tmpq);
885
886 if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
887 struct xen_netif_extra_info *gso;
888 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
889
890 if (unlikely(xennet_set_skb_gso(skb, gso))) {
891 __skb_queue_head(&tmpq, skb);
892 np->rx.rsp_cons += skb_queue_len(&tmpq);
893 goto err;
894 }
895 }
896
897 NETFRONT_SKB_CB(skb)->page = skb_shinfo(skb)->frags[0].page;
898 NETFRONT_SKB_CB(skb)->offset = rx->offset;
899
900 len = rx->status;
901 if (len > RX_COPY_THRESHOLD)
902 len = RX_COPY_THRESHOLD;
903 skb_put(skb, len);
904
905 if (rx->status > len) {
906 skb_shinfo(skb)->frags[0].page_offset =
907 rx->offset + len;
908 skb_shinfo(skb)->frags[0].size = rx->status - len;
909 skb->data_len = rx->status - len;
910 } else {
911 skb_shinfo(skb)->frags[0].page = NULL;
912 skb_shinfo(skb)->nr_frags = 0;
913 }
914
915 i = xennet_fill_frags(np, skb, &tmpq);
916
917 /*
918 * Truesize approximates the size of true data plus
919 * any supervisor overheads. Adding hypervisor
920 * overheads has been shown to significantly reduce
921 * achievable bandwidth with the default receive
922 * buffer size. It is therefore not wise to account
923 * for it here.
924 *
925 * After alloc_skb(RX_COPY_THRESHOLD), truesize is set
926 * to RX_COPY_THRESHOLD + the supervisor
927 * overheads. Here, we add the size of the data pulled
928 * in xennet_fill_frags().
929 *
930 * We also adjust for any unused space in the main
931 * data area by subtracting (RX_COPY_THRESHOLD -
932 * len). This is especially important with drivers
933 * which split incoming packets into header and data,
934 * using only 66 bytes of the main data area (see the
935 * e1000 driver for example.) On such systems,
936 * without this last adjustment, our achievable
937 * receive throughput using the standard receive
938 * buffer size was cut by 25%(!!!).
939 */
940 skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len);
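 /* Illustrative numbers for the line above: if only len == 66 bytes
  * were copied into the linear area and data_len == 1400, truesize
  * grows by 1400 - (256 - 66) = 1210 rather than the full 1400. */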
941 skb->len += skb->data_len;
942
943 if (rx->flags & NETRXF_csum_blank)
944 skb->ip_summed = CHECKSUM_PARTIAL;
945 else if (rx->flags & NETRXF_data_validated)
946 skb->ip_summed = CHECKSUM_UNNECESSARY;
947
948 __skb_queue_tail(&rxq, skb);
949
950 np->rx.rsp_cons = ++i;
951 work_done++;
952 }
953
954 while ((skb = __skb_dequeue(&errq)))
955 kfree_skb(skb);
956
957 work_done -= handle_incoming_queue(dev, &rxq);
958
959 /* If we get a callback with very few responses, reduce fill target. */
960 /* NB. Note exponential increase, linear decrease. */
961 if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
962 ((3*np->rx_target) / 4)) &&
963 (--np->rx_target < np->rx_min_target))
964 np->rx_target = np->rx_min_target;
965
966 xennet_alloc_rx_buffers(dev);
967
968 if (work_done < budget) {
969 int more_to_do = 0;
970
971 local_irq_save(flags);
972
973 RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
974 if (!more_to_do)
975 __netif_rx_complete(dev, napi);
976
977 local_irq_restore(flags);
978 }
979
980 spin_unlock(&np->rx_lock);
981
982 return work_done;
983}
984
985static int xennet_change_mtu(struct net_device *dev, int mtu)
986{
987 int max = xennet_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN;
988
989 if (mtu > max)
990 return -EINVAL;
991 dev->mtu = mtu;
992 return 0;
993}
994
995static void xennet_release_tx_bufs(struct netfront_info *np)
996{
997 struct sk_buff *skb;
998 int i;
999
1000 for (i = 0; i < NET_TX_RING_SIZE; i++) {
1001 /* Skip over entries which are actually freelist references */
1002 if ((unsigned long)np->tx_skbs[i].skb < PAGE_OFFSET)
1003 continue;
1004
1005 skb = np->tx_skbs[i].skb;
1006 gnttab_end_foreign_access_ref(np->grant_tx_ref[i],
1007 GNTMAP_readonly);
1008 gnttab_release_grant_reference(&np->gref_tx_head,
1009 np->grant_tx_ref[i]);
1010 np->grant_tx_ref[i] = GRANT_INVALID_REF;
1011 add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i);
1012 dev_kfree_skb_irq(skb);
1013 }
1014}
1015
1016static void xennet_release_rx_bufs(struct netfront_info *np)
1017{
1018 struct mmu_update *mmu = np->rx_mmu;
1019 struct multicall_entry *mcl = np->rx_mcl;
1020 struct sk_buff_head free_list;
1021 struct sk_buff *skb;
1022 unsigned long mfn;
1023 int xfer = 0, noxfer = 0, unused = 0;
1024 int id, ref;
1025
1026 dev_warn(&np->netdev->dev, "%s: fix me for copying receiver.\n",
1027 __func__);
1028 return;
1029
1030 skb_queue_head_init(&free_list);
1031
1032 spin_lock_bh(&np->rx_lock);
1033
1034 for (id = 0; id < NET_RX_RING_SIZE; id++) {
1035 ref = np->grant_rx_ref[id];
1036 if (ref == GRANT_INVALID_REF) {
1037 unused++;
1038 continue;
1039 }
1040
1041 skb = np->rx_skbs[id];
1042 mfn = gnttab_end_foreign_transfer_ref(ref);
1043 gnttab_release_grant_reference(&np->gref_rx_head, ref);
1044 np->grant_rx_ref[id] = GRANT_INVALID_REF;
1045
1046 if (0 == mfn) {
1047 skb_shinfo(skb)->nr_frags = 0;
1048 dev_kfree_skb(skb);
1049 noxfer++;
1050 continue;
1051 }
1052
1053 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1054 /* Remap the page. */
1055 struct page *page = skb_shinfo(skb)->frags[0].page;
1056 unsigned long pfn = page_to_pfn(page);
1057 void *vaddr = page_address(page);
1058
1059 MULTI_update_va_mapping(mcl, (unsigned long)vaddr,
1060 mfn_pte(mfn, PAGE_KERNEL),
1061 0);
1062 mcl++;
1063 mmu->ptr = ((u64)mfn << PAGE_SHIFT)
1064 | MMU_MACHPHYS_UPDATE;
1065 mmu->val = pfn;
1066 mmu++;
1067
1068 set_phys_to_machine(pfn, mfn);
1069 }
1070 __skb_queue_tail(&free_list, skb);
1071 xfer++;
1072 }
1073
1074 dev_info(&np->netdev->dev, "%s: %d xfer, %d noxfer, %d unused\n",
1075 __func__, xfer, noxfer, unused);
1076
1077 if (xfer) {
1078 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1079 /* Do all the remapping work and M2P updates. */
1080 MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu,
1081 0, DOMID_SELF);
1082 mcl++;
1083 HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl);
1084 }
1085 }
1086
1087 while ((skb = __skb_dequeue(&free_list)) != NULL)
1088 dev_kfree_skb(skb);
1089
1090 spin_unlock_bh(&np->rx_lock);
1091}
1092
1093static void xennet_uninit(struct net_device *dev)
1094{
1095 struct netfront_info *np = netdev_priv(dev);
1096 xennet_release_tx_bufs(np);
1097 xennet_release_rx_bufs(np);
1098 gnttab_free_grant_references(np->gref_tx_head);
1099 gnttab_free_grant_references(np->gref_rx_head);
1100}
1101
1102static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev)
1103{
1104 int i, err;
1105 struct net_device *netdev;
1106 struct netfront_info *np;
1107
1108 netdev = alloc_etherdev(sizeof(struct netfront_info));
1109 if (!netdev) {
1110 printk(KERN_WARNING "%s> alloc_etherdev failed.\n",
1111 __func__);
1112 return ERR_PTR(-ENOMEM);
1113 }
1114
1115 np = netdev_priv(netdev);
1116 np->xbdev = dev;
1117
1118 spin_lock_init(&np->tx_lock);
1119 spin_lock_init(&np->rx_lock);
1120
1121 skb_queue_head_init(&np->rx_batch);
1122 np->rx_target = RX_DFL_MIN_TARGET;
1123 np->rx_min_target = RX_DFL_MIN_TARGET;
1124 np->rx_max_target = RX_MAX_TARGET;
1125
1126 init_timer(&np->rx_refill_timer);
1127 np->rx_refill_timer.data = (unsigned long)netdev;
1128 np->rx_refill_timer.function = rx_refill_timeout;
1129
1130 /* Initialise tx_skbs as a free chain containing every entry. */
1131 np->tx_skb_freelist = 0;
1132 for (i = 0; i < NET_TX_RING_SIZE; i++) {
1133 np->tx_skbs[i].link = i+1;
1134 np->grant_tx_ref[i] = GRANT_INVALID_REF;
1135 }
1136
1137 /* Clear out rx_skbs */
1138 for (i = 0; i < NET_RX_RING_SIZE; i++) {
1139 np->rx_skbs[i] = NULL;
1140 np->grant_rx_ref[i] = GRANT_INVALID_REF;
1141 }
1142
1143 /* A grant for every tx ring slot */
1144 if (gnttab_alloc_grant_references(TX_MAX_TARGET,
1145 &np->gref_tx_head) < 0) {
1146 printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
1147 err = -ENOMEM;
1148 goto exit;
1149 }
1150 /* A grant for every rx ring slot */
1151 if (gnttab_alloc_grant_references(RX_MAX_TARGET,
1152 &np->gref_rx_head) < 0) {
1153 printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n");
1154 err = -ENOMEM;
1155 goto exit_free_tx;
1156 }
1157
1158 netdev->open = xennet_open;
1159 netdev->hard_start_xmit = xennet_start_xmit;
1160 netdev->stop = xennet_close;
1161 netif_napi_add(netdev, &np->napi, xennet_poll, 64);
1162 netdev->uninit = xennet_uninit;
1163 netdev->change_mtu = xennet_change_mtu;
1164 netdev->features = NETIF_F_IP_CSUM;
1165
1166 SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops);
1167 SET_NETDEV_DEV(netdev, &dev->dev);
1168
1169 np->netdev = netdev;
1170
1171 netif_carrier_off(netdev);
1172
1173 return netdev;
1174
1175 exit_free_tx:
1176 gnttab_free_grant_references(np->gref_tx_head);
1177 exit:
1178 free_netdev(netdev);
1179 return ERR_PTR(err);
1180}
1181
1182/**
1183 * Entry point to this code when a new device is created. Allocate the basic
1184 * structures and the ring buffers for communication with the backend, and
1185 * inform the backend of the appropriate details for those.
1186 */
1187static int __devinit netfront_probe(struct xenbus_device *dev,
1188 const struct xenbus_device_id *id)
1189{
1190 int err;
1191 struct net_device *netdev;
1192 struct netfront_info *info;
1193
1194 netdev = xennet_create_dev(dev);
1195 if (IS_ERR(netdev)) {
1196 err = PTR_ERR(netdev);
1197 xenbus_dev_fatal(dev, err, "creating netdev");
1198 return err;
1199 }
1200
1201 info = netdev_priv(netdev);
1202 dev->dev.driver_data = info;
1203
1204 err = register_netdev(info->netdev);
1205 if (err) {
1206 printk(KERN_WARNING "%s: register_netdev err=%d\n",
1207 __func__, err);
1208 goto fail;
1209 }
1210
1211 err = xennet_sysfs_addif(info->netdev);
1212 if (err) {
1213 unregister_netdev(info->netdev);
1214 printk(KERN_WARNING "%s: add sysfs failed err=%d\n",
1215 __func__, err);
1216 goto fail;
1217 }
1218
1219 return 0;
1220
1221 fail:
1222 free_netdev(netdev);
1223 dev->dev.driver_data = NULL;
1224 return err;
1225}
1226
1227static void xennet_end_access(int ref, void *page)
1228{
1229 /* This frees the page as a side-effect */
1230 if (ref != GRANT_INVALID_REF)
1231 gnttab_end_foreign_access(ref, 0, (unsigned long)page);
1232}
1233
1234static void xennet_disconnect_backend(struct netfront_info *info)
1235{
1236 /* Stop old i/f to prevent errors whilst we rebuild the state. */
1237 spin_lock_bh(&info->rx_lock);
1238 spin_lock_irq(&info->tx_lock);
1239 netif_carrier_off(info->netdev);
1240 spin_unlock_irq(&info->tx_lock);
1241 spin_unlock_bh(&info->rx_lock);
1242
1243 if (info->netdev->irq)
1244 unbind_from_irqhandler(info->netdev->irq, info->netdev);
1245 info->evtchn = info->netdev->irq = 0;
1246
1247 /* End access and free the pages */
1248 xennet_end_access(info->tx_ring_ref, info->tx.sring);
1249 xennet_end_access(info->rx_ring_ref, info->rx.sring);
1250
1251 info->tx_ring_ref = GRANT_INVALID_REF;
1252 info->rx_ring_ref = GRANT_INVALID_REF;
1253 info->tx.sring = NULL;
1254 info->rx.sring = NULL;
1255}
1256
1257/**
1258 * We are reconnecting to the backend, due to a suspend/resume, or a backend
1259 * driver restart. We tear down our netif structure and recreate it, but
1260 * leave the device-layer structures intact so that this is transparent to the
1261 * rest of the kernel.
1262 */
1263static int netfront_resume(struct xenbus_device *dev)
1264{
1265 struct netfront_info *info = dev->dev.driver_data;
1266
1267 dev_dbg(&dev->dev, "%s\n", dev->nodename);
1268
1269 xennet_disconnect_backend(info);
1270 return 0;
1271}
1272
1273static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
1274{
1275 char *s, *e, *macstr;
1276 int i;
1277
1278 macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
1279 if (IS_ERR(macstr))
1280 return PTR_ERR(macstr);
1281
1282 for (i = 0; i < ETH_ALEN; i++) {
1283 mac[i] = simple_strtoul(s, &e, 16);
1284 if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
1285 kfree(macstr);
1286 return -ENOENT;
1287 }
1288 s = e+1;
1289 }
1290
1291 kfree(macstr);
1292 return 0;
1293}
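/*
 * The xenstore "mac" node parsed above is a colon-separated string of
 * six hex octets, e.g. "00:16:3e:12:34:56" (00:16:3e being the OUI
 * commonly used for Xen guests); anything else fails with -ENOENT.
 */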
1294
1295static irqreturn_t xennet_interrupt(int irq, void *dev_id)
1296{
1297 struct net_device *dev = dev_id;
1298 struct netfront_info *np = netdev_priv(dev);
1299 unsigned long flags;
1300
1301 spin_lock_irqsave(&np->tx_lock, flags);
1302
1303 if (likely(netif_carrier_ok(dev))) {
1304 xennet_tx_buf_gc(dev);
1305 /* Under tx_lock: protects access to rx shared-ring indexes. */
1306 if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
1307 netif_rx_schedule(dev, &np->napi);
1308 }
1309
1310 spin_unlock_irqrestore(&np->tx_lock, flags);
1311
1312 return IRQ_HANDLED;
1313}
1314
1315static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
1316{
1317 struct xen_netif_tx_sring *txs;
1318 struct xen_netif_rx_sring *rxs;
1319 int err;
1320 struct net_device *netdev = info->netdev;
1321
1322 info->tx_ring_ref = GRANT_INVALID_REF;
1323 info->rx_ring_ref = GRANT_INVALID_REF;
1324 info->rx.sring = NULL;
1325 info->tx.sring = NULL;
1326 netdev->irq = 0;
1327
1328 err = xen_net_read_mac(dev, netdev->dev_addr);
1329 if (err) {
1330 xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
1331 goto fail;
1332 }
1333
1334 txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_KERNEL);
1335 if (!txs) {
1336 err = -ENOMEM;
1337 xenbus_dev_fatal(dev, err, "allocating tx ring page");
1338 goto fail;
1339 }
1340 SHARED_RING_INIT(txs);
1341 FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
1342
1343 err = xenbus_grant_ring(dev, virt_to_mfn(txs));
1344 if (err < 0) {
1345 free_page((unsigned long)txs);
1346 goto fail;
1347 }
1348
1349 info->tx_ring_ref = err;
1350 rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_KERNEL);
1351 if (!rxs) {
1352 err = -ENOMEM;
1353 xenbus_dev_fatal(dev, err, "allocating rx ring page");
1354 goto fail;
1355 }
1356 SHARED_RING_INIT(rxs);
1357 FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);
1358
1359 err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
1360 if (err < 0) {
1361 free_page((unsigned long)rxs);
1362 goto fail;
1363 }
1364 info->rx_ring_ref = err;
1365
1366 err = xenbus_alloc_evtchn(dev, &info->evtchn);
1367 if (err)
1368 goto fail;
1369
1370 err = bind_evtchn_to_irqhandler(info->evtchn, xennet_interrupt,
1371 IRQF_SAMPLE_RANDOM, netdev->name,
1372 netdev);
1373 if (err < 0)
1374 goto fail;
1375 netdev->irq = err;
1376 return 0;
1377
1378 fail:
1379 return err;
1380}
1381
1382/* Common code used when first setting up, and when resuming. */
1383static int talk_to_backend(struct xenbus_device *dev,
1384 struct netfront_info *info)
1385{
1386 const char *message;
1387 struct xenbus_transaction xbt;
1388 int err;
1389
1390 /* Create shared ring, alloc event channel. */
1391 err = setup_netfront(dev, info);
1392 if (err)
1393 goto out;
1394
1395again:
1396 err = xenbus_transaction_start(&xbt);
1397 if (err) {
1398 xenbus_dev_fatal(dev, err, "starting transaction");
1399 goto destroy_ring;
1400 }
1401
1402 err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref", "%u",
1403 info->tx_ring_ref);
1404 if (err) {
1405 message = "writing tx ring-ref";
1406 goto abort_transaction;
1407 }
1408 err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref", "%u",
1409 info->rx_ring_ref);
1410 if (err) {
1411 message = "writing rx ring-ref";
1412 goto abort_transaction;
1413 }
1414 err = xenbus_printf(xbt, dev->nodename,
1415 "event-channel", "%u", info->evtchn);
1416 if (err) {
1417 message = "writing event-channel";
1418 goto abort_transaction;
1419 }
1420
1421 err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
1422 1);
1423 if (err) {
1424 message = "writing request-rx-copy";
1425 goto abort_transaction;
1426 }
1427
1428 err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
1429 if (err) {
1430 message = "writing feature-rx-notify";
1431 goto abort_transaction;
1432 }
1433
1434 err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
1435 if (err) {
1436 message = "writing feature-sg";
1437 goto abort_transaction;
1438 }
1439
1440 err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
1441 if (err) {
1442 message = "writing feature-gso-tcpv4";
1443 goto abort_transaction;
1444 }
1445
1446 err = xenbus_transaction_end(xbt, 0);
1447 if (err) {
1448 if (err == -EAGAIN)
1449 goto again;
1450 xenbus_dev_fatal(dev, err, "completing transaction");
1451 goto destroy_ring;
1452 }
1453
1454 return 0;
1455
1456 abort_transaction:
1457 xenbus_transaction_end(xbt, 1);
1458 xenbus_dev_fatal(dev, err, "%s", message);
1459 destroy_ring:
1460 xennet_disconnect_backend(info);
1461 out:
1462 return err;
1463}
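/*
 * Sketch of the frontend keys written by talk_to_backend() above,
 * relative to the device's xenstore directory (dev->nodename):
 *
 *   tx-ring-ref        = <grant ref of the tx shared ring>
 *   rx-ring-ref        = <grant ref of the rx shared ring>
 *   event-channel      = <event channel port>
 *   request-rx-copy    = 1
 *   feature-rx-notify  = 1
 *   feature-sg         = 1
 *   feature-gso-tcpv4  = 1
 */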
1464
1465static int xennet_set_sg(struct net_device *dev, u32 data)
1466{
1467 if (data) {
1468 struct netfront_info *np = netdev_priv(dev);
1469 int val;
1470
1471 if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
1472 "%d", &val) < 0)
1473 val = 0;
1474 if (!val)
1475 return -ENOSYS;
1476 } else if (dev->mtu > ETH_DATA_LEN)
1477 dev->mtu = ETH_DATA_LEN;
1478
1479 return ethtool_op_set_sg(dev, data);
1480}
1481
1482static int xennet_set_tso(struct net_device *dev, u32 data)
1483{
1484 if (data) {
1485 struct netfront_info *np = netdev_priv(dev);
1486 int val;
1487
1488 if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1489 "feature-gso-tcpv4", "%d", &val) < 0)
1490 val = 0;
1491 if (!val)
1492 return -ENOSYS;
1493 }
1494
1495 return ethtool_op_set_tso(dev, data);
1496}
1497
1498static void xennet_set_features(struct net_device *dev)
1499{
1500 /* Turn off all GSO bits except ROBUST. */
1501 dev->features &= (1 << NETIF_F_GSO_SHIFT) - 1;
1502 dev->features |= NETIF_F_GSO_ROBUST;
1503 xennet_set_sg(dev, 0);
1504
1505 /* We need checksum offload to enable scatter/gather and TSO. */
1506 if (!(dev->features & NETIF_F_IP_CSUM))
1507 return;
1508
1509 if (!xennet_set_sg(dev, 1))
1510 xennet_set_tso(dev, 1);
1511}
1512
1513static int xennet_connect(struct net_device *dev)
1514{
1515 struct netfront_info *np = netdev_priv(dev);
1516 int i, requeue_idx, err;
1517 struct sk_buff *skb;
1518 grant_ref_t ref;
1519 struct xen_netif_rx_request *req;
1520 unsigned int feature_rx_copy;
1521
1522 err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1523 "feature-rx-copy", "%u", &feature_rx_copy);
1524 if (err != 1)
1525 feature_rx_copy = 0;
1526
1527 if (!feature_rx_copy) {
1528 dev_info(&dev->dev,
1529 "backend does not support copying recieve path");
1530 return -ENODEV;
1531 }
1532
1533 err = talk_to_backend(np->xbdev, np);
1534 if (err)
1535 return err;
1536
1537 xennet_set_features(dev);
1538
1539 spin_lock_bh(&np->rx_lock);
1540 spin_lock_irq(&np->tx_lock);
1541
1542 /* Step 1: Discard all pending TX packet fragments. */
1543 xennet_release_tx_bufs(np);
1544
1545 /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
1546 for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
1547 if (!np->rx_skbs[i])
1548 continue;
1549
1550 skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i);
1551 ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
1552 req = RING_GET_REQUEST(&np->rx, requeue_idx);
1553
1554 gnttab_grant_foreign_access_ref(
1555 ref, np->xbdev->otherend_id,
1556 pfn_to_mfn(page_to_pfn(skb_shinfo(skb)->
1557 frags->page)),
1558 0);
1559 req->gref = ref;
1560 req->id = requeue_idx;
1561
1562 requeue_idx++;
1563 }
1564
1565 np->rx.req_prod_pvt = requeue_idx;
1566
1567 /*
1568 * Step 3: All public and private state should now be sane. Get
1569 * ready to start sending and receiving packets and give the driver
1570 * domain a kick because we've probably just requeued some
1571 * packets.
1572 */
1573 netif_carrier_on(np->netdev);
1574 notify_remote_via_irq(np->netdev->irq);
1575 xennet_tx_buf_gc(dev);
1576 xennet_alloc_rx_buffers(dev);
1577
1578 spin_unlock_irq(&np->tx_lock);
1579 spin_unlock_bh(&np->rx_lock);
1580
1581 return 0;
1582}
1583
1584/**
1585 * Callback received when the backend's state changes.
1586 */
1587static void backend_changed(struct xenbus_device *dev,
1588 enum xenbus_state backend_state)
1589{
1590 struct netfront_info *np = dev->dev.driver_data;
1591 struct net_device *netdev = np->netdev;
1592
1593 dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
1594
1595 switch (backend_state) {
1596 case XenbusStateInitialising:
1597 case XenbusStateInitialised:
1598 case XenbusStateConnected:
1599 case XenbusStateUnknown:
1600 case XenbusStateClosed:
1601 break;
1602
1603 case XenbusStateInitWait:
1604 if (dev->state != XenbusStateInitialising)
1605 break;
1606 if (xennet_connect(netdev) != 0)
1607 break;
1608 xenbus_switch_state(dev, XenbusStateConnected);
1609 break;
1610
1611 case XenbusStateClosing:
1612 xenbus_frontend_closed(dev);
1613 break;
1614 }
1615}
1616
1617static struct ethtool_ops xennet_ethtool_ops =
1618{
1619 .set_tx_csum = ethtool_op_set_tx_csum,
1620 .set_sg = xennet_set_sg,
1621 .set_tso = xennet_set_tso,
1622 .get_link = ethtool_op_get_link,
1623};
1624
1625#ifdef CONFIG_SYSFS
1626static ssize_t show_rxbuf_min(struct device *dev,
1627 struct device_attribute *attr, char *buf)
1628{
1629 struct net_device *netdev = to_net_dev(dev);
1630 struct netfront_info *info = netdev_priv(netdev);
1631
1632 return sprintf(buf, "%u\n", info->rx_min_target);
1633}
1634
1635static ssize_t store_rxbuf_min(struct device *dev,
1636 struct device_attribute *attr,
1637 const char *buf, size_t len)
1638{
1639 struct net_device *netdev = to_net_dev(dev);
1640 struct netfront_info *np = netdev_priv(netdev);
1641 char *endp;
1642 unsigned long target;
1643
1644 if (!capable(CAP_NET_ADMIN))
1645 return -EPERM;
1646
1647 target = simple_strtoul(buf, &endp, 0);
1648 if (endp == buf)
1649 return -EBADMSG;
1650
1651 if (target < RX_MIN_TARGET)
1652 target = RX_MIN_TARGET;
1653 if (target > RX_MAX_TARGET)
1654 target = RX_MAX_TARGET;
1655
1656 spin_lock_bh(&np->rx_lock);
1657 if (target > np->rx_max_target)
1658 np->rx_max_target = target;
1659 np->rx_min_target = target;
1660 if (target > np->rx_target)
1661 np->rx_target = target;
1662
1663 xennet_alloc_rx_buffers(netdev);
1664
1665 spin_unlock_bh(&np->rx_lock);
1666 return len;
1667}
1668
1669static ssize_t show_rxbuf_max(struct device *dev,
1670 struct device_attribute *attr, char *buf)
1671{
1672 struct net_device *netdev = to_net_dev(dev);
1673 struct netfront_info *info = netdev_priv(netdev);
1674
1675 return sprintf(buf, "%u\n", info->rx_max_target);
1676}
1677
1678static ssize_t store_rxbuf_max(struct device *dev,
1679 struct device_attribute *attr,
1680 const char *buf, size_t len)
1681{
1682 struct net_device *netdev = to_net_dev(dev);
1683 struct netfront_info *np = netdev_priv(netdev);
1684 char *endp;
1685 unsigned long target;
1686
1687 if (!capable(CAP_NET_ADMIN))
1688 return -EPERM;
1689
1690 target = simple_strtoul(buf, &endp, 0);
1691 if (endp == buf)
1692 return -EBADMSG;
1693
1694 if (target < RX_MIN_TARGET)
1695 target = RX_MIN_TARGET;
1696 if (target > RX_MAX_TARGET)
1697 target = RX_MAX_TARGET;
1698
1699 spin_lock_bh(&np->rx_lock);
1700 if (target < np->rx_min_target)
1701 np->rx_min_target = target;
1702 np->rx_max_target = target;
1703 if (target < np->rx_target)
1704 np->rx_target = target;
1705
1706 xennet_alloc_rx_buffers(netdev);
1707
1708 spin_unlock_bh(&np->rx_lock);
1709 return len;
1710}
1711
1712static ssize_t show_rxbuf_cur(struct device *dev,
1713 struct device_attribute *attr, char *buf)
1714{
1715 struct net_device *netdev = to_net_dev(dev);
1716 struct netfront_info *info = netdev_priv(netdev);
1717
1718 return sprintf(buf, "%u\n", info->rx_target);
1719}
1720
1721static struct device_attribute xennet_attrs[] = {
1722 __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min),
1723 __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max),
1724 __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL),
1725};
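/*
 * Usage sketch (assuming the attributes land under the netdev's sysfs
 * directory, e.g. /sys/class/net/eth0/):
 *
 *   cat /sys/class/net/eth0/rxbuf_cur
 *   echo 128 > /sys/class/net/eth0/rxbuf_min   (requires CAP_NET_ADMIN)
 *
 * Values are clamped to [RX_MIN_TARGET, RX_MAX_TARGET] by the store
 * handlers above.
 */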
1726
1727static int xennet_sysfs_addif(struct net_device *netdev)
1728{
1729 int i;
1730 int err;
1731
1732 for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
1733 err = device_create_file(&netdev->dev,
1734 &xennet_attrs[i]);
1735 if (err)
1736 goto fail;
1737 }
1738 return 0;
1739
1740 fail:
1741 while (--i >= 0)
1742 device_remove_file(&netdev->dev, &xennet_attrs[i]);
1743 return err;
1744}
1745
1746static void xennet_sysfs_delif(struct net_device *netdev)
1747{
1748 int i;
1749
1750 for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++)
1751 device_remove_file(&netdev->dev, &xennet_attrs[i]);
1752}
1753
1754#endif /* CONFIG_SYSFS */
1755
1756static struct xenbus_device_id netfront_ids[] = {
1757 { "vif" },
1758 { "" }
1759};
1760
1761
1762static int __devexit xennet_remove(struct xenbus_device *dev)
1763{
1764 struct netfront_info *info = dev->dev.driver_data;
1765
1766 dev_dbg(&dev->dev, "%s\n", dev->nodename);
1767
1768 unregister_netdev(info->netdev);
1769
1770 xennet_disconnect_backend(info);
1771
1772 del_timer_sync(&info->rx_refill_timer);
1773
1774 xennet_sysfs_delif(info->netdev);
1775
1776 free_netdev(info->netdev);
1777
1778 return 0;
1779}
1780
1781static struct xenbus_driver netfront = {
1782 .name = "vif",
1783 .owner = THIS_MODULE,
1784 .ids = netfront_ids,
1785 .probe = netfront_probe,
1786 .remove = __devexit_p(xennet_remove),
1787 .resume = netfront_resume,
1788 .otherend_changed = backend_changed,
1789};
1790
1791static int __init netif_init(void)
1792{
1793 if (!is_running_on_xen())
1794 return -ENODEV;
1795
1796 if (is_initial_xendomain())
1797 return 0;
1798
1799 printk(KERN_INFO "Initialising Xen virtual ethernet driver.\n");
1800
1801 return xenbus_register_frontend(&netfront);
1802}
1803module_init(netif_init);
1804
1805
1806static void __exit netif_exit(void)
1807{
1808 if (is_initial_xendomain())
1809 return;
1810
1811 return xenbus_unregister_driver(&netfront);
1812}
1813module_exit(netif_exit);
1814
1815MODULE_DESCRIPTION("Xen virtual network device frontend");
1816MODULE_LICENSE("GPL");