/* drivers/net/xen-netback/interface.c */
/*
 * Network-device interface management.
 *
 * Copyright (c) 2004-2005, Keir Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>

#include <xen/events.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>

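/* XENVIF_QUEUE_LENGTH seeds the netdev's tx_queue_len below;
 * XENVIF_NAPI_WEIGHT is the conventional NAPI budget of 64 used by
 * most network drivers.
 */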
#define XENVIF_QUEUE_LENGTH 32
#define XENVIF_NAPI_WEIGHT 64

/* Number of bytes allowed on the internal guest Rx queue. */
#define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE)

/* This function is used to set SKBTX_DEV_ZEROCOPY as well as
 * increasing the inflight counter. We need to increase the inflight
 * counter because the core driver calls into xenvif_zerocopy_callback,
 * which calls xenvif_skb_zerocopy_complete.
 */
void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
                                 struct sk_buff *skb)
{
        skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
        atomic_inc(&queue->inflight_packets);
}

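/* Counterpart to xenvif_skb_zerocopy_prepare(): runs once the zerocopy
 * callback has fired for a packet, so the inflight count can drop back.
 */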
void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
{
        atomic_dec(&queue->inflight_packets);

        /* Wake the dealloc thread _after_ decrementing inflight_packets so
         * that if kthread_stop() has already been called, the dealloc thread
         * does not wait forever with nothing to wake it.
         */
        wake_up(&queue->dealloc_wq);
}

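/* A vif may be scheduled only while its device is up, the backend is
 * connected, and the vif has not been marked rogue (disabled).
 */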
int xenvif_schedulable(struct xenvif *vif)
{
        return netif_running(vif->dev) &&
                test_bit(VIF_STATUS_CONNECTED, &vif->status) &&
                !vif->disabled;
}

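/* TX event channel handler: the frontend has produced tx requests, so
 * defer the actual processing to NAPI rather than doing it in irq context.
 */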
static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
        struct xenvif_queue *queue = dev_id;

        if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))
                napi_schedule(&queue->napi);

        return IRQ_HANDLED;
}

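/* NAPI poll handler. Per the NAPI contract, returning less than the
 * budget signals that this queue is done and event delivery may be
 * re-armed.
 */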
static int xenvif_poll(struct napi_struct *napi, int budget)
{
        struct xenvif_queue *queue =
                container_of(napi, struct xenvif_queue, napi);
        int work_done;

        /* This vif is rogue, so we pretend there is nothing to do
         * for this vif to deschedule it from NAPI. But this interface
         * will be turned off in thread context later.
         */
        if (unlikely(queue->vif->disabled)) {
                napi_complete(napi);
                return 0;
        }

        work_done = xenvif_tx_action(queue, budget);

        if (work_done < budget) {
                napi_complete(napi);
                xenvif_napi_schedule_or_enable_events(queue);
        }

        return work_done;
}

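/* RX event channel handler: the frontend signalled its rx ring, so wake
 * the per-queue guest-rx kthread that pushes queued skbs to the guest.
 */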
static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
        struct xenvif_queue *queue = dev_id;

        xenvif_kick_thread(queue);

        return IRQ_HANDLED;
}

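/* Combined handler used when the frontend shares a single event channel
 * between both rings (feature-split-event-channels == 0).
 */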
irqreturn_t xenvif_interrupt(int irq, void *dev_id)
{
        xenvif_tx_interrupt(irq, dev_id);
        xenvif_rx_interrupt(irq, dev_id);

        return IRQ_HANDLED;
}

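/* Control ring event: wake the control kthread to service the request. */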
irqreturn_t xenvif_ctrl_interrupt(int irq, void *dev_id)
{
        struct xenvif *vif = dev_id;

        wake_up(&vif->ctrl_wq);

        return IRQ_HANDLED;
}

int xenvif_queue_stopped(struct xenvif_queue *queue)
{
        struct net_device *dev = queue->vif->dev;
        unsigned int id = queue->id;

        return netif_tx_queue_stopped(netdev_get_tx_queue(dev, id));
}

void xenvif_wake_queue(struct xenvif_queue *queue)
{
        struct net_device *dev = queue->vif->dev;
        unsigned int id = queue->id;

        netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
}

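/* Pick the tx queue for an skb. With no hash algorithm negotiated this
 * defers to the core fallback; otherwise the negotiated hash selects a
 * queue, possibly through the frontend-supplied mapping table.
 */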
static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
                               void *accel_priv,
                               select_queue_fallback_t fallback)
{
        struct xenvif *vif = netdev_priv(dev);
        unsigned int size = vif->hash.size;

        if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE) {
                u16 index = fallback(dev, skb) % dev->real_num_tx_queues;

                /* Make sure there is no hash information in the socket
                 * buffer otherwise it would be incorrectly forwarded
                 * to the frontend.
                 */
                skb_clear_hash(skb);

                return index;
        }

        xenvif_set_skb_hash(vif, skb);

        if (size == 0)
                return skb_get_hash_raw(skb) % dev->real_num_tx_queues;

        return vif->hash.mapping[skb_get_hash_raw(skb) % size];
}

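/* "Transmit" from the backend's point of view means delivering the packet
 * to the guest, so the skb lands on the internal guest rx queue and the
 * rx kthread is kicked to push it through the shared ring.
 */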
static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues = vif->num_queues;
        u16 index;
        struct xenvif_rx_cb *cb;

        BUG_ON(skb->dev != dev);

        /* Drop the packet if queues are not set up */
        if (num_queues < 1)
                goto drop;

        /* Obtain the queue to be used to transmit this packet */
        index = skb_get_queue_mapping(skb);
        if (index >= num_queues) {
                pr_warn_ratelimited("Invalid queue %hu for packet on interface %s.\n",
                                    index, vif->dev->name);
                index %= num_queues;
        }
        queue = &vif->queues[index];

        /* Drop the packet if queue is not ready */
        if (queue->task == NULL ||
            queue->dealloc_task == NULL ||
            !xenvif_schedulable(vif))
                goto drop;

        if (vif->multicast_control && skb->pkt_type == PACKET_MULTICAST) {
                struct ethhdr *eth = (struct ethhdr *)skb->data;

                if (!xenvif_mcast_match(vif, eth->h_dest))
                        goto drop;
        }

        cb = XENVIF_RX_CB(skb);
        cb->expires = jiffies + vif->drain_timeout;

        xenvif_rx_queue_tail(queue, skb);
        xenvif_kick_thread(queue);

        return NETDEV_TX_OK;

drop:
        vif->dev->stats.tx_dropped++;
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}

static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues = vif->num_queues;
        unsigned long rx_bytes = 0;
        unsigned long rx_packets = 0;
        unsigned long tx_bytes = 0;
        unsigned long tx_packets = 0;
        unsigned int index;

        if (vif->queues == NULL)
                goto out;

        /* Aggregate tx and rx stats from each queue */
        for (index = 0; index < num_queues; ++index) {
                queue = &vif->queues[index];
                rx_bytes += queue->stats.rx_bytes;
                rx_packets += queue->stats.rx_packets;
                tx_bytes += queue->stats.tx_bytes;
                tx_packets += queue->stats.tx_packets;
        }

out:
        vif->dev->stats.rx_bytes = rx_bytes;
        vif->dev->stats.rx_packets = rx_packets;
        vif->dev->stats.tx_bytes = tx_bytes;
        vif->dev->stats.tx_packets = tx_packets;

        return &vif->dev->stats;
}

static void xenvif_up(struct xenvif *vif)
{
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues = vif->num_queues;
        unsigned int queue_index;

        for (queue_index = 0; queue_index < num_queues; ++queue_index) {
                queue = &vif->queues[queue_index];
                napi_enable(&queue->napi);
                enable_irq(queue->tx_irq);
                if (queue->tx_irq != queue->rx_irq)
                        enable_irq(queue->rx_irq);
                xenvif_napi_schedule_or_enable_events(queue);
        }
}

static void xenvif_down(struct xenvif *vif)
{
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues = vif->num_queues;
        unsigned int queue_index;

        for (queue_index = 0; queue_index < num_queues; ++queue_index) {
                queue = &vif->queues[queue_index];
                disable_irq(queue->tx_irq);
                if (queue->tx_irq != queue->rx_irq)
                        disable_irq(queue->rx_irq);
                napi_disable(&queue->napi);
                del_timer_sync(&queue->credit_timeout);
        }
}

static int xenvif_open(struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);

        if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
                xenvif_up(vif);
        netif_tx_start_all_queues(dev);
        return 0;
}

static int xenvif_close(struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);

        if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
                xenvif_down(vif);
        netif_tx_stop_all_queues(dev);
        return 0;
}

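/* With scatter-gather the frontend can accept frames up to the 64KiB
 * maximum, less the VLAN-tagged Ethernet header; without it we are
 * limited to the standard Ethernet MTU.
 */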
static int xenvif_change_mtu(struct net_device *dev, int mtu)
{
        struct xenvif *vif = netdev_priv(dev);
        int max = vif->can_sg ? 65535 - VLAN_ETH_HLEN : ETH_DATA_LEN;

        if (mtu > max)
                return -EINVAL;
        dev->mtu = mtu;
        return 0;
}

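/* Mask out any offload features the frontend has not negotiated, so the
 * stack never hands us skbs the guest cannot receive.
 */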
static netdev_features_t xenvif_fix_features(struct net_device *dev,
                                             netdev_features_t features)
{
        struct xenvif *vif = netdev_priv(dev);

        if (!vif->can_sg)
                features &= ~NETIF_F_SG;
        if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV4))
                features &= ~NETIF_F_TSO;
        if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV6))
                features &= ~NETIF_F_TSO6;
        if (!vif->ip_csum)
                features &= ~NETIF_F_IP_CSUM;
        if (!vif->ipv6_csum)
                features &= ~NETIF_F_IPV6_CSUM;

        return features;
}

static const struct xenvif_stat {
        char name[ETH_GSTRING_LEN];
        u16 offset;
} xenvif_stats[] = {
        {
                "rx_gso_checksum_fixup",
                offsetof(struct xenvif_stats, rx_gso_checksum_fixup)
        },
        /* If (sent != success + fail), there are probably packets never
         * freed up properly!
         */
        {
                "tx_zerocopy_sent",
                offsetof(struct xenvif_stats, tx_zerocopy_sent),
        },
        {
                "tx_zerocopy_success",
                offsetof(struct xenvif_stats, tx_zerocopy_success),
        },
        {
                "tx_zerocopy_fail",
                offsetof(struct xenvif_stats, tx_zerocopy_fail)
        },
        /* Number of packets exceeding MAX_SKB_FRAGS slots. You should use
         * a guest with the same MAX_SKB_FRAGS.
         */
        {
                "tx_frag_overflow",
                offsetof(struct xenvif_stats, tx_frag_overflow)
        },
};

static int xenvif_get_sset_count(struct net_device *dev, int string_set)
{
        switch (string_set) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(xenvif_stats);
        default:
                return -EINVAL;
        }
}

static void xenvif_get_ethtool_stats(struct net_device *dev,
                                     struct ethtool_stats *stats, u64 *data)
{
        struct xenvif *vif = netdev_priv(dev);
        unsigned int num_queues = vif->num_queues;
        int i;
        unsigned int queue_index;

        for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
                unsigned long accum = 0;

                for (queue_index = 0; queue_index < num_queues; ++queue_index) {
                        void *vif_stats = &vif->queues[queue_index].stats;

                        accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset);
                }
                data[i] = accum;
        }
}

static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        int i;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
                        memcpy(data + i * ETH_GSTRING_LEN,
                               xenvif_stats[i].name, ETH_GSTRING_LEN);
                break;
        }
}

static const struct ethtool_ops xenvif_ethtool_ops = {
        .get_link = ethtool_op_get_link,

        .get_sset_count = xenvif_get_sset_count,
        .get_ethtool_stats = xenvif_get_ethtool_stats,
        .get_strings = xenvif_get_strings,
};

static const struct net_device_ops xenvif_netdev_ops = {
        .ndo_select_queue = xenvif_select_queue,
        .ndo_start_xmit = xenvif_start_xmit,
        .ndo_get_stats = xenvif_get_stats,
        .ndo_open = xenvif_open,
        .ndo_stop = xenvif_close,
        .ndo_change_mtu = xenvif_change_mtu,
        .ndo_fix_features = xenvif_fix_features,
        .ndo_set_mac_address = eth_mac_addr,
        .ndo_validate_addr = eth_validate_addr,
};

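/* Allocate and register the netdev backing one frontend vif, named
 * vif<domid>.<handle>. Queues are allocated later, once the frontend
 * has negotiated how many it wants.
 */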
struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
                            unsigned int handle)
{
        int err;
        struct net_device *dev;
        struct xenvif *vif;
        char name[IFNAMSIZ] = {};

        snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
        /* Allocate a netdev with the max. supported number of queues.
         * When the guest selects the desired number, it will be updated
         * via netif_set_real_num_*_queues().
         */
        dev = alloc_netdev_mq(sizeof(struct xenvif), name, NET_NAME_UNKNOWN,
                              ether_setup, xenvif_max_queues);
        if (dev == NULL) {
                pr_warn("Could not allocate netdev for %s\n", name);
                return ERR_PTR(-ENOMEM);
        }

        SET_NETDEV_DEV(dev, parent);

        vif = netdev_priv(dev);

        vif->domid = domid;
        vif->handle = handle;
        vif->can_sg = 1;
        vif->ip_csum = 1;
        vif->dev = dev;
        vif->disabled = false;
        vif->drain_timeout = msecs_to_jiffies(rx_drain_timeout_msecs);
        vif->stall_timeout = msecs_to_jiffies(rx_stall_timeout_msecs);

        /* Start out with no queues. */
        vif->queues = NULL;
        vif->num_queues = 0;

        spin_lock_init(&vif->lock);
        INIT_LIST_HEAD(&vif->fe_mcast_addr);

        dev->netdev_ops = &xenvif_netdev_ops;
        dev->hw_features = NETIF_F_SG |
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                NETIF_F_TSO | NETIF_F_TSO6;
        dev->features = dev->hw_features | NETIF_F_RXCSUM;
        dev->ethtool_ops = &xenvif_ethtool_ops;

        dev->tx_queue_len = XENVIF_QUEUE_LENGTH;

        /*
         * Initialise a dummy MAC address. We choose the numerically
         * largest non-broadcast address to prevent the address getting
         * stolen by an Ethernet bridge for STP purposes.
         * (FE:FF:FF:FF:FF:FF)
         */
        eth_broadcast_addr(dev->dev_addr);
        dev->dev_addr[0] &= ~0x01;

        netif_carrier_off(dev);

        err = register_netdev(dev);
        if (err) {
                netdev_warn(dev, "Could not register device: err=%d\n", err);
                free_netdev(dev);
                return ERR_PTR(err);
        }

        netdev_dbg(dev, "Successfully created xenvif\n");

        __module_get(THIS_MODULE);

        return vif;
}

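/* One-time initialisation of a single queue: rate-limit credit state,
 * the internal skb queues, the pending grant bookkeeping, and the
 * zerocopy callback descriptors.
 */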
int xenvif_init_queue(struct xenvif_queue *queue)
{
        int err, i;

        queue->credit_bytes = queue->remaining_credit = ~0UL;
        queue->credit_usec = 0UL;
        init_timer(&queue->credit_timeout);
        queue->credit_timeout.function = xenvif_tx_credit_callback;
        queue->credit_window_start = get_jiffies_64();

        queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES;

        skb_queue_head_init(&queue->rx_queue);
        skb_queue_head_init(&queue->tx_queue);

        queue->pending_cons = 0;
        queue->pending_prod = MAX_PENDING_REQS;
        for (i = 0; i < MAX_PENDING_REQS; ++i)
                queue->pending_ring[i] = i;

        spin_lock_init(&queue->callback_lock);
        spin_lock_init(&queue->response_lock);

        /* If ballooning is disabled, this will consume real memory, so it
         * is better to enable it. The long-term solution would be to use
         * just a bunch of valid page descriptors, without dependency on
         * ballooning.
         */
        err = gnttab_alloc_pages(MAX_PENDING_REQS,
                                 queue->mmap_pages);
        if (err) {
                netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
                return -ENOMEM;
        }

        for (i = 0; i < MAX_PENDING_REQS; i++) {
                queue->pending_tx_info[i].callback_struct = (struct ubuf_info)
                        { .callback = xenvif_zerocopy_callback,
                          .ctx = NULL,
                          .desc = i };
                queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
        }

        return 0;
}

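/* Bring the link up once the frontend is connected. Runs under rtnl_lock
 * because it may change the MTU and recompute netdev features.
 */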
void xenvif_carrier_on(struct xenvif *vif)
{
        rtnl_lock();
        if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
                dev_set_mtu(vif->dev, ETH_DATA_LEN);
        netdev_update_features(vif->dev);
        set_bit(VIF_STATUS_CONNECTED, &vif->status);
        if (netif_running(vif->dev))
                xenvif_up(vif);
        rtnl_unlock();
}

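/* Map the control ring, bind its event channel and start the control
 * kthread. The error paths unwind in strict reverse order of setup.
 */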
int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
                        unsigned int evtchn)
{
        struct net_device *dev = vif->dev;
        void *addr;
        struct xen_netif_ctrl_sring *shared;
        struct task_struct *task;
        int err = -ENOMEM;

        err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
                                     &ring_ref, 1, &addr);
        if (err)
                goto err;

        shared = (struct xen_netif_ctrl_sring *)addr;
        BACK_RING_INIT(&vif->ctrl, shared, XEN_PAGE_SIZE);

        init_waitqueue_head(&vif->ctrl_wq);

        err = bind_interdomain_evtchn_to_irqhandler(vif->domid, evtchn,
                                                    xenvif_ctrl_interrupt,
                                                    0, dev->name, vif);
        if (err < 0)
                goto err_unmap;

        vif->ctrl_irq = err;

        xenvif_init_hash(vif);

        task = kthread_create(xenvif_ctrl_kthread, (void *)vif,
                              "%s-control", dev->name);
        if (IS_ERR(task)) {
                pr_warn("Could not allocate kthread for %s\n", dev->name);
                err = PTR_ERR(task);
                goto err_deinit;
        }

        get_task_struct(task);
        vif->ctrl_task = task;

        wake_up_process(vif->ctrl_task);

        return 0;

err_deinit:
        xenvif_deinit_hash(vif);
        unbind_from_irqhandler(vif->ctrl_irq, vif);
        vif->ctrl_irq = 0;

err_unmap:
        xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
                                vif->ctrl.sring);
        vif->ctrl.sring = NULL;

err:
        return err;
}

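/* Connect a data queue: map the tx/rx shared rings, bind one or two
 * event channels (depending on feature-split-event-channels) and start
 * the guest-rx and dealloc kthreads for this queue.
 */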
int xenvif_connect_data(struct xenvif_queue *queue,
                        unsigned long tx_ring_ref,
                        unsigned long rx_ring_ref,
                        unsigned int tx_evtchn,
                        unsigned int rx_evtchn)
{
        struct task_struct *task;
        int err = -ENOMEM;

        BUG_ON(queue->tx_irq);
        BUG_ON(queue->task);
        BUG_ON(queue->dealloc_task);

        err = xenvif_map_frontend_data_rings(queue, tx_ring_ref,
                                             rx_ring_ref);
        if (err < 0)
                goto err;

        init_waitqueue_head(&queue->wq);
        init_waitqueue_head(&queue->dealloc_wq);
        atomic_set(&queue->inflight_packets, 0);

        netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
                       XENVIF_NAPI_WEIGHT);

        if (tx_evtchn == rx_evtchn) {
                /* feature-split-event-channels == 0 */
                err = bind_interdomain_evtchn_to_irqhandler(
                        queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
                        queue->name, queue);
                if (err < 0)
                        goto err_unmap;
                queue->tx_irq = queue->rx_irq = err;
                disable_irq(queue->tx_irq);
        } else {
                /* feature-split-event-channels == 1 */
                snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
                         "%s-tx", queue->name);
                err = bind_interdomain_evtchn_to_irqhandler(
                        queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
                        queue->tx_irq_name, queue);
                if (err < 0)
                        goto err_unmap;
                queue->tx_irq = err;
                disable_irq(queue->tx_irq);

                snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
                         "%s-rx", queue->name);
                err = bind_interdomain_evtchn_to_irqhandler(
                        queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
                        queue->rx_irq_name, queue);
                if (err < 0)
                        goto err_tx_unbind;
                queue->rx_irq = err;
                disable_irq(queue->rx_irq);
        }

        queue->stalled = true;

        task = kthread_create(xenvif_kthread_guest_rx,
                              (void *)queue, "%s-guest-rx", queue->name);
        if (IS_ERR(task)) {
                pr_warn("Could not allocate kthread for %s\n", queue->name);
                err = PTR_ERR(task);
                goto err_rx_unbind;
        }
        queue->task = task;
        get_task_struct(task);

        task = kthread_create(xenvif_dealloc_kthread,
                              (void *)queue, "%s-dealloc", queue->name);
        if (IS_ERR(task)) {
                pr_warn("Could not allocate kthread for %s\n", queue->name);
                err = PTR_ERR(task);
                goto err_rx_unbind;
        }
        queue->dealloc_task = task;

        wake_up_process(queue->task);
        wake_up_process(queue->dealloc_task);

        return 0;

err_rx_unbind:
        unbind_from_irqhandler(queue->rx_irq, queue);
        queue->rx_irq = 0;
err_tx_unbind:
        unbind_from_irqhandler(queue->tx_irq, queue);
        queue->tx_irq = 0;
err_unmap:
        xenvif_unmap_frontend_data_rings(queue);
        netif_napi_del(&queue->napi);
err:
        module_put(THIS_MODULE);
        return err;
}

void xenvif_carrier_off(struct xenvif *vif)
{
        struct net_device *dev = vif->dev;

        rtnl_lock();
        if (test_and_clear_bit(VIF_STATUS_CONNECTED, &vif->status)) {
                netif_carrier_off(dev); /* discard queued packets */
                if (netif_running(dev))
                        xenvif_down(vif);
        }
        rtnl_unlock();
}

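/* Tear down everything set up by xenvif_connect_data() for each queue:
 * stop the kthreads, unbind the irqs and unmap the shared rings.
 */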
void xenvif_disconnect_data(struct xenvif *vif)
{
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues = vif->num_queues;
        unsigned int queue_index;

        xenvif_carrier_off(vif);

        for (queue_index = 0; queue_index < num_queues; ++queue_index) {
                queue = &vif->queues[queue_index];

                netif_napi_del(&queue->napi);

                if (queue->task) {
                        kthread_stop(queue->task);
                        put_task_struct(queue->task);
                        queue->task = NULL;
                }

                if (queue->dealloc_task) {
                        kthread_stop(queue->dealloc_task);
                        queue->dealloc_task = NULL;
                }

                if (queue->tx_irq) {
                        if (queue->tx_irq == queue->rx_irq)
                                unbind_from_irqhandler(queue->tx_irq, queue);
                        else {
                                unbind_from_irqhandler(queue->tx_irq, queue);
                                unbind_from_irqhandler(queue->rx_irq, queue);
                        }
                        queue->tx_irq = 0;
                }

                xenvif_unmap_frontend_data_rings(queue);
        }

        xenvif_mcast_addr_list_free(vif);
}

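/* Undo xenvif_connect_ctrl(); each step is guarded, so this is safe to
 * call even if connecting the control ring failed partway through.
 */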
void xenvif_disconnect_ctrl(struct xenvif *vif)
{
        if (vif->ctrl_task) {
                kthread_stop(vif->ctrl_task);
                put_task_struct(vif->ctrl_task);
                vif->ctrl_task = NULL;
        }

        if (vif->ctrl_irq) {
                xenvif_deinit_hash(vif);
                unbind_from_irqhandler(vif->ctrl_irq, vif);
                vif->ctrl_irq = 0;
        }

        if (vif->ctrl.sring) {
                xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
                                        vif->ctrl.sring);
                vif->ctrl.sring = NULL;
        }
}

/* Reverse the relevant parts of xenvif_init_queue().
 * Used for queue teardown from xenvif_free(), and on the
 * error handling paths in xenbus.c:connect().
 */
void xenvif_deinit_queue(struct xenvif_queue *queue)
{
        gnttab_free_pages(MAX_PENDING_REQS, queue->mmap_pages);
}

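/* Final teardown. The netdev is unregistered before the queues are
 * deinitialised; the queue array itself lives in a separate vmalloc'ed
 * block (hence vfree), not in the netdev private area.
 */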
void xenvif_free(struct xenvif *vif)
{
        struct xenvif_queue *queues = vif->queues;
        unsigned int num_queues = vif->num_queues;
        unsigned int queue_index;

        unregister_netdev(vif->dev);
        free_netdev(vif->dev);

        for (queue_index = 0; queue_index < num_queues; ++queue_index)
                xenvif_deinit_queue(&queues[queue_index]);
        vfree(queues);

        module_put(THIS_MODULE);
}