Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index cd0bd95ccc14b5ac4cac7e13aed5c58ba5280e23..ae34f5fc7fbc503f0feda999a0f729e5ab08cdc7 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -37,6 +37,7 @@
 #include <linux/kthread.h>
 #include <linux/if_vlan.h>
 #include <linux/udp.h>
+#include <linux/highmem.h>
 
 #include <net/tcp.h>
 
 bool separate_tx_rx_irq = 1;
 module_param(separate_tx_rx_irq, bool, 0644);
 
+/* When the guest RX ring is filled up, the qdisc queues packets for us, but
+ * we have to time them out, otherwise other guests' packets can get stuck
+ * there.
+ */
+unsigned int rx_drain_timeout_msecs = 10000;
+module_param(rx_drain_timeout_msecs, uint, 0444);
+unsigned int rx_drain_timeout_jiffies;
+
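The drain timeout is consumed in the transmit path: when the ring fills up, the queue is stopped and a timer is armed so stuck packets can eventually be purged. A minimal sketch of the arming, assuming the vif->wake_queue timer this series adds in interface.c (the real call site is xenvif_start_xmit(), not this file):

        /* Sketch only, not part of this patch. */
        static void xenvif_stop_queue_with_timeout(struct xenvif *vif)
        {
                /* The qdisc keeps queueing behind us from here on. */
                netif_stop_queue(vif->dev);
                /* If the frontend never drains its ring, fire after the
                 * configured timeout so other guests' traffic is not
                 * held up behind ours.
                 */
                mod_timer(&vif->wake_queue,
                          jiffies + rx_drain_timeout_jiffies);
        }
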
 /*
  * This is the maximum number of slots an skb can have. If a guest sends
  * an skb which exceeds this limit, it is considered malicious.
@@ -62,24 +70,6 @@ module_param(separate_tx_rx_irq, bool, 0644);
 static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
 module_param(fatal_skb_slots, uint, 0444);
 
-/*
- * To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating
- * the maximum slots a valid packet can use. Now this value is defined
- * to be XEN_NETIF_NR_SLOTS_MIN, which is supposed to be supported by
- * all backend.
- */
-#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN
-
-/*
- * If head != INVALID_PENDING_RING_IDX, it means this tx request is head of
- * one or more merged tx requests, otherwise it is the continuation of
- * previous tx request.
- */
-static inline int pending_tx_is_head(struct xenvif *vif, RING_IDX idx)
-{
-       return vif->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX;
-}
-
 static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
                               u8 status);
 
@@ -109,6 +99,21 @@ static inline unsigned long idx_to_kaddr(struct xenvif *vif,
        return (unsigned long)pfn_to_kaddr(idx_to_pfn(vif, idx));
 }
 
+#define callback_param(vif, pending_idx) \
+       (vif->pending_tx_info[pending_idx].callback_struct)
+
+/* Find the containing VIF's structure from a pointer in the pending_tx_info
+ * array.
+ */
+static inline struct xenvif *ubuf_to_vif(struct ubuf_info *ubuf)
+{
+       u16 pending_idx = ubuf->desc;
+       struct pending_tx_info *temp =
+               container_of(ubuf, struct pending_tx_info, callback_struct);
+       return container_of(temp - pending_idx,
+                           struct xenvif,
+                           pending_tx_info[0]);
+}
+
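ubuf_to_vif() relies on a two-step container_of(): temp points at entry pending_idx of pending_tx_info[], so temp - pending_idx points at entry 0, from which the enclosing struct xenvif can be recovered. The same pattern in isolation, with hypothetical names:

        /* Illustration only: recover an outer structure from a pointer
         * to an arbitrary element of an array embedded in it.
         */
        struct inner { int payload; };
        struct outer { struct inner arr[16]; };

        static struct outer *element_to_outer(struct inner *elem,
                                              unsigned int idx)
        {
                /* Step back to arr[0], then to the start of outer. */
                return container_of(elem - idx, struct outer, arr[0]);
        }
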
 /* This is a minimum size for the linear area to avoid lots of
  * calls to __pskb_pull_tail() as we set up checksum offsets. The
  * value 128 was chosen as it covers all IPv4 and most likely
@@ -131,12 +136,6 @@ static inline pending_ring_idx_t pending_index(unsigned i)
        return i & (MAX_PENDING_REQS-1);
 }
 
-static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif)
-{
-       return MAX_PENDING_REQS -
-               vif->pending_prod + vif->pending_cons;
-}
-
 bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed)
 {
        RING_IDX prod, cons;
@@ -235,7 +234,9 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
 static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
                                 struct netrx_pending_operations *npo,
                                 struct page *page, unsigned long size,
-                                unsigned long offset, int *head)
+                                unsigned long offset, int *head,
+                                struct xenvif *foreign_vif,
+                                grant_ref_t foreign_gref)
 {
        struct gnttab_copy *copy_gop;
        struct xenvif_rx_meta *meta;
@@ -277,8 +278,15 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
                copy_gop->flags = GNTCOPY_dest_gref;
                copy_gop->len = bytes;
 
-               copy_gop->source.domid = DOMID_SELF;
-               copy_gop->source.u.gmfn = virt_to_mfn(page_address(page));
+               if (foreign_vif) {
+                       copy_gop->source.domid = foreign_vif->domid;
+                       copy_gop->source.u.ref = foreign_gref;
+                       copy_gop->flags |= GNTCOPY_source_gref;
+               } else {
+                       copy_gop->source.domid = DOMID_SELF;
+                       copy_gop->source.u.gmfn =
+                               virt_to_mfn(page_address(page));
+               }
                copy_gop->source.offset = offset;
 
                copy_gop->dest.domid = vif->domid;
@@ -338,6 +346,9 @@ static int xenvif_gop_skb(struct sk_buff *skb,
        int head = 1;
        int old_meta_prod;
        int gso_type;
+       struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
+       grant_ref_t foreign_grefs[MAX_SKB_FRAGS];
+       struct xenvif *foreign_vif = NULL;
 
        old_meta_prod = npo->meta_prod;
 
@@ -375,6 +386,19 @@ static int xenvif_gop_skb(struct sk_buff *skb,
        npo->copy_off = 0;
        npo->copy_gref = req->gref;
 
+       if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
+                (ubuf->callback == &xenvif_zerocopy_callback)) {
+               int i = 0;
+               foreign_vif = ubuf_to_vif(ubuf);
+
+               do {
+                       u16 pending_idx = ubuf->desc;
+                       foreign_grefs[i++] =
+                               foreign_vif->pending_tx_info[pending_idx].req.gref;
+                       ubuf = (struct ubuf_info *) ubuf->ctx;
+               } while (ubuf);
+       }
+
        data = skb->data;
        while (data < skb_tail_pointer(skb)) {
                unsigned int offset = offset_in_page(data);
@@ -384,7 +408,9 @@ static int xenvif_gop_skb(struct sk_buff *skb,
                        len = skb_tail_pointer(skb) - data;
 
                xenvif_gop_frag_copy(vif, skb, npo,
-                                    virt_to_page(data), len, offset, &head);
+                                    virt_to_page(data), len, offset, &head,
+                                    NULL,
+                                    0);
                data += len;
        }
 
@@ -393,7 +419,9 @@ static int xenvif_gop_skb(struct sk_buff *skb,
                                     skb_frag_page(&skb_shinfo(skb)->frags[i]),
                                     skb_frag_size(&skb_shinfo(skb)->frags[i]),
                                     skb_shinfo(skb)->frags[i].page_offset,
-                                    &head);
+                                    &head,
+                                    foreign_vif,
+                                    foreign_grefs[i]);
        }
 
        return npo->meta_prod - old_meta_prod;
@@ -451,10 +479,12 @@ static void xenvif_add_frag_responses(struct xenvif *vif, int status,
        }
 }
 
-struct skb_cb_overlay {
+struct xenvif_rx_cb {
        int meta_slots_used;
 };
 
+#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
+
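XENVIF_RX_CB(), like XENVIF_TX_CB() further down, overlays a private structure on the 48-byte skb->cb[] scratch area, which the current owner of an skb may use freely. A compile-time guard along these lines would catch the control block outgrowing cb[]; a sketch, not something the patch adds:

        static inline void xenvif_rx_cb_size_check(void)
        {
                BUILD_BUG_ON(sizeof(struct xenvif_rx_cb) >
                             sizeof(((struct sk_buff *)0)->cb));
        }
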
 void xenvif_kick_thread(struct xenvif *vif)
 {
        wake_up(&vif->wq);
@@ -470,7 +500,6 @@ static void xenvif_rx_action(struct xenvif *vif)
        LIST_HEAD(notify);
        int ret;
        unsigned long offset;
-       struct skb_cb_overlay *sco;
        bool need_to_notify = false;
 
        struct netrx_pending_operations npo = {
@@ -531,10 +560,8 @@ static void xenvif_rx_action(struct xenvif *vif)
                } else
                        vif->rx_last_skb_slots = 0;
 
-               sco = (struct skb_cb_overlay *)skb->cb;
-
                old_req_cons = vif->rx.req_cons;
-               sco->meta_slots_used = xenvif_gop_skb(skb, &npo);
+               XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo);
                ring_slots_used = vif->rx.req_cons - old_req_cons;
 
                BUG_ON(ring_slots_used > max_slots_needed);
@@ -551,7 +578,6 @@ static void xenvif_rx_action(struct xenvif *vif)
        gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);
 
        while ((skb = __skb_dequeue(&rxq)) != NULL) {
-               sco = (struct skb_cb_overlay *)skb->cb;
 
                if ((1 << vif->meta[npo.meta_cons].gso_type) &
                    vif->gso_prefix_mask) {
@@ -562,19 +588,21 @@ static void xenvif_rx_action(struct xenvif *vif)
 
                        resp->offset = vif->meta[npo.meta_cons].gso_size;
                        resp->id = vif->meta[npo.meta_cons].id;
-                       resp->status = sco->meta_slots_used;
+                       resp->status = XENVIF_RX_CB(skb)->meta_slots_used;
 
                        npo.meta_cons++;
-                       sco->meta_slots_used--;
+                       XENVIF_RX_CB(skb)->meta_slots_used--;
                }
 
 
                vif->dev->stats.tx_bytes += skb->len;
                vif->dev->stats.tx_packets++;
 
-               status = xenvif_check_gop(vif, sco->meta_slots_used, &npo);
+               status = xenvif_check_gop(vif,
+                                         XENVIF_RX_CB(skb)->meta_slots_used,
+                                         &npo);
 
-               if (sco->meta_slots_used == 1)
+               if (XENVIF_RX_CB(skb)->meta_slots_used == 1)
                        flags = 0;
                else
                        flags = XEN_NETRXF_more_data;
@@ -611,13 +639,13 @@ static void xenvif_rx_action(struct xenvif *vif)
 
                xenvif_add_frag_responses(vif, status,
                                          vif->meta + npo.meta_cons + 1,
-                                         sco->meta_slots_used);
+                                         XENVIF_RX_CB(skb)->meta_slots_used);
 
                RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
 
                need_to_notify |= !!ret;
 
-               npo.meta_cons += sco->meta_slots_used;
+               npo.meta_cons += XENVIF_RX_CB(skb)->meta_slots_used;
                dev_kfree_skb(skb);
        }
 
@@ -667,9 +695,12 @@ static void xenvif_tx_err(struct xenvif *vif,
                          struct xen_netif_tx_request *txp, RING_IDX end)
 {
        RING_IDX cons = vif->tx.req_cons;
+       unsigned long flags;
 
        do {
+               spin_lock_irqsave(&vif->response_lock, flags);
                make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
+               spin_unlock_irqrestore(&vif->response_lock, flags);
                if (cons == end)
                        break;
                txp = RING_GET_REQUEST(&vif->tx, cons++);
@@ -781,180 +812,168 @@ static int xenvif_count_requests(struct xenvif *vif,
        return slots;
 }
 
-static struct page *xenvif_alloc_page(struct xenvif *vif,
-                                     u16 pending_idx)
+
+struct xenvif_tx_cb {
+       u16 pending_idx;
+};
+
+#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
+
+static inline void xenvif_tx_create_gop(struct xenvif *vif,
+                                       u16 pending_idx,
+                                       struct xen_netif_tx_request *txp,
+                                       struct gnttab_map_grant_ref *gop)
 {
-       struct page *page;
+       vif->pages_to_map[gop-vif->tx_map_ops] = vif->mmap_pages[pending_idx];
+       gnttab_set_map_op(gop, idx_to_kaddr(vif, pending_idx),
+                         GNTMAP_host_map | GNTMAP_readonly,
+                         txp->gref, vif->domid);
 
-       page = alloc_page(GFP_ATOMIC|__GFP_COLD);
-       if (!page)
+       memcpy(&vif->pending_tx_info[pending_idx].req, txp,
+              sizeof(*txp));
+}
+
+static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
+{
+       struct sk_buff *skb =
+               alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
+                         GFP_ATOMIC | __GFP_NOWARN);
+       if (unlikely(skb == NULL))
                return NULL;
-       vif->mmap_pages[pending_idx] = page;
 
-       return page;
+       /* Packets passed to netif_rx() must have some headroom. */
+       skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+
+       /* Initialize it here to avoid later surprises */
+       skb_shinfo(skb)->destructor_arg = NULL;
+
+       return skb;
 }
 
-static struct gnttab_copy *xenvif_get_requests(struct xenvif *vif,
-                                              struct sk_buff *skb,
-                                              struct xen_netif_tx_request *txp,
-                                              struct gnttab_copy *gop)
+static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif *vif,
+                                                       struct sk_buff *skb,
+                                                       struct xen_netif_tx_request *txp,
+                                                       struct gnttab_map_grant_ref *gop)
 {
        struct skb_shared_info *shinfo = skb_shinfo(skb);
        skb_frag_t *frags = shinfo->frags;
-       u16 pending_idx = *((u16 *)skb->data);
-       u16 head_idx = 0;
-       int slot, start;
-       struct page *page;
-       pending_ring_idx_t index, start_idx = 0;
-       uint16_t dst_offset;
-       unsigned int nr_slots;
-       struct pending_tx_info *first = NULL;
+       u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
+       int start;
+       pending_ring_idx_t index;
+       unsigned int nr_slots, frag_overflow = 0;
 
        /* At this point shinfo->nr_frags is in fact the number of
         * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
         */
+       if (shinfo->nr_frags > MAX_SKB_FRAGS) {
+               frag_overflow = shinfo->nr_frags - MAX_SKB_FRAGS;
+               BUG_ON(frag_overflow > MAX_SKB_FRAGS);
+               shinfo->nr_frags = MAX_SKB_FRAGS;
+       }
        nr_slots = shinfo->nr_frags;
 
        /* Skip first skb fragment if it is on same page as header fragment. */
        start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
 
-       /* Coalesce tx requests, at this point the packet passed in
-        * should be <= 64K. Any packets larger than 64K have been
-        * handled in xenvif_count_requests().
-        */
-       for (shinfo->nr_frags = slot = start; slot < nr_slots;
-            shinfo->nr_frags++) {
-               struct pending_tx_info *pending_tx_info =
-                       vif->pending_tx_info;
+       for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
+            shinfo->nr_frags++, txp++, gop++) {
+               index = pending_index(vif->pending_cons++);
+               pending_idx = vif->pending_ring[index];
+               xenvif_tx_create_gop(vif, pending_idx, txp, gop);
+               frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
+       }
 
-               page = alloc_page(GFP_ATOMIC|__GFP_COLD);
-               if (!page)
-                       goto err;
-
-               dst_offset = 0;
-               first = NULL;
-               while (dst_offset < PAGE_SIZE && slot < nr_slots) {
-                       gop->flags = GNTCOPY_source_gref;
-
-                       gop->source.u.ref = txp->gref;
-                       gop->source.domid = vif->domid;
-                       gop->source.offset = txp->offset;
-
-                       gop->dest.domid = DOMID_SELF;
-
-                       gop->dest.offset = dst_offset;
-                       gop->dest.u.gmfn = virt_to_mfn(page_address(page));
-
-                       if (dst_offset + txp->size > PAGE_SIZE) {
-                               /* This page can only merge a portion
-                                * of tx request. Do not increment any
-                                * pointer / counter here. The txp
-                                * will be dealt with in future
-                                * rounds, eventually hitting the
-                                * `else` branch.
-                                */
-                               gop->len = PAGE_SIZE - dst_offset;
-                               txp->offset += gop->len;
-                               txp->size -= gop->len;
-                               dst_offset += gop->len; /* quit loop */
-                       } else {
-                               /* This tx request can be merged in the page */
-                               gop->len = txp->size;
-                               dst_offset += gop->len;
-
-                               index = pending_index(vif->pending_cons++);
-
-                               pending_idx = vif->pending_ring[index];
-
-                               memcpy(&pending_tx_info[pending_idx].req, txp,
-                                      sizeof(*txp));
-
-                               /* Poison these fields, corresponding
-                                * fields for head tx req will be set
-                                * to correct values after the loop.
-                                */
-                               vif->mmap_pages[pending_idx] = (void *)(~0UL);
-                               pending_tx_info[pending_idx].head =
-                                       INVALID_PENDING_RING_IDX;
-
-                               if (!first) {
-                                       first = &pending_tx_info[pending_idx];
-                                       start_idx = index;
-                                       head_idx = pending_idx;
-                               }
-
-                               txp++;
-                               slot++;
-                       }
+       if (frag_overflow) {
+               struct sk_buff *nskb = xenvif_alloc_skb(0);
+
+               if (unlikely(nskb == NULL)) {
+                       if (net_ratelimit())
+                               netdev_err(vif->dev,
+                                          "Can't allocate the frag_list skb.\n");
+                       return NULL;
+               }
 
-                       gop++;
+               shinfo = skb_shinfo(nskb);
+               frags = shinfo->frags;
+
+               for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
+                    shinfo->nr_frags++, txp++, gop++) {
+                       index = pending_index(vif->pending_cons++);
+                       pending_idx = vif->pending_ring[index];
+                       xenvif_tx_create_gop(vif, pending_idx, txp, gop);
+                       frag_set_pending_idx(&frags[shinfo->nr_frags],
+                                            pending_idx);
                }
 
-               first->req.offset = 0;
-               first->req.size = dst_offset;
-               first->head = start_idx;
-               vif->mmap_pages[head_idx] = page;
-               frag_set_pending_idx(&frags[shinfo->nr_frags], head_idx);
+               skb_shinfo(skb)->frag_list = nskb;
        }
 
-       BUG_ON(shinfo->nr_frags > MAX_SKB_FRAGS);
-
        return gop;
-err:
-       /* Unwind, freeing all pages and sending error responses. */
-       while (shinfo->nr_frags-- > start) {
-               xenvif_idx_release(vif,
-                               frag_get_pending_idx(&frags[shinfo->nr_frags]),
-                               XEN_NETIF_RSP_ERROR);
+}
+
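Slots beyond MAX_SKB_FRAGS now land in a second skb chained via frag_list, which xenvif_handle_frag_list() below flattens back into a single skb before the packet leaves netback. A sketch, using a hypothetical helper, of what the compound layout looks like to a consumer:

        /* Total slots carried by a possibly-compound skb as built above. */
        static unsigned int xenvif_total_frag_slots(struct sk_buff *skb)
        {
                unsigned int n = skb_shinfo(skb)->nr_frags;

                if (skb_has_frag_list(skb))
                        n += skb_shinfo(skb_shinfo(skb)->frag_list)->nr_frags;
                return n;
        }
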
+static inline void xenvif_grant_handle_set(struct xenvif *vif,
+                                          u16 pending_idx,
+                                          grant_handle_t handle)
+{
+       if (unlikely(vif->grant_tx_handle[pending_idx] !=
+                    NETBACK_INVALID_HANDLE)) {
+               netdev_err(vif->dev,
+                          "Trying to overwrite active handle! pending_idx: %x\n",
+                          pending_idx);
+               BUG();
        }
-       /* The head too, if necessary. */
-       if (start)
-               xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
+       vif->grant_tx_handle[pending_idx] = handle;
+}
 
-       return NULL;
+static inline void xenvif_grant_handle_reset(struct xenvif *vif,
+                                            u16 pending_idx)
+{
+       if (unlikely(vif->grant_tx_handle[pending_idx] ==
+                    NETBACK_INVALID_HANDLE)) {
+               netdev_err(vif->dev,
+                          "Trying to unmap invalid handle! pending_idx: %x\n",
+                          pending_idx);
+               BUG();
+       }
+       vif->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
 }
 
 static int xenvif_tx_check_gop(struct xenvif *vif,
                               struct sk_buff *skb,
-                              struct gnttab_copy **gopp)
+                              struct gnttab_map_grant_ref **gopp)
 {
-       struct gnttab_copy *gop = *gopp;
-       u16 pending_idx = *((u16 *)skb->data);
+       struct gnttab_map_grant_ref *gop = *gopp;
+       u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
        struct skb_shared_info *shinfo = skb_shinfo(skb);
        struct pending_tx_info *tx_info;
        int nr_frags = shinfo->nr_frags;
        int i, err, start;
-       u16 peek; /* peek into next tx request */
+       struct sk_buff *first_skb = NULL;
 
        /* Check status of header. */
        err = gop->status;
        if (unlikely(err))
                xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
+       else
+               xenvif_grant_handle_set(vif, pending_idx, gop->handle);
 
        /* Skip first skb fragment if it is on same page as header fragment. */
        start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
 
+check_frags:
        for (i = start; i < nr_frags; i++) {
                int j, newerr;
-               pending_ring_idx_t head;
 
                pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
                tx_info = &vif->pending_tx_info[pending_idx];
-               head = tx_info->head;
 
                /* Check error status: if okay then remember grant handle. */
-               do {
-                       newerr = (++gop)->status;
-                       if (newerr)
-                               break;
-                       peek = vif->pending_ring[pending_index(++head)];
-               } while (!pending_tx_is_head(vif, peek));
+               newerr = (++gop)->status;
 
                if (likely(!newerr)) {
+                       xenvif_grant_handle_set(vif, pending_idx, gop->handle);
                        /* Had a previous error? Invalidate this fragment. */
                        if (unlikely(err))
-                               xenvif_idx_release(vif, pending_idx,
-                                                  XEN_NETIF_RSP_OKAY);
+                               xenvif_idx_unmap(vif, pending_idx);
                        continue;
                }
 
@@ -964,20 +983,45 @@ static int xenvif_tx_check_gop(struct xenvif *vif,
                /* Not the first error? Preceding frags already invalidated. */
                if (err)
                        continue;
-
                /* First error: invalidate header and preceding fragments. */
-               pending_idx = *((u16 *)skb->data);
-               xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
+               if (!first_skb)
+                       pending_idx = XENVIF_TX_CB(skb)->pending_idx;
+               else
+                       pending_idx = XENVIF_TX_CB(first_skb)->pending_idx;
+               xenvif_idx_unmap(vif, pending_idx);
                for (j = start; j < i; j++) {
                        pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
-                       xenvif_idx_release(vif, pending_idx,
-                                          XEN_NETIF_RSP_OKAY);
+                       xenvif_idx_unmap(vif, pending_idx);
                }
 
                /* Remember the error: invalidate all subsequent fragments. */
                err = newerr;
        }
 
+       if (skb_has_frag_list(skb)) {
+               first_skb = skb;
+               skb = shinfo->frag_list;
+               shinfo = skb_shinfo(skb);
+               nr_frags = shinfo->nr_frags;
+               start = 0;
+
+               goto check_frags;
+       }
+
+       /* There was a mapping error in the frag_list skb. We have to unmap
+        * the first skb's frags
+        */
+       if (first_skb && err) {
+               int j;
+               shinfo = skb_shinfo(first_skb);
+               pending_idx = XENVIF_TX_CB(first_skb)->pending_idx;
+               start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
+               for (j = start; j < shinfo->nr_frags; j++) {
+                       pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
+                       xenvif_idx_unmap(vif, pending_idx);
+               }
+       }
+
        *gopp = gop + 1;
        return err;
 }
@@ -987,6 +1031,10 @@ static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
        struct skb_shared_info *shinfo = skb_shinfo(skb);
        int nr_frags = shinfo->nr_frags;
        int i;
+       u16 prev_pending_idx = INVALID_PENDING_IDX;
+
+       if (skb_shinfo(skb)->destructor_arg)
+               prev_pending_idx = XENVIF_TX_CB(skb)->pending_idx;
 
        for (i = 0; i < nr_frags; i++) {
                skb_frag_t *frag = shinfo->frags + i;
@@ -996,6 +1044,17 @@ static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
 
                pending_idx = frag_get_pending_idx(frag);
 
+               /* If this is not the first frag, chain it to the previous one */
+               if (unlikely(prev_pending_idx == INVALID_PENDING_IDX))
+                       skb_shinfo(skb)->destructor_arg =
+                               &callback_param(vif, pending_idx);
+               else if (likely(pending_idx != prev_pending_idx))
+                       callback_param(vif, prev_pending_idx).ctx =
+                               &callback_param(vif, pending_idx);
+
+               callback_param(vif, pending_idx).ctx = NULL;
+               prev_pending_idx = pending_idx;
+
                txp = &vif->pending_tx_info[pending_idx].req;
                page = virt_to_page(idx_to_kaddr(vif, pending_idx));
                __skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
@@ -1003,10 +1062,15 @@ static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
                skb->data_len += txp->size;
                skb->truesize += txp->size;
 
-               /* Take an extra reference to offset xenvif_idx_release */
+               /* Take an extra reference to offset network stack's put_page */
                get_page(vif->mmap_pages[pending_idx]);
-               xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
        }
+       /* FIXME: __skb_fill_page_desc set this to true because page->pfmemalloc
+        * overlaps with "index", and "mapping" is not set. I think mapping
+        * should be set. If delivered to the local stack, sk_filter would
+        * drop this skb unless the socket has the right to use it.
+        */
+       skb->pfmemalloc = false;
 }
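After xenvif_fill_frags() the frags are backed by grant-mapped pages and destructor_arg heads a NULL-terminated chain of ubuf_info linked through ctx, one per pending slot. A sketch of walking that chain, mirroring the traversal in xenvif_zerocopy_callback() below (hypothetical helper, shown only to make the list structure explicit):

        static void xenvif_walk_ubuf_chain(struct sk_buff *skb)
        {
                struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;

                while (ubuf) {
                        u16 pending_idx = ubuf->desc;   /* slot index */

                        pr_debug("pending slot %u\n", pending_idx);
                        ubuf = (struct ubuf_info *)ubuf->ctx;   /* next */
                }
        }
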
 
 static int xenvif_get_extras(struct xenvif *vif,
@@ -1126,16 +1190,13 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
 
 static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
 {
-       struct gnttab_copy *gop = vif->tx_copy_ops, *request_gop;
+       struct gnttab_map_grant_ref *gop = vif->tx_map_ops, *request_gop;
        struct sk_buff *skb;
        int ret;
 
-       while ((nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
-               < MAX_PENDING_REQS) &&
-              (skb_queue_len(&vif->tx_queue) < budget)) {
+       while (skb_queue_len(&vif->tx_queue) < budget) {
                struct xen_netif_tx_request txreq;
                struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
-               struct page *page;
                struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
                u16 pending_idx;
                RING_IDX idx;
@@ -1211,8 +1272,7 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
                            ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
                        PKT_PROT_LEN : txreq.size;
 
-               skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
-                               GFP_ATOMIC | __GFP_NOWARN);
+               skb = xenvif_alloc_skb(data_len);
                if (unlikely(skb == NULL)) {
                        netdev_dbg(vif->dev,
                                   "Can't allocate a skb in start_xmit.\n");
@@ -1220,9 +1280,6 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
                        break;
                }
 
-               /* Packets passed to netif_rx() must have some headroom. */
-               skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
-
                if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
                        struct xen_netif_extra_info *gso;
                        gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
@@ -1234,31 +1291,11 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
                        }
                }
 
-               /* XXX could copy straight to head */
-               page = xenvif_alloc_page(vif, pending_idx);
-               if (!page) {
-                       kfree_skb(skb);
-                       xenvif_tx_err(vif, &txreq, idx);
-                       break;
-               }
-
-               gop->source.u.ref = txreq.gref;
-               gop->source.domid = vif->domid;
-               gop->source.offset = txreq.offset;
-
-               gop->dest.u.gmfn = virt_to_mfn(page_address(page));
-               gop->dest.domid = DOMID_SELF;
-               gop->dest.offset = txreq.offset;
-
-               gop->len = txreq.size;
-               gop->flags = GNTCOPY_source_gref;
+               xenvif_tx_create_gop(vif, pending_idx, &txreq, gop);
 
                gop++;
 
-               memcpy(&vif->pending_tx_info[pending_idx].req,
-                      &txreq, sizeof(txreq));
-               vif->pending_tx_info[pending_idx].head = index;
-               *((u16 *)skb->data) = pending_idx;
+               XENVIF_TX_CB(skb)->pending_idx = pending_idx;
 
                __skb_put(skb, data_len);
 
@@ -1286,17 +1323,82 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
 
                vif->tx.req_cons = idx;
 
-               if ((gop-vif->tx_copy_ops) >= ARRAY_SIZE(vif->tx_copy_ops))
+               if ((gop-vif->tx_map_ops) >= ARRAY_SIZE(vif->tx_map_ops))
                        break;
        }
 
-       return gop - vif->tx_copy_ops;
+       return gop - vif->tx_map_ops;
 }
 
+/* Consolidate an skb with a frag_list into a brand new one with local pages
+ * on frags. Returns 0 on success or -ENOMEM if new pages can't be allocated.
+ */
+static int xenvif_handle_frag_list(struct xenvif *vif, struct sk_buff *skb)
+{
+       unsigned int offset = skb_headlen(skb);
+       skb_frag_t frags[MAX_SKB_FRAGS];
+       int i;
+       struct ubuf_info *uarg;
+       struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
+
+       vif->tx_zerocopy_sent += 2;
+       vif->tx_frag_overflow++;
+
+       xenvif_fill_frags(vif, nskb);
+       /* Subtract the frags' size; we will correct it later */
+       skb->truesize -= skb->data_len;
+       skb->len += nskb->len;
+       skb->data_len += nskb->len;
+
+       /* create a brand new frags array and coalesce there */
+       for (i = 0; offset < skb->len; i++) {
+               struct page *page;
+               unsigned int len;
+
+               BUG_ON(i >= MAX_SKB_FRAGS);
+               page = alloc_page(GFP_ATOMIC|__GFP_COLD);
+               if (!page) {
+                       int j;
+                       skb->truesize += skb->data_len;
+                       for (j = 0; j < i; j++)
+                               put_page(frags[j].page.p);
+                       return -ENOMEM;
+               }
+
+               if (offset + PAGE_SIZE < skb->len)
+                       len = PAGE_SIZE;
+               else
+                       len = skb->len - offset;
+               if (skb_copy_bits(skb, offset, page_address(page), len))
+                       BUG();
+
+               offset += len;
+               frags[i].page.p = page;
+               frags[i].page_offset = 0;
+               skb_frag_size_set(&frags[i], len);
+       }
+       /* swap out with old one */
+       memcpy(skb_shinfo(skb)->frags,
+              frags,
+              i * sizeof(skb_frag_t));
+       skb_shinfo(skb)->nr_frags = i;
+       skb->truesize += i * PAGE_SIZE;
+
+       /* remove traces of mapped pages and frag_list */
+       skb_frag_list_init(skb);
+       uarg = skb_shinfo(skb)->destructor_arg;
+       uarg->callback(uarg, true);
+       skb_shinfo(skb)->destructor_arg = NULL;
+
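+       /* Setting SKBTX_DEV_ZEROCOPY makes kfree_skb() fire nskb's zerocopy
+        * callback, returning its grant-mapped slots via the dealloc ring.
+        */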
+       skb_shinfo(nskb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+       kfree_skb(nskb);
+
+       return 0;
+}
 
 static int xenvif_tx_submit(struct xenvif *vif)
 {
-       struct gnttab_copy *gop = vif->tx_copy_ops;
+       struct gnttab_map_grant_ref *gop = vif->tx_map_ops;
        struct sk_buff *skb;
        int work_done = 0;
 
@@ -1305,7 +1407,7 @@ static int xenvif_tx_submit(struct xenvif *vif)
                u16 pending_idx;
                unsigned data_len;
 
-               pending_idx = *((u16 *)skb->data);
+               pending_idx = XENVIF_TX_CB(skb)->pending_idx;
                txp = &vif->pending_tx_info[pending_idx].req;
 
                /* Check the remap error code. */
@@ -1320,14 +1422,16 @@ static int xenvif_tx_submit(struct xenvif *vif)
                memcpy(skb->data,
                       (void *)(idx_to_kaddr(vif, pending_idx)|txp->offset),
                       data_len);
+               callback_param(vif, pending_idx).ctx = NULL;
                if (data_len < txp->size) {
                        /* Append the packet payload as a fragment. */
                        txp->offset += data_len;
                        txp->size -= data_len;
+                       skb_shinfo(skb)->destructor_arg =
+                               &callback_param(vif, pending_idx);
                } else {
                        /* Schedule a response immediately. */
-                       xenvif_idx_release(vif, pending_idx,
-                                          XEN_NETIF_RSP_OKAY);
+                       xenvif_idx_unmap(vif, pending_idx);
                }
 
                if (txp->flags & XEN_NETTXF_csum_blank)
@@ -1337,6 +1441,17 @@ static int xenvif_tx_submit(struct xenvif *vif)
 
                xenvif_fill_frags(vif, skb);
 
+               if (unlikely(skb_has_frag_list(skb))) {
+                       if (xenvif_handle_frag_list(vif, skb)) {
+                               if (net_ratelimit())
+                                       netdev_err(vif->dev,
+                                                  "Not enough memory to consolidate frag_list!\n");
+                               skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+                               kfree_skb(skb);
+                               continue;
+                       }
+               }
+
                if (skb_is_nonlinear(skb) && skb_headlen(skb) < PKT_PROT_LEN) {
                        int target = min_t(int, skb->len, PKT_PROT_LEN);
                        __pskb_pull_tail(skb, target - skb_headlen(skb));
@@ -1349,6 +1464,9 @@ static int xenvif_tx_submit(struct xenvif *vif)
                if (checksum_setup(vif, skb)) {
                        netdev_dbg(vif->dev,
                                   "Can't setup checksum in net_tx_action\n");
+                       /* We have to set this flag to trigger the callback */
+                       if (skb_shinfo(skb)->destructor_arg)
+                               skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
                        kfree_skb(skb);
                        continue;
                }
@@ -1374,17 +1492,126 @@ static int xenvif_tx_submit(struct xenvif *vif)
 
                work_done++;
 
+               /* Set this flag right before netif_receive_skb, otherwise
+                * someone might think this packet already left netback, and
+                * do a skb_copy_ubufs while we are still in control of the
+                * skb. E.g. the __pskb_pull_tail earlier can do such a thing.
+                */
+               if (skb_shinfo(skb)->destructor_arg) {
+                       skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+                       vif->tx_zerocopy_sent++;
+               }
+
                netif_receive_skb(skb);
        }
 
        return work_done;
 }
 
+void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
+{
+       unsigned long flags;
+       pending_ring_idx_t index;
+       struct xenvif *vif = ubuf_to_vif(ubuf);
+
+       /* This is the only place where we grab this lock, to protect callbacks
+        * from each other.
+        */
+       spin_lock_irqsave(&vif->callback_lock, flags);
+       do {
+               u16 pending_idx = ubuf->desc;
+               ubuf = (struct ubuf_info *) ubuf->ctx;
+               BUG_ON(vif->dealloc_prod - vif->dealloc_cons >=
+                       MAX_PENDING_REQS);
+               index = pending_index(vif->dealloc_prod);
+               vif->dealloc_ring[index] = pending_idx;
+               /* Sync with xenvif_tx_dealloc_action:
+                * insert idx then incr producer.
+                */
+               smp_wmb();
+               vif->dealloc_prod++;
+       } while (ubuf);
+       wake_up(&vif->dealloc_wq);
+       spin_unlock_irqrestore(&vif->callback_lock, flags);
+
+       if (likely(zerocopy_success))
+               vif->tx_zerocopy_success++;
+       else
+               vif->tx_zerocopy_fail++;
+}
+
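The smp_wmb() above pairs with the smp_rmb() in xenvif_tx_dealloc_action() below: the callback publishes the ring entry before advancing dealloc_prod, and the dealloc thread reads dealloc_prod before reading entries. The same discipline in a stripped-down single-producer/single-consumer form (illustration only, hypothetical names):

        static u16 ring[MAX_PENDING_REQS];
        static unsigned int prod, cons;

        static void produce(u16 val)
        {
                ring[pending_index(prod)] = val;
                smp_wmb();      /* entry visible before the index */
                prod++;
        }

        static bool consume(u16 *val)
        {
                if (cons == prod)
                        return false;
                smp_rmb();      /* index read before the entry */
                *val = ring[pending_index(cons)];
                cons++;
                return true;
        }
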
+static inline void xenvif_tx_dealloc_action(struct xenvif *vif)
+{
+       struct gnttab_unmap_grant_ref *gop;
+       pending_ring_idx_t dc, dp;
+       u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
+       unsigned int i = 0;
+
+       dc = vif->dealloc_cons;
+       gop = vif->tx_unmap_ops;
+
+       /* Free up any grants we have finished using */
+       do {
+               dp = vif->dealloc_prod;
+
+               /* Ensure we see all indices enqueued by every
+                * xenvif_zerocopy_callback() invocation.
+                */
+               smp_rmb();
+
+               while (dc != dp) {
+                       BUG_ON(gop - vif->tx_unmap_ops > MAX_PENDING_REQS);
+                       pending_idx =
+                               vif->dealloc_ring[pending_index(dc++)];
+
+                       pending_idx_release[gop-vif->tx_unmap_ops] =
+                               pending_idx;
+                       vif->pages_to_unmap[gop-vif->tx_unmap_ops] =
+                               vif->mmap_pages[pending_idx];
+                       gnttab_set_unmap_op(gop,
+                                           idx_to_kaddr(vif, pending_idx),
+                                           GNTMAP_host_map,
+                                           vif->grant_tx_handle[pending_idx]);
+                       xenvif_grant_handle_reset(vif, pending_idx);
+                       ++gop;
+               }
+
+       } while (dp != vif->dealloc_prod);
+
+       vif->dealloc_cons = dc;
+
+       if (gop - vif->tx_unmap_ops > 0) {
+               int ret;
+               ret = gnttab_unmap_refs(vif->tx_unmap_ops,
+                                       NULL,
+                                       vif->pages_to_unmap,
+                                       gop - vif->tx_unmap_ops);
+               if (ret) {
+                       netdev_err(vif->dev, "Unmap fail: nr_ops %tx ret %d\n",
+                                  gop - vif->tx_unmap_ops, ret);
+                       for (i = 0; i < gop - vif->tx_unmap_ops; ++i) {
+                               if (gop[i].status != GNTST_okay)
+                                       netdev_err(vif->dev,
+                                                  " host_addr: %llx handle: %x status: %d\n",
+                                                  gop[i].host_addr,
+                                                  gop[i].handle,
+                                                  gop[i].status);
+                       }
+                       BUG();
+               }
+       }
+
+       for (i = 0; i < gop - vif->tx_unmap_ops; ++i)
+               xenvif_idx_release(vif, pending_idx_release[i],
+                                  XEN_NETIF_RSP_OKAY);
+}
+
+
 /* Called after netfront has transmitted */
 int xenvif_tx_action(struct xenvif *vif, int budget)
 {
        unsigned nr_gops;
-       int work_done;
+       int work_done, ret;
 
        if (unlikely(!tx_work_todo(vif)))
                return 0;
@@ -1394,7 +1621,11 @@ int xenvif_tx_action(struct xenvif *vif, int budget)
        if (nr_gops == 0)
                return 0;
 
-       gnttab_batch_copy(vif->tx_copy_ops, nr_gops);
+       ret = gnttab_map_refs(vif->tx_map_ops,
+                             NULL,
+                             vif->pages_to_map,
+                             nr_gops);
+       BUG_ON(ret);
 
        work_done = xenvif_tx_submit(vif);
 
@@ -1405,45 +1636,18 @@ static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
                               u8 status)
 {
        struct pending_tx_info *pending_tx_info;
-       pending_ring_idx_t head;
-       u16 peek; /* peek into next tx request */
-
-       BUG_ON(vif->mmap_pages[pending_idx] == (void *)(~0UL));
-
-       /* Already complete? */
-       if (vif->mmap_pages[pending_idx] == NULL)
-               return;
+       pending_ring_idx_t index;
+       unsigned long flags;
 
        pending_tx_info = &vif->pending_tx_info[pending_idx];
-
-       head = pending_tx_info->head;
-
-       BUG_ON(!pending_tx_is_head(vif, head));
-       BUG_ON(vif->pending_ring[pending_index(head)] != pending_idx);
-
-       do {
-               pending_ring_idx_t index;
-               pending_ring_idx_t idx = pending_index(head);
-               u16 info_idx = vif->pending_ring[idx];
-
-               pending_tx_info = &vif->pending_tx_info[info_idx];
-               make_tx_response(vif, &pending_tx_info->req, status);
-
-               /* Setting any number other than
-                * INVALID_PENDING_RING_IDX indicates this slot is
-                * starting a new packet / ending a previous packet.
-                */
-               pending_tx_info->head = 0;
-
-               index = pending_index(vif->pending_prod++);
-               vif->pending_ring[index] = vif->pending_ring[info_idx];
-
-               peek = vif->pending_ring[pending_index(++head)];
-
-       } while (!pending_tx_is_head(vif, peek));
-
-       put_page(vif->mmap_pages[pending_idx]);
-       vif->mmap_pages[pending_idx] = NULL;
+       spin_lock_irqsave(&vif->response_lock, flags);
+       make_tx_response(vif, &pending_tx_info->req, status);
+       index = pending_index(vif->pending_prod);
+       vif->pending_ring[index] = pending_idx;
+       /* TX shouldn't use the index before we give it back here */
+       mb();
+       vif->pending_prod++;
+       spin_unlock_irqrestore(&vif->response_lock, flags);
 }
 
 
@@ -1491,23 +1695,54 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
        return resp;
 }
 
+void xenvif_idx_unmap(struct xenvif *vif, u16 pending_idx)
+{
+       int ret;
+       struct gnttab_unmap_grant_ref tx_unmap_op;
+
+       gnttab_set_unmap_op(&tx_unmap_op,
+                           idx_to_kaddr(vif, pending_idx),
+                           GNTMAP_host_map,
+                           vif->grant_tx_handle[pending_idx]);
+       xenvif_grant_handle_reset(vif, pending_idx);
+
+       ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
+                               &vif->mmap_pages[pending_idx], 1);
+       if (ret) {
+               netdev_err(vif->dev,
+                          "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: %x status: %d\n",
+                          ret,
+                          pending_idx,
+                          tx_unmap_op.host_addr,
+                          tx_unmap_op.handle,
+                          tx_unmap_op.status);
+               BUG();
+       }
+
+       xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
+}
+
 static inline int rx_work_todo(struct xenvif *vif)
 {
-       return !skb_queue_empty(&vif->rx_queue) &&
-              xenvif_rx_ring_slots_available(vif, vif->rx_last_skb_slots);
+       return (!skb_queue_empty(&vif->rx_queue) &&
+              xenvif_rx_ring_slots_available(vif, vif->rx_last_skb_slots)) ||
+              vif->rx_queue_purge;
 }
 
 static inline int tx_work_todo(struct xenvif *vif)
 {
 
-       if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)) &&
-           (nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
-            < MAX_PENDING_REQS))
+       if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)))
                return 1;
 
        return 0;
 }
 
+static inline bool tx_dealloc_work_todo(struct xenvif *vif)
+{
+       return vif->dealloc_cons != vif->dealloc_prod;
+}
+
 void xenvif_unmap_frontend_rings(struct xenvif *vif)
 {
        if (vif->tx.sring)
@@ -1565,7 +1800,7 @@ static void xenvif_start_queue(struct xenvif *vif)
                netif_wake_queue(vif->dev);
 }
 
-int xenvif_kthread(void *data)
+int xenvif_kthread_guest_rx(void *data)
 {
        struct xenvif *vif = data;
        struct sk_buff *skb;
@@ -1577,12 +1812,19 @@ int xenvif_kthread(void *data)
                if (kthread_should_stop())
                        break;
 
+               if (vif->rx_queue_purge) {
+                       skb_queue_purge(&vif->rx_queue);
+                       vif->rx_queue_purge = false;
+               }
+
                if (!skb_queue_empty(&vif->rx_queue))
                        xenvif_rx_action(vif);
 
                if (skb_queue_empty(&vif->rx_queue) &&
-                   netif_queue_stopped(vif->dev))
+                   netif_queue_stopped(vif->dev)) {
+                       del_timer_sync(&vif->wake_queue);
                        xenvif_start_queue(vif);
+               }
 
                cond_resched();
        }
@@ -1594,6 +1836,28 @@ int xenvif_kthread(void *data)
        return 0;
 }
 
+int xenvif_dealloc_kthread(void *data)
+{
+       struct xenvif *vif = data;
+
+       while (!kthread_should_stop()) {
+               wait_event_interruptible(vif->dealloc_wq,
+                                        tx_dealloc_work_todo(vif) ||
+                                        kthread_should_stop());
+               if (kthread_should_stop())
+                       break;
+
+               xenvif_tx_dealloc_action(vif);
+               cond_resched();
+       }
+
+       /* Unmap anything remaining */
+       if (tx_dealloc_work_todo(vif))
+               xenvif_tx_dealloc_action(vif);
+
+       return 0;
+}
+
 static int __init netback_init(void)
 {
        int rc = 0;
@@ -1611,6 +1875,8 @@ static int __init netback_init(void)
        if (rc)
                goto failed_init;
 
+       rx_drain_timeout_jiffies = msecs_to_jiffies(rx_drain_timeout_msecs);
+
        return 0;
 
 failed_init: