Merge tag 'powerpc-4.8-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc...
[deliverable/linux.git] / drivers / net / ethernet / mellanox / mlx4 / en_rx.c
index 99b5407f2278c0f292961cf8b458e4b4dfa584e7..2040dad8611df21ac63ac4b31cdac97e4b87af31 100644 (file)
@@ -32,6 +32,7 @@
  */
 
 #include <net/busy_poll.h>
+#include <linux/bpf.h>
 #include <linux/mlx4/cq.h>
 #include <linux/slab.h>
 #include <linux/mlx4/qp.h>
@@ -57,7 +58,7 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
        struct page *page;
        dma_addr_t dma;
 
-       for (order = MLX4_EN_ALLOC_PREFER_ORDER; ;) {
+       for (order = frag_info->order; ;) {
                gfp_t gfp = _gfp;
 
                if (order)
@@ -70,7 +71,7 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
                        return -ENOMEM;
        }
        dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE << order,
-                          PCI_DMA_FROMDEVICE);
+                          frag_info->dma_dir);
        if (dma_mapping_error(priv->ddev, dma)) {
                put_page(page);
                return -ENOMEM;
@@ -124,7 +125,8 @@ out:
        while (i--) {
                if (page_alloc[i].page != ring_alloc[i].page) {
                        dma_unmap_page(priv->ddev, page_alloc[i].dma,
-                               page_alloc[i].page_size, PCI_DMA_FROMDEVICE);
+                               page_alloc[i].page_size,
+                               priv->frag_info[i].dma_dir);
                        page = page_alloc[i].page;
                        /* Revert changes done by mlx4_alloc_pages */
                        page_ref_sub(page, page_alloc[i].page_size /
@@ -145,7 +147,7 @@ static void mlx4_en_free_frag(struct mlx4_en_priv *priv,
 
        if (next_frag_end > frags[i].page_size)
                dma_unmap_page(priv->ddev, frags[i].dma, frags[i].page_size,
-                              PCI_DMA_FROMDEVICE);
+                              frag_info->dma_dir);
 
        if (frags[i].page)
                put_page(frags[i].page);
@@ -176,7 +178,8 @@ out:
 
                page_alloc = &ring->page_alloc[i];
                dma_unmap_page(priv->ddev, page_alloc->dma,
-                              page_alloc->page_size, PCI_DMA_FROMDEVICE);
+                              page_alloc->page_size,
+                              priv->frag_info[i].dma_dir);
                page = page_alloc->page;
                /* Revert changes done by mlx4_alloc_pages */
                page_ref_sub(page, page_alloc->page_size /
@@ -201,7 +204,7 @@ static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
                       i, page_count(page_alloc->page));
 
                dma_unmap_page(priv->ddev, page_alloc->dma,
-                               page_alloc->page_size, PCI_DMA_FROMDEVICE);
+                               page_alloc->page_size, frag_info->dma_dir);
                while (page_alloc->page_offset + frag_info->frag_stride <
                       page_alloc->page_size) {
                        put_page(page_alloc->page);
@@ -244,6 +247,12 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
        struct mlx4_en_rx_alloc *frags = ring->rx_info +
                                        (index << priv->log_rx_info);
 
+       if (ring->page_cache.index > 0) {
+               frags[0] = ring->page_cache.buf[--ring->page_cache.index];
+               rx_desc->data[0].addr = cpu_to_be64(frags[0].dma);
+               return 0;
+       }
+
        return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc, gfp);
 }
 
@@ -502,13 +511,35 @@ void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
        }
 }
 
+/* When the rx ring is running in page-per-packet mode, a released frame can go
+ * directly into a small cache, to avoid unmapping or touching the page
+ * allocator. In bpf prog performance scenarios, buffers are either forwarded
+ * or dropped, never converted to skbs, so every page can come directly from
+ * this cache when it is sized to be a multiple of the napi budget.
+ */
+/* Try to absorb a released rx frame into the ring's small page cache so a
+ * later mlx4_en_prepare_rx_desc() can reuse the still-mapped page without
+ * touching the DMA layer or the page allocator.
+ *
+ * Returns true when the frame was taken over by the cache (the caller must
+ * not touch it again); false when the cache is already full
+ * (MLX4_EN_CACHE_SIZE entries) and the caller has to unmap and free the
+ * page itself.
+ *
+ * NOTE(review): cache->index is read-modify-written without locking —
+ * presumably serialized by the ring's napi context; confirm at call sites.
+ */
+bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring,
+                       struct mlx4_en_rx_alloc *frame)
+{
+       struct mlx4_en_page_cache *cache = &ring->page_cache;
+
+       if (cache->index >= MLX4_EN_CACHE_SIZE)
+               return false;
+
+       cache->buf[cache->index++] = *frame;
+       return true;
+}
+
 void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
                             struct mlx4_en_rx_ring **pring,
                             u32 size, u16 stride)
 {
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_en_rx_ring *ring = *pring;
+       struct bpf_prog *old_prog;
 
+       old_prog = READ_ONCE(ring->xdp_prog);
+       if (old_prog)
+               bpf_prog_put(old_prog);
        mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
        vfree(ring->rx_info);
        ring->rx_info = NULL;
@@ -519,6 +550,16 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
 void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
                                struct mlx4_en_rx_ring *ring)
 {
+       int i;
+
+       for (i = 0; i < ring->page_cache.index; i++) {
+               struct mlx4_en_rx_alloc *frame = &ring->page_cache.buf[i];
+
+               dma_unmap_page(priv->ddev, frame->dma, frame->page_size,
+                              priv->frag_info[0].dma_dir);
+               put_page(frame->page);
+       }
+       ring->page_cache.index = 0;
        mlx4_en_free_rx_buf(priv, ring);
        if (ring->stride <= TXBB_SIZE)
                ring->buf -= TXBB_SIZE;
@@ -740,7 +781,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
        struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
        struct mlx4_en_rx_alloc *frags;
        struct mlx4_en_rx_desc *rx_desc;
+       struct bpf_prog *xdp_prog;
+       int doorbell_pending;
        struct sk_buff *skb;
+       int tx_index;
        int index;
        int nr;
        unsigned int length;
@@ -756,6 +800,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
        if (budget <= 0)
                return polled;
 
+       xdp_prog = READ_ONCE(ring->xdp_prog);
+       doorbell_pending = 0;
+       tx_index = (priv->tx_ring_num - priv->xdp_ring_num) + cq->ring;
+
        /* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
         * descriptor offset can be deduced from the CQE index instead of
         * reading 'cqe->index' */
@@ -832,6 +880,43 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
                l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
                        (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
 
+               /* A bpf program gets first chance to drop the packet. It may
+                * read bytes but not past the end of the frag.
+                */
+               if (xdp_prog) {
+                       struct xdp_buff xdp;
+                       dma_addr_t dma;
+                       u32 act;
+
+                       dma = be64_to_cpu(rx_desc->data[0].addr);
+                       dma_sync_single_for_cpu(priv->ddev, dma,
+                                               priv->frag_info[0].frag_size,
+                                               DMA_FROM_DEVICE);
+
+                       xdp.data = page_address(frags[0].page) +
+                                                       frags[0].page_offset;
+                       xdp.data_end = xdp.data + length;
+
+                       act = bpf_prog_run_xdp(xdp_prog, &xdp);
+                       switch (act) {
+                       case XDP_PASS:
+                               break;
+                       case XDP_TX:
+                               if (!mlx4_en_xmit_frame(frags, dev,
+                                                       length, tx_index,
+                                                       &doorbell_pending))
+                                       goto consumed;
+                               break;
+                       default:
+                               bpf_warn_invalid_xdp_action(act);
+                       case XDP_ABORTED:
+                       case XDP_DROP:
+                               if (mlx4_en_rx_recycle(ring, frags))
+                                       goto consumed;
+                               goto next;
+                       }
+               }
+
                if (likely(dev->features & NETIF_F_RXCSUM)) {
                        if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
                                                      MLX4_CQE_STATUS_UDP)) {
@@ -983,6 +1068,7 @@ next:
                for (nr = 0; nr < priv->num_frags; nr++)
                        mlx4_en_free_frag(priv, frags, nr);
 
+consumed:
                ++cq->mcq.cons_index;
                index = (cq->mcq.cons_index) & ring->size_mask;
                cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;
@@ -991,6 +1077,9 @@ next:
        }
 
 out:
+       if (doorbell_pending)
+               mlx4_en_xmit_doorbell(priv->tx_ring[tx_index]);
+
        AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
        mlx4_cq_set_ci(&cq->mcq);
        wmb(); /* ensure HW sees CQ consumer before we post new buffers */
@@ -1058,22 +1147,35 @@ static const int frag_sizes[] = {
 
 void mlx4_en_calc_rx_buf(struct net_device *dev)
 {
+       enum dma_data_direction dma_dir = PCI_DMA_FROMDEVICE;
        struct mlx4_en_priv *priv = netdev_priv(dev);
-       /* VLAN_HLEN is added twice,to support skb vlan tagged with multiple
-        * headers. (For example: ETH_P_8021Q and ETH_P_8021AD).
-        */
-       int eff_mtu = dev->mtu + ETH_HLEN + (2 * VLAN_HLEN);
+       int eff_mtu = MLX4_EN_EFF_MTU(dev->mtu);
+       int order = MLX4_EN_ALLOC_PREFER_ORDER;
+       u32 align = SMP_CACHE_BYTES;
        int buf_size = 0;
        int i = 0;
 
+       /* bpf requires buffers to be set up as 1 packet per page.
+        * This only works when num_frags == 1.
+        */
+       if (priv->xdp_ring_num) {
+               dma_dir = PCI_DMA_BIDIRECTIONAL;
+               /* This will gain efficient xdp frame recycling at the expense
+                * of more costly truesize accounting
+                */
+               align = PAGE_SIZE;
+               order = 0;
+       }
+
        while (buf_size < eff_mtu) {
+               priv->frag_info[i].order = order;
                priv->frag_info[i].frag_size =
                        (eff_mtu > buf_size + frag_sizes[i]) ?
                                frag_sizes[i] : eff_mtu - buf_size;
                priv->frag_info[i].frag_prefix_size = buf_size;
                priv->frag_info[i].frag_stride =
-                               ALIGN(priv->frag_info[i].frag_size,
-                                     SMP_CACHE_BYTES);
+                               ALIGN(priv->frag_info[i].frag_size, align);
+               priv->frag_info[i].dma_dir = dma_dir;
                buf_size += priv->frag_info[i].frag_size;
                i++;
        }
This page took 0.028581 seconds and 5 git commands to generate.