From de78d1f9c83d0aceca42c17abbbf730ebdc2fc6e Mon Sep 17 00:00:00 2001
From: Alexander Duyck
Date: Tue, 25 Sep 2012 00:31:12 +0000
Subject: [PATCH] igb: Lock buffer size at 2K even on systems with larger pages

This change locks us in at 2K buffers even on a system that supports
larger frames. The reason for this change is to make better use of pages
and to reduce the overall truesize of frames generated by igb.

Signed-off-by: Alexander Duyck
Tested-by: Aaron Brown
Signed-off-by: Jeff Kirsher
---
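A note on the reuse logic in igb_add_rx_frag() below: on 4K-page
systems exactly two 2K buffers fit in a page, so the driver alternates
between the two halves by XOR-ing the buffer offset and resets the page
reference count directly rather than taking an atomic reference. On
systems with pages of 8K or larger it instead walks forward through the
page in cache-line-aligned steps and stops reusing the page once a full
2K buffer no longer fits. A minimal user-space sketch of the two
strategies follows; the struct and function names are illustrative
only, and SKB_DATA_ALIGN is approximated by 64-byte alignment, so this
is not the driver code itself:

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	#define IGB_RX_BUFSZ 2048

	struct rx_buffer {
		size_t page_offset;	/* offset of the in-use 2K buffer */
	};

	/* PAGE_SIZE < 8192: two 2K buffers fit, so flip between halves */
	static bool reuse_small_page(struct rx_buffer *b)
	{
		b->page_offset ^= IGB_RX_BUFSZ;
		return true;
	}

	/*
	 * PAGE_SIZE >= 8192: advance the offset by the cache-line-aligned
	 * frame size; reuse stops once a full 2K buffer no longer fits.
	 */
	static bool reuse_large_page(struct rx_buffer *b, size_t size,
				     size_t page_size)
	{
		b->page_offset += (size + 63) & ~(size_t)63; /* ~SKB_DATA_ALIGN */
		return b->page_offset <= page_size - IGB_RX_BUFSZ;
	}

	int main(void)
	{
		struct rx_buffer b = { .page_offset = 0 };

		/* 4K page: the offset simply alternates 0 <-> 2048 */
		reuse_small_page(&b);
		printf("4K page, next offset: %zu\n", b.page_offset);

		/* 64K page: place 1500-byte frames until the page is spent */
		b.page_offset = 0;
		while (reuse_large_page(&b, 1500, 65536))
			;
		printf("64K page exhausted at offset %zu\n", b.page_offset);
		return 0;
	}

With 1500-byte frames a 64K page serves roughly forty 2K buffers before
being recycled, where the old PAGE_SIZE / 2 scheme charged each frame a
32K truesize; that is the improvement the commit message refers to.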
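For reference, the SRRCTL packet-buffer-size field is programmed in
1 KB units (E1000_SRRCTL_BSIZEPKT_SHIFT is 10 in the igb headers), so
the replacement line in igb_configure_rx_ring() reduces to a constant:

	srrctl |= IGB_RX_BUFSZ >> E1000_SRRCTL_BSIZEPKT_SHIFT;
	/* 2048 >> 10 == 2, i.e. the hardware is told to use 2 KB buffers */

With the buffer size no longer tied to PAGE_SIZE / 2, the old #if/#else
clamp against IGB_RXBUFFER_16384 becomes unnecessary.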
 drivers/net/ethernet/intel/igb/igb.h         |  7 ++---
 drivers/net/ethernet/intel/igb/igb_ethtool.c |  4 +--
 drivers/net/ethernet/intel/igb/igb_main.c    | 27 ++++++++++++--------
 3 files changed, 23 insertions(+), 15 deletions(-)

diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 1d15bb0b1e91..d3fd0127c0c8 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -132,9 +132,10 @@ struct vf_data_storage {
 #define MAXIMUM_ETHERNET_VLAN_SIZE 1522
 
 /* Supported Rx Buffer Sizes */
-#define IGB_RXBUFFER_256   256
-#define IGB_RXBUFFER_16384 16384
-#define IGB_RX_HDR_LEN     IGB_RXBUFFER_256
+#define IGB_RXBUFFER_256   256
+#define IGB_RXBUFFER_2048  2048
+#define IGB_RX_HDR_LEN     IGB_RXBUFFER_256
+#define IGB_RX_BUFSZ       IGB_RXBUFFER_2048
 
 /* How many Tx Descriptors do we need to call netif_wake_queue ? */
 #define IGB_TX_QUEUE_WAKE  16
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 96c6df65726f..375c0dad8d29 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1727,7 +1727,7 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
 		/* sync Rx buffer for CPU read */
 		dma_sync_single_for_cpu(rx_ring->dev,
 					rx_buffer_info->dma,
-					PAGE_SIZE / 2,
+					IGB_RX_BUFSZ,
 					DMA_FROM_DEVICE);
 
 		/* verify contents of skb */
@@ -1737,7 +1737,7 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
 		/* sync Rx buffer for device write */
 		dma_sync_single_for_device(rx_ring->dev,
 					   rx_buffer_info->dma,
-					   PAGE_SIZE / 2,
+					   IGB_RX_BUFSZ,
 					   DMA_FROM_DEVICE);
 
 		/* unmap buffer on tx side */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index fa7ddec4cfe5..0141ef3ea678 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -554,7 +554,7 @@ rx_ring_summary:
 					  16, 1,
 					  page_address(buffer_info->page) +
 						    buffer_info->page_offset,
-					  PAGE_SIZE/2, true);
+					  IGB_RX_BUFSZ, true);
 			}
 		}
 	}
@@ -3103,11 +3103,7 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
 
 	/* set descriptor configuration */
 	srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
-#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
-	srrctl |= IGB_RXBUFFER_16384 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
-#else
-	srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
-#endif
+	srrctl |= IGB_RX_BUFSZ >> E1000_SRRCTL_BSIZEPKT_SHIFT;
 	srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
 #ifdef CONFIG_IGB_PTP
 	if (hw->mac.type >= e1000_82580)
@@ -5855,7 +5851,7 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
 	/* sync the buffer for use by the device */
 	dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
 					 old_buff->page_offset,
-					 PAGE_SIZE / 2,
+					 IGB_RX_BUFSZ,
 					 DMA_FROM_DEVICE);
 }
 
@@ -5905,18 +5901,19 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
 	}
 
 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
-			rx_buffer->page_offset, size, PAGE_SIZE / 2);
+			rx_buffer->page_offset, size, IGB_RX_BUFSZ);
 
 	/* avoid re-using remote pages */
 	if (unlikely(page_to_nid(page) != numa_node_id()))
 		return false;
 
+#if (PAGE_SIZE < 8192)
 	/* if we are only owner of page we can reuse it */
 	if (unlikely(page_count(page) != 1))
 		return false;
 
 	/* flip page offset to other buffer */
-	rx_buffer->page_offset ^= PAGE_SIZE / 2;
+	rx_buffer->page_offset ^= IGB_RX_BUFSZ;
 
 	/*
 	 * since we are the only owner of the page and we need to
@@ -5924,6 +5921,16 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
 	 * an unnecessary locked operation
 	 */
 	atomic_set(&page->_count, 2);
+#else
+	/* move offset up to the next cache line */
+	rx_buffer->page_offset += SKB_DATA_ALIGN(size);
+
+	if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
+		return false;
+
+	/* bump ref count on page before it is given to the stack */
+	get_page(page);
+#endif
 
 	return true;
 }
@@ -5977,7 +5984,7 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
 	dma_sync_single_range_for_cpu(rx_ring->dev,
 				      rx_buffer->dma,
 				      rx_buffer->page_offset,
-				      PAGE_SIZE / 2,
+				      IGB_RX_BUFSZ,
 				      DMA_FROM_DEVICE);
 
 	/* pull page into skb */
-- 
2.34.1