sfc: Make initial fill of RX descriptors synchronous
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 8f09e686fc2392a80f56610c78a61c6374b4a410..8671bc199a9d03d26876203cb0e9eb596790b291 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -94,7 +94,7 @@ static inline void efx_sync_rx_buffer(struct efx_nic *efx,
 
 void efx_rx_config_page_split(struct efx_nic *efx)
 {
-       efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + NET_IP_ALIGN,
+       efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align,
                                      EFX_RX_BUF_ALIGNMENT);
        efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
                ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
@@ -149,7 +149,7 @@ static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
  * 0 on success. If a single page can be used for multiple buffers,
  * then the page will either be inserted fully, or not at all.
  */
-static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
+static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
 {
        struct efx_nic *efx = rx_queue->efx;
        struct efx_rx_buffer *rx_buf;
@@ -163,7 +163,8 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
        do {
                page = efx_reuse_page(rx_queue);
                if (page == NULL) {
-                       page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
+                       page = alloc_pages(__GFP_COLD | __GFP_COMP |
+                                          (atomic ? GFP_ATOMIC : GFP_KERNEL),
                                           efx->rx_buffer_order);
                        if (unlikely(page == NULL))
                                return -ENOMEM;
@@ -189,9 +190,9 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
                do {
                        index = rx_queue->added_count & rx_queue->ptr_mask;
                        rx_buf = efx_rx_buffer(rx_queue, index);
-                       rx_buf->dma_addr = dma_addr + NET_IP_ALIGN;
+                       rx_buf->dma_addr = dma_addr + efx->rx_ip_align;
                        rx_buf->page = page;
-                       rx_buf->page_offset = page_offset + NET_IP_ALIGN;
+                       rx_buf->page_offset = page_offset + efx->rx_ip_align;
                        rx_buf->len = efx->rx_dma_len;
                        rx_buf->flags = 0;
                        ++rx_queue->added_count;
@@ -321,7 +322,7 @@ static void efx_discard_rx_packet(struct efx_channel *channel,
  * this means this function must run from the NAPI handler, or be called
  * when NAPI is disabled.
  */
-void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
+void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
 {
        struct efx_nic *efx = rx_queue->efx;
        unsigned int fill_level, batch_size;
@@ -354,7 +355,7 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
 
 
        do {
-               rc = efx_init_rx_buffers(rx_queue);
+               rc = efx_init_rx_buffers(rx_queue, atomic);
                if (unlikely(rc)) {
                        /* Ensure that we don't leave the rx queue empty */
                        if (rx_queue->added_count == rx_queue->removed_count)
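
The new "atomic" parameter lets the caller's execution context reach the
page allocator: refills driven from the NAPI poll loop run in softirq
context and must keep using GFP_ATOMIC, while the initial fill at queue
start runs in process context and can now sleep in GFP_KERNEL, which is
far less likely to fail under memory pressure. The helper below is a
minimal sketch of the resulting allocation pattern only; it is not part
of the patch (the real logic stays inline in efx_init_rx_buffers above)
and the helper name is invented for illustration.

	/* Sketch only: factors the allocation expression added by this
	 * patch into a hypothetical helper.  "atomic" is true when
	 * called from NAPI/softirq context, false during the initial
	 * synchronous fill in process context.
	 */
	static struct page *efx_alloc_rx_page(struct efx_nic *efx,
					      bool atomic)
	{
		/* GFP_KERNEL may sleep and reclaim memory, so prefer it
		 * whenever sleeping is allowed; GFP_ATOMIC never sleeps
		 * but fails more readily when free memory is scarce or
		 * fragmented.
		 */
		gfp_t gfp = __GFP_COLD | __GFP_COMP |
			    (atomic ? GFP_ATOMIC : GFP_KERNEL);

		return alloc_pages(gfp, efx->rx_buffer_order);
	}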
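Separately, the compile-time NET_IP_ALIGN constant is replaced by a
per-adapter efx->rx_ip_align when sizing rx_page_buf_step and offsetting
each buffer, so the IP-header alignment can account for any hardware
prefix the NIC places in front of the packet. One plausible one-time
initialisation at probe time is sketched below; the exact expression is
an assumption based on the driver's rx_prefix_size field, not a quote
from this page.

	/* Sketch: keep the IP header 4-byte aligned even when the NIC
	 * prepends rx_prefix_size bytes to each received packet.  On
	 * architectures where NET_IP_ALIGN is 0 (unaligned access is
	 * cheap), no padding is inserted at all.
	 */
	efx->rx_ip_align =
		NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;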