/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2009 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"

/* Number of RX descriptors pushed at once. */
#define EFX_RX_BATCH  8

/* Maximum size of a buffer sharing a page */
#define EFX_RX_HALF_PAGE ((PAGE_SIZE >> 1) - sizeof(struct efx_rx_page_state))

/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS  64u

/*
 * rx_alloc_method - RX buffer allocation method
 *
 * This driver supports two methods for allocating and using RX buffers:
 * each RX buffer may be backed by an skb or by an order-n page.
 *
 * When LRO is in use then the second method has a lower overhead,
 * since we don't have to allocate then free skbs on reassembled frames.
 *
 * Values:
 *   - RX_ALLOC_METHOD_AUTO = 0
 *   - RX_ALLOC_METHOD_SKB  = 1
 *   - RX_ALLOC_METHOD_PAGE = 2
 *
 * The heuristic for %RX_ALLOC_METHOD_AUTO is a simple hysteresis count
 * controlled by the parameters below.
 *
 *   - Since pushing and popping descriptors are separated by the rx_queue
 *     size, the watermarks should be ~rxd_size.
 *   - The performance win from using page-based allocation for LRO is
 *     smaller than the performance hit of using page-based allocation for
 *     non-LRO traffic, so the watermarks should reflect this.
 *
 * Per channel we maintain a single variable, updated for each received
 * packet:
 *
 *   rx_alloc_level += (lro_performed ? RX_ALLOC_FACTOR_LRO :
 *                      RX_ALLOC_FACTOR_SKB)
 * Per NAPI poll interval, we constrain rx_alloc_level to 0..MAX (which
 * limits the hysteresis), and update the allocation strategy:
 *
 *   rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_LRO ?
 *                      RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB)
 */
static int rx_alloc_method = RX_ALLOC_METHOD_AUTO;

#define RX_ALLOC_LEVEL_LRO 0x2000
#define RX_ALLOC_LEVEL_MAX 0x3000
#define RX_ALLOC_FACTOR_LRO 1
#define RX_ALLOC_FACTOR_SKB (-2)

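/* Worked example of the hysteresis arithmetic: starting from
 * rx_alloc_level == 0, each frame that GRO merges adds
 * RX_ALLOC_FACTOR_LRO (+1), so roughly 0x2000 (8192) consecutive merged
 * frames are needed to cross RX_ALLOC_LEVEL_LRO and switch to page-based
 * allocation; frames passed up unmerged pull the level back down twice
 * as fast (RX_ALLOC_FACTOR_SKB == -2).
 */
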
/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold = 90;

/* This is the percentage fill level to which an RX queue will be refilled
 * when the "RX refill threshold" is reached.
 */
static unsigned int rx_refill_limit = 95;

/*
 * RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow and at least 2 to allow
 * pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM 2

static inline unsigned int efx_rx_buf_offset(struct efx_rx_buffer *buf)
{
        /* Offset is always within one page, so we don't need to consider
         * the page order.
         */
        return (__force unsigned long) buf->data & (PAGE_SIZE - 1);
}
static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
{
        return PAGE_SIZE << efx->rx_buffer_order;
}

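/* Both callers invoke this after rx_buf->data has been advanced past the
 * prefix the NIC prepends to the packet (rx_buffer_hash_size bytes), so the
 * receive hash supplied by the NIC sits in the 4 bytes immediately before
 * the packet data; it is used to fill skb->rxhash when NETIF_F_RXHASH is
 * enabled.
 */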
static inline u32 efx_rx_buf_hash(struct efx_rx_buffer *buf)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0
        return __le32_to_cpup((const __le32 *)(buf->data - 4));
#else
        const u8 *data = (const u8 *)(buf->data - 4);
        return ((u32)data[0]       |
                (u32)data[1] << 8  |
                (u32)data[2] << 16 |
                (u32)data[3] << 24);
#endif
}

/**
 * efx_init_rx_buffers_skb - create EFX_RX_BATCH skb-based RX buffers
 *
 * @rx_queue:		Efx RX queue
 *
 * This allocates EFX_RX_BATCH skbs, maps them for DMA, and populates a
 * struct efx_rx_buffer for each one.  Return a negative error code or 0
 * on success.  May return having inserted fewer than EFX_RX_BATCH
 * buffers.
 */
static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        struct net_device *net_dev = efx->net_dev;
        struct efx_rx_buffer *rx_buf;
        int skb_len = efx->rx_buffer_len;
        unsigned index, count;

        for (count = 0; count < EFX_RX_BATCH; ++count) {
                index = rx_queue->added_count & rx_queue->ptr_mask;
                rx_buf = efx_rx_buffer(rx_queue, index);

                rx_buf->skb = netdev_alloc_skb(net_dev, skb_len);
                if (unlikely(!rx_buf->skb))
                        return -ENOMEM;
                rx_buf->page = NULL;

                /* Adjust the SKB for padding and checksum */
                skb_reserve(rx_buf->skb, NET_IP_ALIGN);
                rx_buf->len = skb_len - NET_IP_ALIGN;
                rx_buf->data = (char *)rx_buf->skb->data;
                rx_buf->skb->ip_summed = CHECKSUM_UNNECESSARY;

                rx_buf->dma_addr = pci_map_single(efx->pci_dev,
                                                  rx_buf->data, rx_buf->len,
                                                  PCI_DMA_FROMDEVICE);
                if (unlikely(pci_dma_mapping_error(efx->pci_dev,
                                                   rx_buf->dma_addr))) {
                        dev_kfree_skb_any(rx_buf->skb);
                        rx_buf->skb = NULL;
                        return -EIO;
                }

                ++rx_queue->added_count;
                ++rx_queue->alloc_skb_count;
        }

        return 0;
}

/**
 * efx_init_rx_buffers_page - create EFX_RX_BATCH page-based RX buffers
 *
 * @rx_queue:		Efx RX queue
 *
 * This allocates memory for EFX_RX_BATCH receive buffers, maps them for DMA,
 * and populates a struct efx_rx_buffer for each one.  Return a negative error
 * code or 0 on success.  If a single page can be split between two buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        struct efx_rx_buffer *rx_buf;
        struct page *page;
        void *page_addr;
        struct efx_rx_page_state *state;
        dma_addr_t dma_addr;
        unsigned index, count;

        /* We can split a page between two buffers */
        BUILD_BUG_ON(EFX_RX_BATCH & 1);

        for (count = 0; count < EFX_RX_BATCH; ++count) {
                page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
                                   efx->rx_buffer_order);
                if (unlikely(page == NULL))
                        return -ENOMEM;
                dma_addr = pci_map_page(efx->pci_dev, page, 0,
                                        efx_rx_buf_size(efx),
                                        PCI_DMA_FROMDEVICE);
                if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) {
                        __free_pages(page, efx->rx_buffer_order);
                        return -EIO;
                }
                page_addr = page_address(page);
                state = page_addr;
                state->refcnt = 0;
                state->dma_addr = dma_addr;

                page_addr += sizeof(struct efx_rx_page_state);
                dma_addr += sizeof(struct efx_rx_page_state);

        split:
                index = rx_queue->added_count & rx_queue->ptr_mask;
                rx_buf = efx_rx_buffer(rx_queue, index);
                rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
                rx_buf->skb = NULL;
                rx_buf->page = page;
                rx_buf->data = page_addr + EFX_PAGE_IP_ALIGN;
                rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
                ++rx_queue->added_count;
                ++rx_queue->alloc_page_count;
                ++state->refcnt;

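                /* If the buffer fits in half a page and we have just filled
                 * in the first half (count is even), take an extra reference
                 * on the page and jump back to "split" to carve the second
                 * half into the next descriptor; bumping count keeps the
                 * total at EFX_RX_BATCH buffers per call.
                 */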
                if ((~count & 1) && (efx->rx_buffer_len <= EFX_RX_HALF_PAGE)) {
                        /* Use the second half of the page */
                        get_page(page);
                        dma_addr += (PAGE_SIZE >> 1);
                        page_addr += (PAGE_SIZE >> 1);
                        ++count;
                        goto split;
                }
        }

        return 0;
}

static void efx_unmap_rx_buffer(struct efx_nic *efx,
                                struct efx_rx_buffer *rx_buf)
{
        if (rx_buf->page) {
                struct efx_rx_page_state *state;

                EFX_BUG_ON_PARANOID(rx_buf->skb);

                state = page_address(rx_buf->page);
                if (--state->refcnt == 0) {
                        pci_unmap_page(efx->pci_dev,
                                       state->dma_addr,
                                       efx_rx_buf_size(efx),
                                       PCI_DMA_FROMDEVICE);
                }
        } else if (likely(rx_buf->skb)) {
                pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
                                 rx_buf->len, PCI_DMA_FROMDEVICE);
        }
}

static void efx_free_rx_buffer(struct efx_nic *efx,
                               struct efx_rx_buffer *rx_buf)
{
        if (rx_buf->page) {
                __free_pages(rx_buf->page, efx->rx_buffer_order);
                rx_buf->page = NULL;
        } else if (likely(rx_buf->skb)) {
                dev_kfree_skb_any(rx_buf->skb);
                rx_buf->skb = NULL;
        }
}

static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
                               struct efx_rx_buffer *rx_buf)
{
        efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
        efx_free_rx_buffer(rx_queue->efx, rx_buf);
}

/* Attempt to resurrect the other receive buffer that used to share this page,
 * which had previously been passed up to the kernel and freed. */
static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
                                    struct efx_rx_buffer *rx_buf)
{
        struct efx_rx_page_state *state = page_address(rx_buf->page);
        struct efx_rx_buffer *new_buf;
        unsigned fill_level, index;

        /* +1 because efx_rx_packet() incremented removed_count. +1 because
         * we'd like to insert an additional descriptor whilst leaving
         * EFX_RXD_HEAD_ROOM for the non-recycle path */
        fill_level = (rx_queue->added_count - rx_queue->removed_count + 2);
        if (unlikely(fill_level > rx_queue->max_fill)) {
                /* We could place "state" on a list, and drain the list in
                 * efx_fast_push_rx_descriptors(). For now, this will do. */
                return;
        }

        ++state->refcnt;
        get_page(rx_buf->page);

        index = rx_queue->added_count & rx_queue->ptr_mask;
        new_buf = efx_rx_buffer(rx_queue, index);
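        /* The two buffers carved from a shared page sit exactly half a page
         * apart, so XOR-ing the DMA and virtual addresses with PAGE_SIZE/2
         * recovers the sibling buffer's addresses.
         */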
        new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
        new_buf->skb = NULL;
        new_buf->page = rx_buf->page;
        new_buf->data = (void *)
                ((__force unsigned long)rx_buf->data ^ (PAGE_SIZE >> 1));
        new_buf->len = rx_buf->len;
        ++rx_queue->added_count;
}

/* Recycle the given rx buffer directly back into the rx_queue. There is
 * always room to add this buffer, because we've just popped a buffer. */
static void efx_recycle_rx_buffer(struct efx_channel *channel,
                                  struct efx_rx_buffer *rx_buf)
{
        struct efx_nic *efx = channel->efx;
        struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
        struct efx_rx_buffer *new_buf;
        unsigned index;

        if (rx_buf->page != NULL && efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
            page_count(rx_buf->page) == 1)
                efx_resurrect_rx_buffer(rx_queue, rx_buf);

        index = rx_queue->added_count & rx_queue->ptr_mask;
        new_buf = efx_rx_buffer(rx_queue, index);

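        /* Copy the popped descriptor into the slot at added_count, then clear
         * the source descriptor's page/skb pointers so only the new slot owns
         * the underlying resources and nothing is freed twice.
         */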
        memcpy(new_buf, rx_buf, sizeof(*new_buf));
        rx_buf->page = NULL;
        rx_buf->skb = NULL;
        ++rx_queue->added_count;
}

/**
 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:		RX descriptor queue
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->fast_fill_limit. If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here). In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
{
        struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
        unsigned fill_level;
        int space, rc = 0;

        /* Calculate current fill level, and exit if we don't need to fill */
        fill_level = (rx_queue->added_count - rx_queue->removed_count);
        EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
        if (fill_level >= rx_queue->fast_fill_trigger)
                goto out;

        /* Record minimum fill level */
        if (unlikely(fill_level < rx_queue->min_fill)) {
                if (fill_level)
                        rx_queue->min_fill = fill_level;
        }

        space = rx_queue->fast_fill_limit - fill_level;
        if (space < EFX_RX_BATCH)
                goto out;

        netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
                   "RX queue %d fast-filling descriptor ring from"
                   " level %d to level %d using %s allocation\n",
                   efx_rx_queue_index(rx_queue), fill_level,
                   rx_queue->fast_fill_limit,
                   channel->rx_alloc_push_pages ? "page" : "skb");

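        /* Refill in batches of EFX_RX_BATCH buffers until the queue is within
         * one batch of fast_fill_limit; if an allocation fails and the queue
         * would otherwise be empty, fall back to the timer-driven slow fill.
         */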
        do {
                if (channel->rx_alloc_push_pages)
                        rc = efx_init_rx_buffers_page(rx_queue);
                else
                        rc = efx_init_rx_buffers_skb(rx_queue);
                if (unlikely(rc)) {
                        /* Ensure that we don't leave the rx queue empty */
                        if (rx_queue->added_count == rx_queue->removed_count)
                                efx_schedule_slow_fill(rx_queue);
                        goto out;
                }
        } while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);

        netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
                   "RX queue %d fast-filled descriptor ring "
                   "to level %d\n", efx_rx_queue_index(rx_queue),
                   rx_queue->added_count - rx_queue->removed_count);

 out:
        if (rx_queue->notified_count != rx_queue->added_count)
                efx_nic_notify_rx_desc(rx_queue);
}

void efx_rx_slow_fill(unsigned long context)
{
        struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;
        struct efx_channel *channel = efx_rx_queue_channel(rx_queue);

        /* Post an event to cause NAPI to run and refill the queue */
        efx_nic_generate_fill_event(channel);
        ++rx_queue->slow_fill_count;
}

static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
                                     struct efx_rx_buffer *rx_buf,
                                     int len, bool *discard,
                                     bool *leak_packet)
{
        struct efx_nic *efx = rx_queue->efx;
        unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;

        if (likely(len <= max_len))
                return;

        /* The packet must be discarded; it is only treated as a fatal
         * error (triggering a reset) if it overflows the buffer itself.
         */
        *discard = true;

        if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
                if (net_ratelimit())
                        netif_err(efx, rx_err, efx->net_dev,
                                  " RX queue %d seriously overlength "
                                  "RX event (0x%x > 0x%x+0x%x). Leaking\n",
                                  efx_rx_queue_index(rx_queue), len, max_len,
                                  efx->type->rx_buffer_padding);
                /* If this buffer was skb-allocated, then the meta
                 * data at the end of the skb will be trashed. So
                 * we have no choice but to leak the fragment.
                 */
                *leak_packet = (rx_buf->skb != NULL);
                efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
        } else {
                if (net_ratelimit())
                        netif_err(efx, rx_err, efx->net_dev,
                                  " RX queue %d overlength RX event "
                                  "(0x%x > 0x%x)\n",
                                  efx_rx_queue_index(rx_queue), len, max_len);
        }

        efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}

/* Pass a received packet up through the generic LRO stack
 *
 * Handles driverlink veto, and passes the fragment up via
 * the appropriate LRO method
 */
static void efx_rx_packet_lro(struct efx_channel *channel,
                              struct efx_rx_buffer *rx_buf,
                              bool checksummed)
{
        struct napi_struct *napi = &channel->napi_str;
        gro_result_t gro_result;

        /* Pass the skb/page into the LRO engine */
        if (rx_buf->page) {
                struct efx_nic *efx = channel->efx;
                struct page *page = rx_buf->page;
                struct sk_buff *skb;

                EFX_BUG_ON_PARANOID(rx_buf->skb);
                rx_buf->page = NULL;

                skb = napi_get_frags(napi);
                if (!skb) {
                        put_page(page);
                        return;
                }

                if (efx->net_dev->features & NETIF_F_RXHASH)
                        skb->rxhash = efx_rx_buf_hash(rx_buf);

                skb_shinfo(skb)->frags[0].page = page;
                skb_shinfo(skb)->frags[0].page_offset =
                        efx_rx_buf_offset(rx_buf);
                skb_shinfo(skb)->frags[0].size = rx_buf->len;
                skb_shinfo(skb)->nr_frags = 1;

                skb->len = rx_buf->len;
                skb->data_len = rx_buf->len;
                skb->truesize += rx_buf->len;
                skb->ip_summed =
                        checksummed ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE;

                skb_record_rx_queue(skb, channel->channel);

                gro_result = napi_gro_frags(napi);
        } else {
                struct sk_buff *skb = rx_buf->skb;

                EFX_BUG_ON_PARANOID(!skb);
                EFX_BUG_ON_PARANOID(!checksummed);
                rx_buf->skb = NULL;

                gro_result = napi_gro_receive(napi, skb);
        }

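        /* Feed the outcome back into the allocation heuristic: GRO_NORMAL
         * means the frame went up as an ordinary skb (no merging benefit),
         * so lean towards skb allocation; any other non-dropped result leans
         * towards page allocation and bumps the IRQ moderation score.
         */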
        if (gro_result == GRO_NORMAL) {
                channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
        } else if (gro_result != GRO_DROP) {
                channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO;
                channel->irq_mod_score += 2;
        }
}

void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
                   unsigned int len, bool checksummed, bool discard)
{
        struct efx_nic *efx = rx_queue->efx;
        struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
        struct efx_rx_buffer *rx_buf;
        bool leak_packet = false;

        rx_buf = efx_rx_buffer(rx_queue, index);
        EFX_BUG_ON_PARANOID(!rx_buf->data);
        EFX_BUG_ON_PARANOID(rx_buf->skb && rx_buf->page);
        EFX_BUG_ON_PARANOID(!(rx_buf->skb || rx_buf->page));

        /* This allows the refill path to post another buffer.
         * EFX_RXD_HEAD_ROOM ensures that the slot we are using
         * isn't overwritten yet.
         */
        rx_queue->removed_count++;

        /* Validate the length encoded in the event vs the descriptor pushed */
        efx_rx_packet__check_len(rx_queue, rx_buf, len,
                                 &discard, &leak_packet);

        netif_vdbg(efx, rx_status, efx->net_dev,
                   "RX queue %d received id %x at %llx+%x %s%s\n",
                   efx_rx_queue_index(rx_queue), index,
                   (unsigned long long)rx_buf->dma_addr, len,
                   (checksummed ? " [SUMMED]" : ""),
                   (discard ? " [DISCARD]" : ""));

        /* Discard packet, if instructed to do so */
        if (unlikely(discard)) {
                if (unlikely(leak_packet))
                        channel->n_skbuff_leaks++;
                else
                        efx_recycle_rx_buffer(channel, rx_buf);

                /* Don't hold off the previous receive */
                rx_buf = NULL;
                goto out;
        }

        /* Release card resources - assumes all RX buffers consumed in-order
         * per RX queue
         */
        efx_unmap_rx_buffer(efx, rx_buf);

        /* Prefetch nice and early so data will (hopefully) be in cache by
         * the time we look at it.
         */
        prefetch(rx_buf->data);

        /* Pipeline receives so that we give time for packet headers to be
         * prefetched into cache.
         */
        rx_buf->len = len;
out:
        if (channel->rx_pkt)
                __efx_rx_packet(channel,
                                channel->rx_pkt, channel->rx_pkt_csummed);
        channel->rx_pkt = rx_buf;
        channel->rx_pkt_csummed = checksummed;
}

/* Handle a received packet.  Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel,
                     struct efx_rx_buffer *rx_buf, bool checksummed)
{
        struct efx_nic *efx = channel->efx;
        struct sk_buff *skb;

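        /* Skip over the hash/prefix area the NIC may prepend to the packet
         * (rx_buffer_hash_size bytes); efx_rx_buf_hash() can still read the
         * hash from just before the new data pointer.
         */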
        rx_buf->data += efx->type->rx_buffer_hash_size;
        rx_buf->len -= efx->type->rx_buffer_hash_size;

        /* If we're in loopback test, then pass the packet directly to the
         * loopback layer, and free the rx_buf here
         */
        if (unlikely(efx->loopback_selftest)) {
                efx_loopback_rx_packet(efx, rx_buf->data, rx_buf->len);
                efx_free_rx_buffer(efx, rx_buf);
                return;
        }

        if (rx_buf->skb) {
                prefetch(skb_shinfo(rx_buf->skb));

                skb_reserve(rx_buf->skb, efx->type->rx_buffer_hash_size);
                skb_put(rx_buf->skb, rx_buf->len);

                if (efx->net_dev->features & NETIF_F_RXHASH)
                        rx_buf->skb->rxhash = efx_rx_buf_hash(rx_buf);

                /* Move past the ethernet header. rx_buf->data still points
                 * at the ethernet header */
                rx_buf->skb->protocol = eth_type_trans(rx_buf->skb,
                                                       efx->net_dev);

                skb_record_rx_queue(rx_buf->skb, channel->channel);
        }

        if (likely(checksummed || rx_buf->page)) {
                efx_rx_packet_lro(channel, rx_buf, checksummed);
                return;
        }

        /* We now own the SKB */
        skb = rx_buf->skb;
        rx_buf->skb = NULL;
        EFX_BUG_ON_PARANOID(!skb);

        /* Set the SKB flags */
        skb_checksum_none_assert(skb);

        /* Pass the packet up */
        netif_receive_skb(skb);

        /* Update allocation strategy method */
        channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
}

void efx_rx_strategy(struct efx_channel *channel)
{
        enum efx_rx_alloc_method method = rx_alloc_method;

        /* Only makes sense to use page based allocation if LRO is enabled */
        if (!(channel->efx->net_dev->features & NETIF_F_GRO)) {
                method = RX_ALLOC_METHOD_SKB;
        } else if (method == RX_ALLOC_METHOD_AUTO) {
                /* Constrain the rx_alloc_level */
                if (channel->rx_alloc_level < 0)
                        channel->rx_alloc_level = 0;
                else if (channel->rx_alloc_level > RX_ALLOC_LEVEL_MAX)
                        channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX;

                /* Decide on the allocation method */
                method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_LRO) ?
                          RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB);
        }

        /* Push the option */
        channel->rx_alloc_push_pages = (method == RX_ALLOC_METHOD_PAGE);
}

int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        unsigned int entries;
        int rc;

        /* Create the smallest power-of-two aligned ring */
        entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
        EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
        rx_queue->ptr_mask = entries - 1;

        netif_dbg(efx, probe, efx->net_dev,
                  "creating RX queue %d size %#x mask %#x\n",
                  efx_rx_queue_index(rx_queue), efx->rxq_entries,
                  rx_queue->ptr_mask);

        /* Allocate RX buffers */
        rx_queue->buffer = kzalloc(entries * sizeof(*rx_queue->buffer),
                                   GFP_KERNEL);
        if (!rx_queue->buffer)
                return -ENOMEM;

        rc = efx_nic_probe_rx(rx_queue);
        if (rc) {
                kfree(rx_queue->buffer);
                rx_queue->buffer = NULL;
        }
        return rc;
}

void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        unsigned int max_fill, trigger, limit;

        netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
                  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));

        /* Initialise ptr fields */
        rx_queue->added_count = 0;
        rx_queue->notified_count = 0;
        rx_queue->removed_count = 0;
        rx_queue->min_fill = -1U;

        /* Initialise limit fields */
        max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
        trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
        limit = max_fill * min(rx_refill_limit, 100U) / 100U;

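        /* e.g. a 512-entry ring gives max_fill = 510, and with the default
         * module parameters trigger = 459 (90%) and limit = 484 (95%).
         */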
        rx_queue->max_fill = max_fill;
        rx_queue->fast_fill_trigger = trigger;
        rx_queue->fast_fill_limit = limit;

        /* Set up RX descriptor ring */
        efx_nic_init_rx(rx_queue);
}

void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
        int i;
        struct efx_rx_buffer *rx_buf;

        netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
                  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));

        del_timer_sync(&rx_queue->slow_fill);
        efx_nic_fini_rx(rx_queue);

        /* Release RX buffers NB start at index 0 not current HW ptr */
        if (rx_queue->buffer) {
                for (i = 0; i <= rx_queue->ptr_mask; i++) {
                        rx_buf = efx_rx_buffer(rx_queue, i);
                        efx_fini_rx_buffer(rx_queue, rx_buf);
                }
        }
}

void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
        netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
                  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));

        efx_nic_remove_rx(rx_queue);

        kfree(rx_queue->buffer);
        rx_queue->buffer = NULL;
}

module_param(rx_alloc_method, int, 0644);
MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers");

module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
                 "RX descriptor ring fast/slow fill threshold (%)");