/* Intel Ethernet Switch Host Interface Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 */

#include <linux/types.h>
#include <linux/module.h>
#include <net/ipv6.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <linux/if_macvlan.h>
#include <linux/prefetch.h>

#include "fm10k.h"

#define DRV_VERSION "0.15.2-k"
const char fm10k_driver_version[] = DRV_VERSION;
char fm10k_driver_name[] = "fm10k";
static const char fm10k_driver_string[] =
	"Intel(R) Ethernet Switch Host Interface Driver";
static const char fm10k_copyright[] =
	"Copyright (c) 2013 Intel Corporation.";

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) Ethernet Switch Host Interface Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/* single workqueue for entire fm10k driver */
struct workqueue_struct *fm10k_workqueue = NULL;

/**
 * fm10k_init_module - Driver Registration Routine
 *
 * fm10k_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init fm10k_init_module(void)
{
	pr_info("%s - version %s\n", fm10k_driver_string, fm10k_driver_version);
	pr_info("%s\n", fm10k_copyright);

	/* create driver workqueue */
	if (!fm10k_workqueue)
		fm10k_workqueue = create_workqueue("fm10k");

	fm10k_dbg_init();

	return fm10k_register_pci_driver();
}
module_init(fm10k_init_module);

/**
 * fm10k_exit_module - Driver Exit Cleanup Routine
 *
 * fm10k_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit fm10k_exit_module(void)
{
	fm10k_unregister_pci_driver();

	fm10k_dbg_exit();

	/* destroy driver workqueue */
	flush_workqueue(fm10k_workqueue);
	destroy_workqueue(fm10k_workqueue);
	fm10k_workqueue = NULL;
}
module_exit(fm10k_exit_module);

static bool fm10k_alloc_mapped_page(struct fm10k_ring *rx_ring,
				    struct fm10k_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* Only page will be NULL if buffer was consumed */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_page();
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_page(page);

		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = 0;

	return true;
}

/**
 * fm10k_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count)
{
	union fm10k_rx_desc *rx_desc;
	struct fm10k_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;

	/* nothing to do */
	if (!cleaned_count)
		return;

	rx_desc = FM10K_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer[i];
	i -= rx_ring->count;

	do {
		if (!fm10k_alloc_mapped_page(rx_ring, bi))
			break;

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->q.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = FM10K_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer;
			i -= rx_ring->count;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->d.staterr = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		/* record the next descriptor to use */
		rx_ring->next_to_use = i;

		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();

		/* notify hardware of new descriptors */
		writel(i, rx_ring->tail);
	}
}

/**
 * fm10k_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the interface
 **/
static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
				struct fm10k_rx_buffer *old_buff)
{
	struct fm10k_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = &rx_ring->rx_buffer[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	*new_buff = *old_buff;

	/* sync the buffer for use by the device */
	dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
					 old_buff->page_offset,
					 FM10K_RX_BUFSZ,
					 DMA_FROM_DEVICE);
}

static inline bool fm10k_page_is_reserved(struct page *page)
{
	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}

static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
				    struct page *page,
				    unsigned int __maybe_unused truesize)
{
	/* avoid re-using remote pages */
	if (unlikely(fm10k_page_is_reserved(page)))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely(page_count(page) != 1))
		return false;

	/* flip page offset to other buffer */
	rx_buffer->page_offset ^= FM10K_RX_BUFSZ;
#else
	/* move offset up to the next cache line */
	rx_buffer->page_offset += truesize;

	if (rx_buffer->page_offset > (PAGE_SIZE - FM10K_RX_BUFSZ))
		return false;
#endif

	/* Even if we own the page, we are not allowed to use atomic_set()
	 * This would break get_page_unless_zero() users.
	 */
	atomic_inc(&page->_count);

	return true;
}
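/* Illustrative note (not part of the original source): on the common 4 KiB
 * PAGE_SIZE configuration, and assuming FM10K_RX_BUFSZ is half a page
 * (2048 bytes), the XOR above simply ping-pongs page_offset between 0 and
 * 2048, so a single page alternately backs two receive buffers and is only
 * released once the driver is no longer its sole owner.
 */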

/**
 * fm10k_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_buffer: buffer containing page to add
 * @rx_desc: descriptor containing length of buffer written by hardware
 * @skb: sk_buff to place the data into
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 * This is done either through a direct copy if the data in the buffer is
 * less than the skb header size, otherwise it will just attach the page as
 * a frag to the skb.
 *
 * The function will then update the page offset if necessary and return
 * true if the buffer can be reused by the interface.
 **/
static bool fm10k_add_rx_frag(struct fm10k_rx_buffer *rx_buffer,
			      union fm10k_rx_desc *rx_desc,
			      struct sk_buff *skb)
{
	struct page *page = rx_buffer->page;
	unsigned char *va = page_address(page) + rx_buffer->page_offset;
	unsigned int size = le16_to_cpu(rx_desc->w.length);
#if (PAGE_SIZE < 8192)
	unsigned int truesize = FM10K_RX_BUFSZ;
#else
	unsigned int truesize = SKB_DATA_ALIGN(size);
#endif
	unsigned int pull_len;

	if (unlikely(skb_is_nonlinear(skb)))
		goto add_tail_frag;

	if (likely(size <= FM10K_RX_HDR_LEN)) {
		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));

		/* page is not reserved, we can reuse buffer as-is */
		if (likely(!fm10k_page_is_reserved(page)))
			return true;

		/* this page cannot be reused so discard it */
		__free_page(page);
		return false;
	}

	/* we need the header to contain the greater of either ETH_HLEN or
	 * 60 bytes if the skb->len is less than 60 for skb_pad.
	 */
	pull_len = eth_get_headlen(va, FM10K_RX_HDR_LEN);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));

	/* update all of the pointers */
	va += pull_len;
	size -= pull_len;

add_tail_frag:
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
			(unsigned long)va & ~PAGE_MASK, size, truesize);

	return fm10k_can_reuse_rx_page(rx_buffer, page, truesize);
}

static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring,
					     union fm10k_rx_desc *rx_desc,
					     struct sk_buff *skb)
{
	struct fm10k_rx_buffer *rx_buffer;
	struct page *page;

	rx_buffer = &rx_ring->rx_buffer[rx_ring->next_to_clean];
	page = rx_buffer->page;
	prefetchw(page);

	if (likely(!skb)) {
		void *page_addr = page_address(page) +
				  rx_buffer->page_offset;

		/* prefetch first cache line of first page */
		prefetch(page_addr);
#if L1_CACHE_BYTES < 128
		prefetch(page_addr + L1_CACHE_BYTES);
#endif

		/* allocate a skb to store the frags */
		skb = napi_alloc_skb(&rx_ring->q_vector->napi,
				     FM10K_RX_HDR_LEN);
		if (unlikely(!skb)) {
			rx_ring->rx_stats.alloc_failed++;
			return NULL;
		}

		/* we will be copying header into skb->data in
		 * pskb_may_pull so it is in our interest to prefetch
		 * it now to avoid a possible cache miss
		 */
		prefetchw(skb->data);
	}

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      FM10K_RX_BUFSZ,
				      DMA_FROM_DEVICE);

	/* pull page into skb */
	if (fm10k_add_rx_frag(rx_buffer, rx_desc, skb)) {
		/* hand second half of page back to the ring */
		fm10k_reuse_rx_page(rx_ring, rx_buffer);
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page(rx_ring->dev, rx_buffer->dma,
			       PAGE_SIZE, DMA_FROM_DEVICE);
	}

	/* clear contents of rx_buffer */
	rx_buffer->page = NULL;

	return skb;
}

static inline void fm10k_rx_checksum(struct fm10k_ring *ring,
				     union fm10k_rx_desc *rx_desc,
				     struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Rx checksum disabled via ethtool */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* TCP/UDP checksum error bit is set */
	if (fm10k_test_staterr(rx_desc,
			       FM10K_RXD_STATUS_L4E |
			       FM10K_RXD_STATUS_L4E2 |
			       FM10K_RXD_STATUS_IPE |
			       FM10K_RXD_STATUS_IPE2)) {
		ring->rx_stats.csum_err++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	if (fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_L4CS2))
		skb->encapsulation = true;
	else if (!fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_L4CS))
		return;

	skb->ip_summed = CHECKSUM_UNNECESSARY;

	ring->rx_stats.csum_good++;
}

#define FM10K_RSS_L4_TYPES_MASK \
	((1ul << FM10K_RSSTYPE_IPV4_TCP) | \
	 (1ul << FM10K_RSSTYPE_IPV4_UDP) | \
	 (1ul << FM10K_RSSTYPE_IPV6_TCP) | \
	 (1ul << FM10K_RSSTYPE_IPV6_UDP))

static inline void fm10k_rx_hash(struct fm10k_ring *ring,
				 union fm10k_rx_desc *rx_desc,
				 struct sk_buff *skb)
{
	u16 rss_type;

	if (!(ring->netdev->features & NETIF_F_RXHASH))
		return;

	rss_type = le16_to_cpu(rx_desc->w.pkt_info) & FM10K_RXD_RSSTYPE_MASK;
	if (!rss_type)
		return;

	skb_set_hash(skb, le32_to_cpu(rx_desc->d.rss),
		     (FM10K_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
		     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}

static void fm10k_rx_hwtstamp(struct fm10k_ring *rx_ring,
			      union fm10k_rx_desc *rx_desc,
			      struct sk_buff *skb)
{
	struct fm10k_intfc *interface = rx_ring->q_vector->interface;

	FM10K_CB(skb)->tstamp = rx_desc->q.timestamp;

	if (unlikely(interface->flags & FM10K_FLAG_RX_TS_ENABLED))
		fm10k_systime_to_hwtstamp(interface, skb_hwtstamps(skb),
					  le64_to_cpu(rx_desc->q.timestamp));
}

static void fm10k_type_trans(struct fm10k_ring *rx_ring,
			     union fm10k_rx_desc __maybe_unused *rx_desc,
			     struct sk_buff *skb)
{
	struct net_device *dev = rx_ring->netdev;
	struct fm10k_l2_accel *l2_accel = rcu_dereference_bh(rx_ring->l2_accel);

	/* check to see if DGLORT belongs to a MACVLAN */
	if (l2_accel) {
		u16 idx = le16_to_cpu(FM10K_CB(skb)->fi.w.dglort) - 1;

		idx -= l2_accel->dglort;
		if (idx < l2_accel->size && l2_accel->macvlan[idx])
			dev = l2_accel->macvlan[idx];
		else
			l2_accel = NULL;
	}

	skb->protocol = eth_type_trans(skb, dev);

	if (!l2_accel)
		return;

	/* update MACVLAN statistics */
	macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, 1,
			 !!(rx_desc->w.hdr_info &
			    cpu_to_le16(FM10K_RXD_HDR_INFO_XC_MASK)));
}

/**
 * fm10k_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
 * other fields within the skb.
 **/
static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring,
					     union fm10k_rx_desc *rx_desc,
					     struct sk_buff *skb)
{
	unsigned int len = skb->len;

	fm10k_rx_hash(rx_ring, rx_desc, skb);

	fm10k_rx_checksum(rx_ring, rx_desc, skb);

	fm10k_rx_hwtstamp(rx_ring, rx_desc, skb);

	FM10K_CB(skb)->fi.w.vlan = rx_desc->w.vlan;

	skb_record_rx_queue(skb, rx_ring->queue_index);

	FM10K_CB(skb)->fi.d.glort = rx_desc->d.glort;

	if (rx_desc->w.vlan) {
		u16 vid = le16_to_cpu(rx_desc->w.vlan);

		if ((vid & VLAN_VID_MASK) != rx_ring->vid)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
		else if (vid & VLAN_PRIO_MASK)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       vid & VLAN_PRIO_MASK);
	}

	fm10k_type_trans(rx_ring, rx_desc, skb);

	return len;
}

/**
 * fm10k_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * This function updates next to clean. If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/
static bool fm10k_is_non_eop(struct fm10k_ring *rx_ring,
			     union fm10k_rx_desc *rx_desc)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(FM10K_RX_DESC(rx_ring, ntc));

	if (likely(fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_EOP)))
		return false;

	return true;
}

/**
 * fm10k_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
static bool fm10k_cleanup_headers(struct fm10k_ring *rx_ring,
				  union fm10k_rx_desc *rx_desc,
				  struct sk_buff *skb)
{
	if (unlikely((fm10k_test_staterr(rx_desc,
					 FM10K_RXD_STATUS_RXE)))) {
#define FM10K_TEST_RXD_BIT(rxd, bit) \
	((rxd)->w.csum_err & cpu_to_le16(bit))
		if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_SWITCH_ERROR))
			rx_ring->rx_stats.switch_errors++;
		if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_NO_DESCRIPTOR))
			rx_ring->rx_stats.drops++;
		if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_PP_ERROR))
			rx_ring->rx_stats.pp_errors++;
		if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_SWITCH_READY))
			rx_ring->rx_stats.link_errors++;
		if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_TOO_BIG))
			rx_ring->rx_stats.length_errors++;
		dev_kfree_skb_any(skb);
		rx_ring->rx_stats.errors++;
		return true;
	}

	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}

/**
 * fm10k_receive_skb - helper function to handle rx indications
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 **/
static void fm10k_receive_skb(struct fm10k_q_vector *q_vector,
			      struct sk_buff *skb)
{
	napi_gro_receive(&q_vector->napi, skb);
}

static bool fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector,
			       struct fm10k_ring *rx_ring,
			       int budget)
{
	struct sk_buff *skb = rx_ring->skb;
	unsigned int total_bytes = 0, total_packets = 0;
	u16 cleaned_count = fm10k_desc_unused(rx_ring);

	while (likely(total_packets < budget)) {
		union fm10k_rx_desc *rx_desc;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= FM10K_RX_BUFFER_WRITE) {
			fm10k_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = FM10K_RX_DESC(rx_ring, rx_ring->next_to_clean);

		if (!rx_desc->d.staterr)
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * descriptor has been written back
		 */
		dma_rmb();

		/* retrieve a buffer from the ring */
		skb = fm10k_fetch_rx_buffer(rx_ring, rx_desc, skb);

		/* exit if we failed to retrieve a buffer */
		if (!skb)
			break;

		cleaned_count++;

		/* fetch next buffer in frame if non-eop */
		if (fm10k_is_non_eop(rx_ring, rx_desc))
			continue;

		/* verify the packet layout is correct */
		if (fm10k_cleanup_headers(rx_ring, rx_desc, skb)) {
			skb = NULL;
			continue;
		}

		/* populate checksum, timestamp, VLAN, and protocol */
		total_bytes += fm10k_process_skb_fields(rx_ring, rx_desc, skb);

		fm10k_receive_skb(q_vector, skb);

		/* reset skb pointer */
		skb = NULL;

		/* update budget accounting */
		total_packets++;
	}

	/* place incomplete frames back on ring for completion */
	rx_ring->skb = skb;

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_packets;
	rx_ring->stats.bytes += total_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_packets;
	q_vector->rx.total_bytes += total_bytes;

	return total_packets < budget;
}

#define VXLAN_HLEN (sizeof(struct udphdr) + 8)
static struct ethhdr *fm10k_port_is_vxlan(struct sk_buff *skb)
{
	struct fm10k_intfc *interface = netdev_priv(skb->dev);
	struct fm10k_vxlan_port *vxlan_port;

	/* we can only offload a vxlan if we recognize it as such */
	vxlan_port = list_first_entry_or_null(&interface->vxlan_port,
					      struct fm10k_vxlan_port, list);

	if (!vxlan_port)
		return NULL;
	if (vxlan_port->port != udp_hdr(skb)->dest)
		return NULL;

	/* return offset of udp_hdr plus 8 bytes for VXLAN header */
	return (struct ethhdr *)(skb_transport_header(skb) + VXLAN_HLEN);
}

#define FM10K_NVGRE_RESERVED0_FLAGS htons(0x9FFF)
#define NVGRE_TNI htons(0x2000)
struct fm10k_nvgre_hdr {
	__be16 flags;
	__be16 proto;
	__be32 tni;
};

static struct ethhdr *fm10k_gre_is_nvgre(struct sk_buff *skb)
{
	struct fm10k_nvgre_hdr *nvgre_hdr;
	int hlen = ip_hdrlen(skb);

	/* currently only IPv4 is supported due to hlen above */
	if (vlan_get_protocol(skb) != htons(ETH_P_IP))
		return NULL;

	/* our transport header should be NVGRE */
	nvgre_hdr = (struct fm10k_nvgre_hdr *)(skb_network_header(skb) + hlen);

	/* verify all reserved flags are 0 */
	if (nvgre_hdr->flags & FM10K_NVGRE_RESERVED0_FLAGS)
		return NULL;

	/* report start of ethernet header */
	if (nvgre_hdr->flags & NVGRE_TNI)
		return (struct ethhdr *)(nvgre_hdr + 1);

	return (struct ethhdr *)(&nvgre_hdr->tni);
}

__be16 fm10k_tx_encap_offload(struct sk_buff *skb)
{
	u8 l4_hdr = 0, inner_l4_hdr = 0, inner_l4_hlen;
	struct ethhdr *eth_hdr;

	if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB))
		return 0;

	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return 0;
	}

	switch (l4_hdr) {
	case IPPROTO_UDP:
		eth_hdr = fm10k_port_is_vxlan(skb);
		break;
	case IPPROTO_GRE:
		eth_hdr = fm10k_gre_is_nvgre(skb);
		break;
	default:
		return 0;
	}

	if (!eth_hdr)
		return 0;

	switch (eth_hdr->h_proto) {
	case htons(ETH_P_IP):
		inner_l4_hdr = inner_ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		inner_l4_hdr = inner_ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return 0;
	}

	switch (inner_l4_hdr) {
	case IPPROTO_TCP:
		inner_l4_hlen = inner_tcp_hdrlen(skb);
		break;
	case IPPROTO_UDP:
		inner_l4_hlen = 8;
		break;
	default:
		return 0;
	}

	/* The hardware allows tunnel offloads only if the combined inner and
	 * outer header is 184 bytes or less
	 */
	if (skb_inner_transport_header(skb) + inner_l4_hlen -
	    skb_mac_header(skb) > FM10K_TUNNEL_HEADER_LENGTH)
		return 0;

	return eth_hdr->h_proto;
}

static int fm10k_tso(struct fm10k_ring *tx_ring,
		     struct fm10k_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	struct fm10k_tx_desc *tx_desc;
	unsigned char *th;
	u8 hdrlen;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	/* compute header lengths */
	if (skb->encapsulation) {
		if (!fm10k_tx_encap_offload(skb))
			goto err_vxlan;
		th = skb_inner_transport_header(skb);
	} else {
		th = skb_transport_header(skb);
	}

	/* compute offset from SOF to transport header and add header len */
	hdrlen = (th - skb->data) + (((struct tcphdr *)th)->doff << 2);

	first->tx_flags |= FM10K_TX_FLAGS_CSUM;

	/* update gso size and bytecount with header size */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * hdrlen;

	/* populate Tx descriptor header size and mss */
	tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use);
	tx_desc->hdrlen = hdrlen;
	tx_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);

	return 1;
err_vxlan:
	tx_ring->netdev->features &= ~NETIF_F_GSO_UDP_TUNNEL;
	if (net_ratelimit())
		netdev_err(tx_ring->netdev,
			   "TSO requested for unsupported tunnel, disabling offload\n");
	return -1;
}
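/* Worked example (illustrative, not from the original source): for a
 * non-encapsulated TCP segment with a 14-byte Ethernet header, a 20-byte
 * IPv4 header and a TCP header with doff == 5, the transport header sits
 * 34 bytes into the frame, so
 *
 *   hdrlen = 34 + (5 << 2) = 54 bytes
 *
 * and a GSO skb that splits into 4 segments adds (4 - 1) * 54 = 162 bytes
 * of replicated headers to first->bytecount.
 */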

static void fm10k_tx_csum(struct fm10k_ring *tx_ring,
			  struct fm10k_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	struct fm10k_tx_desc *tx_desc;
	union {
		struct iphdr *ipv4;
		struct ipv6hdr *ipv6;
		u8 *raw;
	} network_hdr;
	__be16 protocol;
	u8 l4_hdr = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		goto no_csum;

	if (skb->encapsulation) {
		protocol = fm10k_tx_encap_offload(skb);
		if (!protocol) {
			if (skb_checksum_help(skb)) {
				dev_warn(tx_ring->dev,
					 "failed to offload encap csum!\n");
				tx_ring->tx_stats.csum_err++;
			}
			goto no_csum;
		}
		network_hdr.raw = skb_inner_network_header(skb);
	} else {
		protocol = vlan_get_protocol(skb);
		network_hdr.raw = skb_network_header(skb);
	}

	switch (protocol) {
	case htons(ETH_P_IP):
		l4_hdr = network_hdr.ipv4->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = network_hdr.ipv6->nexthdr;
		break;
	default:
		if (unlikely(net_ratelimit())) {
			dev_warn(tx_ring->dev,
				 "partial checksum but ip version=%x!\n",
				 protocol);
		}
		tx_ring->tx_stats.csum_err++;
		goto no_csum;
	}

	switch (l4_hdr) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		break;
	case IPPROTO_GRE:
		if (skb->encapsulation)
			break;
	default:
		if (unlikely(net_ratelimit())) {
			dev_warn(tx_ring->dev,
				 "partial checksum but l4 proto=%x!\n",
				 l4_hdr);
		}
		tx_ring->tx_stats.csum_err++;
		goto no_csum;
	}

	/* update TX checksum flag */
	first->tx_flags |= FM10K_TX_FLAGS_CSUM;
	tx_ring->tx_stats.csum_good++;

no_csum:
	/* populate Tx descriptor header size and mss */
	tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use);
	tx_desc->hdrlen = 0;
	tx_desc->mss = 0;
}

#define FM10K_SET_FLAG(_input, _flag, _result) \
	((_flag <= _result) ? \
	 ((u32)(_input & _flag) * (_result / _flag)) : \
	 ((u32)(_input & _flag) / (_flag / _result)))
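/* Illustrative expansion (not from the original source): for single-bit
 * masks the macro is a branch-free translation from one flag namespace to
 * another.  Assuming, purely as an example, FM10K_TX_FLAGS_CSUM == BIT(0)
 * and FM10K_TXD_FLAG_CSUM == BIT(2), then
 *
 *   FM10K_SET_FLAG(tx_flags, FM10K_TX_FLAGS_CSUM, FM10K_TXD_FLAG_CSUM)
 *     == (u32)(tx_flags & BIT(0)) * (BIT(2) / BIT(0))
 *
 * which yields BIT(2) when the software flag is set and 0 otherwise; the
 * ternary only selects between scaling up or dividing down depending on
 * which of the two bit positions is higher.
 */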

static u8 fm10k_tx_desc_flags(struct sk_buff *skb, u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	u32 desc_flags = 0;

	/* set timestamping bits */
	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
	    likely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
		desc_flags |= FM10K_TXD_FLAG_TIME;

	/* set checksum offload bits */
	desc_flags |= FM10K_SET_FLAG(tx_flags, FM10K_TX_FLAGS_CSUM,
				     FM10K_TXD_FLAG_CSUM);

	return desc_flags;
}

static bool fm10k_tx_desc_push(struct fm10k_ring *tx_ring,
			       struct fm10k_tx_desc *tx_desc, u16 i,
			       dma_addr_t dma, unsigned int size, u8 desc_flags)
{
	/* set RS and INT for last frame in a cache line */
	if ((++i & (FM10K_TXD_WB_FIFO_SIZE - 1)) == 0)
		desc_flags |= FM10K_TXD_FLAG_RS | FM10K_TXD_FLAG_INT;

	/* record values to descriptor */
	tx_desc->buffer_addr = cpu_to_le64(dma);
	tx_desc->flags = desc_flags;
	tx_desc->buflen = cpu_to_le16(size);

	/* return true if we just wrapped the ring */
	return i == tx_ring->count;
}
static int __fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

	/* Memory barrier before checking head and tail */
	smp_mb();

	/* Check again in a case another CPU has just made room available */
	if (likely(fm10k_desc_unused(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;
	return 0;
}

static inline int fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
{
	if (likely(fm10k_desc_unused(tx_ring) >= size))
		return 0;
	return __fm10k_maybe_stop_tx(tx_ring, size);
}

static void fm10k_tx_map(struct fm10k_ring *tx_ring,
			 struct fm10k_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	struct fm10k_tx_buffer *tx_buffer;
	struct fm10k_tx_desc *tx_desc;
	struct skb_frag_struct *frag;
	unsigned char *data;
	dma_addr_t dma;
	unsigned int data_len, size;
	u32 tx_flags = first->tx_flags;
	u16 i = tx_ring->next_to_use;
	u8 flags = fm10k_tx_desc_flags(skb, tx_flags);

	tx_desc = FM10K_TX_DESC(tx_ring, i);

	/* add HW VLAN tag */
	if (skb_vlan_tag_present(skb))
		tx_desc->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
	else
		tx_desc->vlan = 0;

	size = skb_headlen(skb);
	data = skb->data;

	dma = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE);

	data_len = skb->data_len;
	tx_buffer = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buffer, len, size);
		dma_unmap_addr_set(tx_buffer, dma, dma);

		while (unlikely(size > FM10K_MAX_DATA_PER_TXD)) {
			if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++, dma,
					       FM10K_MAX_DATA_PER_TXD, flags)) {
				tx_desc = FM10K_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += FM10K_MAX_DATA_PER_TXD;
			size -= FM10K_MAX_DATA_PER_TXD;
		}

		if (likely(!data_len))
			break;

		if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++,
				       dma, size, flags)) {
			tx_desc = FM10K_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_buffer = &tx_ring->tx_buffer[i];
	}

	/* write last descriptor with LAST bit set */
	flags |= FM10K_TXD_FLAG_LAST;

	if (fm10k_tx_desc_push(tx_ring, tx_desc, i++, dma, size, flags))
		i = 0;

	/* record bytecount for BQL */
	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* record SW timestamp if HW timestamp is not available */
	skb_tx_timestamp(first->skb);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch. (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	tx_ring->next_to_use = i;

	/* Make sure there is space in the ring for the next send. */
	fm10k_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* notify HW of packet */
	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
		writel(i, tx_ring->tail);

		/* we need this if more than one processor can write to our tail
		 * at a time, it synchronizes IO on IA64/Altix systems
		 */
		mmiowb();
	}

	return;
dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_buffer map */
	for (;;) {
		tx_buffer = &tx_ring->tx_buffer[i];
		fm10k_unmap_and_free_tx_resource(tx_ring, tx_buffer);
		if (tx_buffer == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}
netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
				  struct fm10k_ring *tx_ring)
{
	struct fm10k_tx_buffer *first;
	int tso;
	u32 tx_flags = 0;
	unsigned short f;
	u16 count = TXD_USE_COUNT(skb_headlen(skb));

	/* need: 1 descriptor per page * PAGE_SIZE/FM10K_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/FM10K_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head
	 * otherwise try next time
	 */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

	if (fm10k_maybe_stop_tx(tx_ring, count + 3)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
	first->gso_segs = 1;

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;

	tso = fm10k_tso(tx_ring, first);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		fm10k_tx_csum(tx_ring, first);

	fm10k_tx_map(tx_ring, first);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;

	return NETDEV_TX_OK;
}

static u64 fm10k_get_tx_completed(struct fm10k_ring *ring)
{
	return ring->stats.packets;
}

static u64 fm10k_get_tx_pending(struct fm10k_ring *ring)
{
	/* use SW head and tail until we have real hardware */
	u32 head = ring->next_to_clean;
	u32 tail = ring->next_to_use;

	return ((head <= tail) ? tail : tail + ring->count) - head;
}
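/* Worked example (illustrative, not from the original source): on a ring
 * with count == 512, next_to_clean == 500 and next_to_use == 10, the
 * producer index has wrapped past the consumer, so the pending count is
 * (10 + 512) - 500 = 22 descriptors still awaiting completion.
 */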

bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring)
{
	u32 tx_done = fm10k_get_tx_completed(tx_ring);
	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
	u32 tx_pending = fm10k_get_tx_pending(tx_ring);

	clear_check_for_tx_hang(tx_ring);

	/* Check for a hung queue, but be thorough. This verifies
	 * that a transmit has been completed since the previous
	 * check AND there is at least one packet pending. By
	 * requiring this to fail twice we avoid races with
	 * clearing the ARMED bit and conditions where we
	 * run the check_tx_hang logic with a transmit completion
	 * pending but without time to complete it yet.
	 */
	if (!tx_pending || (tx_done_old != tx_done)) {
		/* update completed stats and continue */
		tx_ring->tx_stats.tx_done_old = tx_done;
		/* reset the countdown */
		clear_bit(__FM10K_HANG_CHECK_ARMED, &tx_ring->state);

		return false;
	}

	/* make sure it is true for two checks in a row */
	return test_and_set_bit(__FM10K_HANG_CHECK_ARMED, &tx_ring->state);
}

/**
 * fm10k_tx_timeout_reset - initiate reset due to Tx timeout
 * @interface: driver private struct
 **/
void fm10k_tx_timeout_reset(struct fm10k_intfc *interface)
{
	/* Do the reset outside of interrupt context */
	if (!test_bit(__FM10K_DOWN, &interface->state)) {
		interface->tx_timeout_count++;
		interface->flags |= FM10K_FLAG_RESET_REQUESTED;
		fm10k_service_event_schedule(interface);
	}
}

/**
 * fm10k_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 **/
static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector,
			       struct fm10k_ring *tx_ring)
{
	struct fm10k_intfc *interface = q_vector->interface;
	struct fm10k_tx_buffer *tx_buffer;
	struct fm10k_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = q_vector->tx.work_limit;
	unsigned int i = tx_ring->next_to_clean;

	if (test_bit(__FM10K_DOWN, &interface->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer[i];
	tx_desc = FM10K_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		struct fm10k_tx_desc *eop_desc = tx_buffer->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->flags & FM10K_TXD_FLAG_DONE))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;

		/* free the skb */
		dev_consume_skb_any(tx_buffer->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buffer->skb = NULL;
		dma_unmap_len_set(tx_buffer, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer;
				tx_desc = FM10K_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buffer, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer;
			tx_desc = FM10K_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	if (check_for_tx_hang(tx_ring) && fm10k_check_tx_hang(tx_ring)) {
		/* schedule immediate reset if we believe we hung */
		struct fm10k_hw *hw = &interface->hw;

		netif_err(interface, drv, tx_ring->netdev,
			  "Detected Tx Unit Hang\n"
			  "  Tx Queue             <%d>\n"
			  "  TDH, TDT             <%x>, <%x>\n"
			  "  next_to_use          <%x>\n"
			  "  next_to_clean        <%x>\n",
			  tx_ring->queue_index,
			  fm10k_read_reg(hw, FM10K_TDH(tx_ring->reg_idx)),
			  fm10k_read_reg(hw, FM10K_TDT(tx_ring->reg_idx)),
			  tx_ring->next_to_use, i);

		netif_stop_subqueue(tx_ring->netdev,
				    tx_ring->queue_index);

		netif_info(interface, probe, tx_ring->netdev,
			   "tx hang %d detected on queue %d, resetting interface\n",
			   interface->tx_timeout_count + 1,
			   tx_ring->queue_index);

		fm10k_tx_timeout_reset(interface);

		/* the netdev is about to reset, no point in enabling stuff */
		return true;
	}

	/* notify netdev of completed buffers */
	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD min_t(u16, FM10K_MIN_TXD - 1, DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (fm10k_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__FM10K_DOWN, &interface->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}

/**
 * fm10k_update_itr - update the dynamic ITR value based on packet size
 *
 * Stores a new ITR value based strictly on packet size. The
 * divisors and thresholds used by this function were determined based
 * on theoretical maximum wire speed and testing data, in order to
 * minimize response time while increasing bulk throughput.
 *
 * @ring_container: Container for rings to have ITR updated
 **/
static void fm10k_update_itr(struct fm10k_ring_container *ring_container)
{
	unsigned int avg_wire_size, packets;

	/* Only update ITR if we are using adaptive setting */
	if (!(ring_container->itr & FM10K_ITR_ADAPTIVE))
		goto clear_counts;

	packets = ring_container->total_packets;
	if (!packets)
		goto clear_counts;

	avg_wire_size = ring_container->total_bytes / packets;

	/* Add 24 bytes to size to account for CRC, preamble, and gap */
	avg_wire_size += 24;

	/* Don't starve jumbo frames */
	if (avg_wire_size > 3000)
		avg_wire_size = 3000;

	/* Give a little boost to mid-size frames */
	if ((avg_wire_size > 300) && (avg_wire_size < 1200))
		avg_wire_size /= 3;
	else
		avg_wire_size /= 2;

	/* write back value and retain adaptive flag */
	ring_container->itr = avg_wire_size | FM10K_ITR_ADAPTIVE;

clear_counts:
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;
}
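/* Worked example (illustrative, not from the original source): a burst of
 * 64-byte frames gives avg_wire_size = 64 + 24 = 88, which falls outside
 * the 300-1200 mid-size window and is halved to 44, so small packets get a
 * short throttle interval for low latency.  Full 1514-byte frames give
 * 1538 / 2 = 769, a much longer interval that favors bulk throughput,
 * while a 600-byte average lands in the mid-size window and is divided by
 * 3 to 208.
 */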

static void fm10k_qv_enable(struct fm10k_q_vector *q_vector)
{
	/* Enable auto-mask and clear the current mask */
	u32 itr = FM10K_ITR_ENABLE;

	/* Update Tx ITR */
	fm10k_update_itr(&q_vector->tx);

	/* Update Rx ITR */
	fm10k_update_itr(&q_vector->rx);

	/* Store Tx itr in timer slot 0 */
	itr |= (q_vector->tx.itr & FM10K_ITR_MAX);

	/* Shift Rx itr to timer slot 1 */
	itr |= (q_vector->rx.itr & FM10K_ITR_MAX) << FM10K_ITR_INTERVAL1_SHIFT;

	/* Write the final value to the ITR register */
	writel(itr, q_vector->itr);
}

static int fm10k_poll(struct napi_struct *napi, int budget)
{
	struct fm10k_q_vector *q_vector =
			       container_of(napi, struct fm10k_q_vector, napi);
	struct fm10k_ring *ring;
	int per_ring_budget;
	bool clean_complete = true;

	fm10k_for_each_ring(ring, q_vector->tx)
		clean_complete &= fm10k_clean_tx_irq(q_vector, ring);

	/* attempt to distribute budget to each queue fairly, but don't
	 * allow the budget to go below 1 because we'll exit polling
	 */
	if (q_vector->rx.count > 1)
		per_ring_budget = max(budget / q_vector->rx.count, 1);
	else
		per_ring_budget = budget;

	fm10k_for_each_ring(ring, q_vector->rx)
		clean_complete &= fm10k_clean_rx_irq(q_vector, ring,
						     per_ring_budget);

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;

	/* all work done, exit the polling mode */
	napi_complete(napi);

	/* re-enable the q_vector */
	fm10k_qv_enable(q_vector);

	return 0;
}

/**
 * fm10k_set_qos_queues: Allocate queues for a QOS-enabled device
 * @interface: board private structure to initialize
 *
 * When QoS (Quality of Service) is enabled, allocate queues for
 * each traffic class. If multiqueue isn't available, then abort QoS
 * initialization.
 *
 * This function handles all combinations of QoS and RSS.
 *
 **/
static bool fm10k_set_qos_queues(struct fm10k_intfc *interface)
{
	struct net_device *dev = interface->netdev;
	struct fm10k_ring_feature *f;
	int rss_i, i;
	int pcs;

	/* Map queue offset and counts onto allocated tx queues */
	pcs = netdev_get_num_tc(dev);

	if (pcs <= 1)
		return false;

	/* set QoS mask and indices */
	f = &interface->ring_feature[RING_F_QOS];
	f->indices = pcs;
	f->mask = (1 << fls(pcs - 1)) - 1;

	/* determine the upper limit for our current DCB mode */
	rss_i = interface->hw.mac.max_queues / pcs;
	rss_i = 1 << (fls(rss_i) - 1);

	/* set RSS mask and indices */
	f = &interface->ring_feature[RING_F_RSS];
	rss_i = min_t(u16, rss_i, f->limit);
	f->indices = rss_i;
	f->mask = (1 << fls(rss_i - 1)) - 1;

	/* configure pause class to queue mapping */
	for (i = 0; i < pcs; i++)
		netdev_set_tc_queue(dev, i, rss_i, rss_i * i);

	interface->num_rx_queues = rss_i * pcs;
	interface->num_tx_queues = rss_i * pcs;

	return true;
}
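/* Worked example (illustrative, not from the original source): with 4
 * traffic classes configured and, hypothetically, hw.mac.max_queues == 128
 * and an RSS limit of at least 32, rss_i becomes 128 / 4 = 32 (already a
 * power of two), each class is given its own block of 32 queues via
 * netdev_set_tc_queue(), and the device ends up with 32 * 4 = 128 Rx and
 * Tx queues in total.
 */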

/**
 * fm10k_set_rss_queues: Allocate queues for RSS
 * @interface: board private structure to initialize
 *
 * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try
 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
 *
 **/
static bool fm10k_set_rss_queues(struct fm10k_intfc *interface)
{
	struct fm10k_ring_feature *f;
	u16 rss_i;

	f = &interface->ring_feature[RING_F_RSS];
	rss_i = min_t(u16, interface->hw.mac.max_queues, f->limit);

	/* record indices and power of 2 mask for RSS */
	f->indices = rss_i;
	f->mask = (1 << fls(rss_i - 1)) - 1;

	interface->num_rx_queues = rss_i;
	interface->num_tx_queues = rss_i;

	return true;
}

/**
 * fm10k_set_num_queues: Allocate queues for device, feature dependent
 * @interface: board private structure to initialize
 *
 * This is the top level queue allocation routine. The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features. This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void fm10k_set_num_queues(struct fm10k_intfc *interface)
{
	/* Start with base case */
	interface->num_rx_queues = 1;
	interface->num_tx_queues = 1;

	if (fm10k_set_qos_queues(interface))
		return;

	fm10k_set_rss_queues(interface);
}

/**
 * fm10k_alloc_q_vector - Allocate memory for a single interrupt vector
 * @interface: board private structure to initialize
 * @v_count: q_vectors allocated on interface, used for ring interleaving
 * @v_idx: index of vector in interface struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector. If allocation fails we return -ENOMEM.
 **/
static int fm10k_alloc_q_vector(struct fm10k_intfc *interface,
				unsigned int v_count, unsigned int v_idx,
				unsigned int txr_count, unsigned int txr_idx,
				unsigned int rxr_count, unsigned int rxr_idx)
{
	struct fm10k_q_vector *q_vector;
	struct fm10k_ring *ring;
	int ring_count, size;

	ring_count = txr_count + rxr_count;
	size = sizeof(struct fm10k_q_vector) +
	       (sizeof(struct fm10k_ring) * ring_count);

	/* allocate q_vector and rings */
	q_vector = kzalloc(size, GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	/* initialize NAPI */
	netif_napi_add(interface->netdev, &q_vector->napi,
		       fm10k_poll, NAPI_POLL_WEIGHT);

	/* tie q_vector and interface together */
	interface->q_vector[v_idx] = q_vector;
	q_vector->interface = interface;
	q_vector->v_idx = v_idx;

	/* initialize pointer to rings */
	ring = q_vector->ring;

	/* save Tx ring container info */
	q_vector->tx.ring = ring;
	q_vector->tx.work_limit = FM10K_DEFAULT_TX_WORK;
	q_vector->tx.itr = interface->tx_itr;
	q_vector->tx.count = txr_count;

	while (txr_count) {
		/* assign generic ring traits */
		ring->dev = &interface->pdev->dev;
		ring->netdev = interface->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* apply Tx specific ring traits */
		ring->count = interface->tx_ring_count;
		ring->queue_index = txr_idx;

		/* assign ring to interface */
		interface->tx_ring[txr_idx] = ring;

		/* update count and index */
		txr_count--;
		txr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	/* save Rx ring container info */
	q_vector->rx.ring = ring;
	q_vector->rx.itr = interface->rx_itr;
	q_vector->rx.count = rxr_count;

	while (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &interface->pdev->dev;
		ring->netdev = interface->netdev;
		rcu_assign_pointer(ring->l2_accel, interface->l2_accel);

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* apply Rx specific ring traits */
		ring->count = interface->rx_ring_count;
		ring->queue_index = rxr_idx;

		/* assign ring to interface */
		interface->rx_ring[rxr_idx] = ring;

		/* update count and index */
		rxr_count--;
1648 | rxr_idx += v_count; | |
1649 | ||
1650 | /* push pointer to next ring */ | |
1651 | ring++; | |
1652 | } | |
1653 | ||
7461fd91 AD |
1654 | fm10k_dbg_q_vector_init(q_vector); |
1655 | ||
18283cad AD |
1656 | return 0; |
1657 | } | |
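/* Sketch of the ring interleave performed by fm10k_alloc_q_vector() above
 * (illustrative; the vector and per-vector ring counts are assumptions).
 * Because txr_idx/rxr_idx advance by v_count, vector v owns rings
 * v, v + v_count, v + 2 * v_count, ...
 */
#include <stdio.h>

int main(void)
{
	unsigned int v_count = 4;		/* number of q_vectors */
	unsigned int rings_per_vector = 2;	/* Tx (or Rx) rings per vector */
	unsigned int v_idx, n, idx;

	for (v_idx = 0; v_idx < v_count; v_idx++) {
		printf("vector %u:", v_idx);
		for (n = 0, idx = v_idx; n < rings_per_vector; n++, idx += v_count)
			printf(" ring %u", idx);
		printf("\n");
	}
	return 0;
}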
1658 | ||
1659 | /** | |
1660 | * fm10k_free_q_vector - Free memory allocated for specific interrupt vector | |
1661 | * @interface: board private structure to initialize | |
1662 | * @v_idx: Index of vector to be freed | |
1663 | * | |
1664 | * This function frees the memory allocated to the q_vector. In addition if | |
1665 | * NAPI is enabled it will delete any references to the NAPI struct prior | |
1666 | * to freeing the q_vector. | |
1667 | **/ | |
1668 | static void fm10k_free_q_vector(struct fm10k_intfc *interface, int v_idx) | |
1669 | { | |
1670 | struct fm10k_q_vector *q_vector = interface->q_vector[v_idx]; | |
e27ef599 AD |
1671 | struct fm10k_ring *ring; |
1672 | ||
7461fd91 AD |
1673 | fm10k_dbg_q_vector_exit(q_vector); |
1674 | ||
e27ef599 AD |
1675 | fm10k_for_each_ring(ring, q_vector->tx) |
1676 | interface->tx_ring[ring->queue_index] = NULL; | |
1677 | ||
1678 | fm10k_for_each_ring(ring, q_vector->rx) | |
1679 | interface->rx_ring[ring->queue_index] = NULL; | |
18283cad AD |
1680 | |
1681 | interface->q_vector[v_idx] = NULL; | |
1682 | netif_napi_del(&q_vector->napi); | |
1683 | kfree_rcu(q_vector, rcu); | |
1684 | } | |
1685 | ||
1686 | /** | |
1687 | * fm10k_alloc_q_vectors - Allocate memory for interrupt vectors | |
1688 | * @interface: board private structure to initialize | |
1689 | * | |
1690 | * We allocate one q_vector per queue interrupt. If allocation fails we | |
1691 | * return -ENOMEM. | |
1692 | **/ | |
1693 | static int fm10k_alloc_q_vectors(struct fm10k_intfc *interface) | |
1694 | { | |
1695 | unsigned int q_vectors = interface->num_q_vectors; | |
1696 | unsigned int rxr_remaining = interface->num_rx_queues; | |
1697 | unsigned int txr_remaining = interface->num_tx_queues; | |
1698 | unsigned int rxr_idx = 0, txr_idx = 0, v_idx = 0; | |
1699 | int err; | |
1700 | ||
1701 | if (q_vectors >= (rxr_remaining + txr_remaining)) { | |
1702 | for (; rxr_remaining; v_idx++) { | |
1703 | err = fm10k_alloc_q_vector(interface, q_vectors, v_idx, | |
1704 | 0, 0, 1, rxr_idx); | |
1705 | if (err) | |
1706 | goto err_out; | |
1707 | ||
1708 | /* update counts and index */ | |
1709 | rxr_remaining--; | |
1710 | rxr_idx++; | |
1711 | } | |
1712 | } | |
1713 | ||
1714 | for (; v_idx < q_vectors; v_idx++) { | |
1715 | int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); | |
1716 | int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); | |
1717 | ||
1718 | err = fm10k_alloc_q_vector(interface, q_vectors, v_idx, | |
1719 | tqpv, txr_idx, | |
1720 | rqpv, rxr_idx); | |
1721 | ||
1722 | if (err) | |
1723 | goto err_out; | |
1724 | ||
1725 | /* update counts and index */ | |
1726 | rxr_remaining -= rqpv; | |
1727 | txr_remaining -= tqpv; | |
1728 | rxr_idx++; | |
1729 | txr_idx++; | |
1730 | } | |
1731 | ||
1732 | return 0; | |
1733 | ||
1734 | err_out: | |
1735 | interface->num_tx_queues = 0; | |
1736 | interface->num_rx_queues = 0; | |
1737 | interface->num_q_vectors = 0; | |
1738 | ||
1739 | while (v_idx--) | |
1740 | fm10k_free_q_vector(interface, v_idx); | |
1741 | ||
1742 | return -ENOMEM; | |
1743 | } | |
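/* Sketch of how the second loop in fm10k_alloc_q_vectors() spreads the
 * remaining rings over the remaining vectors (illustrative; the ring and
 * vector counts are assumptions). DIV_ROUND_UP keeps the split as even as
 * possible, e.g. 6 rings over 4 vectors becomes 2, 2, 1, 1.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int q_vectors = 4, rxr_remaining = 6, v_idx;

	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		unsigned int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);

		printf("vector %u gets %u Rx ring(s)\n", v_idx, rqpv);
		rxr_remaining -= rqpv;
	}
	return 0;
}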
1744 | ||
1745 | /** | |
1746 | * fm10k_free_q_vectors - Free memory allocated for interrupt vectors | |
1747 | * @interface: board private structure to initialize | |
1748 | * | |
1749 | * This function frees the memory allocated to the q_vectors. In addition if | |
1750 | * NAPI is enabled it will delete any references to the NAPI struct prior | |
1751 | * to freeing the q_vector. | |
1752 | **/ | |
1753 | static void fm10k_free_q_vectors(struct fm10k_intfc *interface) | |
1754 | { | |
1755 | int v_idx = interface->num_q_vectors; | |
1756 | ||
1757 | interface->num_tx_queues = 0; | |
1758 | interface->num_rx_queues = 0; | |
1759 | interface->num_q_vectors = 0; | |
1760 | ||
1761 | while (v_idx--) | |
1762 | fm10k_free_q_vector(interface, v_idx); | |
1763 | } | |
1764 | ||
1765 | /** | |
1766 | * fm10k_reset_msix_capability - reset MSI-X capability | |
1767 | * @interface: board private structure to initialize | |
1768 | * | |
1769 | * Reset the MSI-X capability back to its starting state | |
1770 | **/ | |
1771 | static void fm10k_reset_msix_capability(struct fm10k_intfc *interface) | |
1772 | { | |
1773 | pci_disable_msix(interface->pdev); | |
1774 | kfree(interface->msix_entries); | |
1775 | interface->msix_entries = NULL; | |
1776 | } | |
1777 | ||
1778 | /** | |
1779 | * fm10k_init_msix_capability - configure MSI-X capability | |
1780 | * @interface: board private structure to initialize | |
1781 | * | |
1782 | * Attempt to configure the interrupts using the best available | |
1783 | * capabilities of the hardware and the kernel. | |
1784 | **/ | |
1785 | static int fm10k_init_msix_capability(struct fm10k_intfc *interface) | |
1786 | { | |
1787 | struct fm10k_hw *hw = &interface->hw; | |
1788 | int v_budget, vector; | |
1789 | ||
1790 | /* It's easy to be greedy for MSI-X vectors, but it really | |
1791 | * doesn't do us much good if we have a lot more vectors | |
1792 | * than CPUs. So let's be conservative and only ask for | |
1793 | * (roughly) the same number of vectors as there are CPUs. | |
1794 | * The default is to use pairs of vectors. | |
1795 | */ | |
1796 | v_budget = max(interface->num_rx_queues, interface->num_tx_queues); | |
1797 | v_budget = min_t(u16, v_budget, num_online_cpus()); | |
1798 | ||
1799 | /* account for vectors not related to queues */ | |
1800 | v_budget += NON_Q_VECTORS(hw); | |
1801 | ||
1802 | /* At the same time, hardware can only support a maximum of | |
1803 | * hw->mac.max_msix_vectors vectors. With features | |
1804 | * such as RSS and VMDq, we can easily surpass the number of Rx and Tx | |
1805 | * descriptor queues supported by our device. Thus, we cap it off in | |
1806 | * those rare cases where the cpu count also exceeds our vector limit. | |
1807 | */ | |
1808 | v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors); | |
1809 | ||
1810 | /* A failure in MSI-X entry allocation is fatal. */ | |
1811 | interface->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), | |
1812 | GFP_KERNEL); | |
1813 | if (!interface->msix_entries) | |
1814 | return -ENOMEM; | |
1815 | ||
1816 | /* populate entry values */ | |
1817 | for (vector = 0; vector < v_budget; vector++) | |
1818 | interface->msix_entries[vector].entry = vector; | |
1819 | ||
1820 | /* Attempt to enable MSI-X with requested value */ | |
1821 | v_budget = pci_enable_msix_range(interface->pdev, | |
1822 | interface->msix_entries, | |
1823 | MIN_MSIX_COUNT(hw), | |
1824 | v_budget); | |
1825 | if (v_budget < 0) { | |
1826 | kfree(interface->msix_entries); | |
1827 | interface->msix_entries = NULL; | |
1828 | return -ENOMEM; | |
1829 | } | |
1830 | ||
1831 | /* record the number of queues available for q_vectors */ | |
1832 | interface->num_q_vectors = v_budget - NON_Q_VECTORS(hw); | |
1833 | ||
1834 | return 0; | |
1835 | } | |
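/* Sketch of the MSI-X vector budget computed above (illustrative; the CPU
 * count, queue counts, NON_Q_VECTORS value and hardware MSI-X ceiling are
 * assumptions, not values read from hardware).
 */
#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int num_rx = 64, num_tx = 64;	/* queues wanted */
	unsigned int cpus = 16;			/* num_online_cpus() */
	unsigned int non_q_vectors = 1;		/* e.g. mailbox vector */
	unsigned int max_msix = 64;		/* hw->mac.max_msix_vectors */
	unsigned int v_budget;

	v_budget = num_rx > num_tx ? num_rx : num_tx;	/* one vector per queue pair */
	v_budget = min_u(v_budget, cpus);		/* no more than CPUs */
	v_budget += non_q_vectors;			/* non-queue interrupts */
	v_budget = min_u(v_budget, max_msix);		/* hardware ceiling */

	printf("request %u MSI-X vectors, %u usable for q_vectors\n",
	       v_budget, v_budget - non_q_vectors);
	return 0;
}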
1836 | ||
aa3ac822 AD |
1837 | /** |
1838 | * fm10k_cache_ring_qos - Descriptor ring to register mapping for QoS | |
1839 | * @interface: Interface structure containing rings and devices | |
1840 | * | |
1841 | * Cache the descriptor ring offsets for QoS | |
1842 | **/ | |
1843 | static bool fm10k_cache_ring_qos(struct fm10k_intfc *interface) | |
1844 | { | |
1845 | struct net_device *dev = interface->netdev; | |
1846 | int pc, offset, rss_i, i, q_idx; | |
1847 | u16 pc_stride = interface->ring_feature[RING_F_QOS].mask + 1; | |
1848 | u8 num_pcs = netdev_get_num_tc(dev); | |
1849 | ||
1850 | if (num_pcs <= 1) | |
1851 | return false; | |
1852 | ||
1853 | rss_i = interface->ring_feature[RING_F_RSS].indices; | |
1854 | ||
1855 | for (pc = 0, offset = 0; pc < num_pcs; pc++, offset += rss_i) { | |
1856 | q_idx = pc; | |
1857 | for (i = 0; i < rss_i; i++) { | |
1858 | interface->tx_ring[offset + i]->reg_idx = q_idx; | |
1859 | interface->tx_ring[offset + i]->qos_pc = pc; | |
1860 | interface->rx_ring[offset + i]->reg_idx = q_idx; | |
1861 | interface->rx_ring[offset + i]->qos_pc = pc; | |
1862 | q_idx += pc_stride; | |
1863 | } | |
1864 | } | |
1865 | ||
1866 | return true; | |
1867 | } | |
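/* Sketch of the QoS ring-to-register mapping above (illustrative; the class
 * count, RSS width and pc_stride values are assumptions). Each pause class
 * starts at its own register index and strides by pc_stride for every
 * additional RSS queue in that class.
 */
#include <stdio.h>

int main(void)
{
	unsigned int num_pcs = 4, rss_i = 2, pc_stride = 4;
	unsigned int pc, offset, i;

	for (pc = 0, offset = 0; pc < num_pcs; pc++, offset += rss_i) {
		unsigned int q_idx = pc;

		for (i = 0; i < rss_i; i++) {
			printf("ring %u -> reg_idx %u (pc %u)\n",
			       offset + i, q_idx, pc);
			q_idx += pc_stride;
		}
	}
	return 0;
}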
1868 | ||
1869 | /** | |
1870 | * fm10k_cache_ring_rss - Descriptor ring to register mapping for RSS | |
1871 | * @interface: Interface structure containing rings and devices | |
1872 | * | |
1873 | * Cache the descriptor ring offsets for RSS | |
1874 | **/ | |
1875 | static void fm10k_cache_ring_rss(struct fm10k_intfc *interface) | |
1876 | { | |
1877 | int i; | |
1878 | ||
1879 | for (i = 0; i < interface->num_rx_queues; i++) | |
1880 | interface->rx_ring[i]->reg_idx = i; | |
1881 | ||
1882 | for (i = 0; i < interface->num_tx_queues; i++) | |
1883 | interface->tx_ring[i]->reg_idx = i; | |
1884 | } | |
1885 | ||
1886 | /** | |
1887 | * fm10k_assign_rings - Map rings to network devices | |
1888 | * @interface: Interface structure containing rings and devices | |
1889 | * | |
1890 | * This function is meant to go through and configure both the network | |
1891 | * devices so that they contain rings, and configure the rings so that | |
1892 | * they function with their network devices. | |
1893 | **/ | |
1894 | static void fm10k_assign_rings(struct fm10k_intfc *interface) | |
1895 | { | |
1896 | if (fm10k_cache_ring_qos(interface)) | |
1897 | return; | |
1898 | ||
1899 | fm10k_cache_ring_rss(interface); | |
1900 | } | |
1901 | ||
18283cad AD |
1902 | static void fm10k_init_reta(struct fm10k_intfc *interface) |
1903 | { | |
1904 | u16 i, rss_i = interface->ring_feature[RING_F_RSS].indices; | |
1905 | u32 reta, base; | |
1906 | ||
1907 | /* If the netdev is initialized we have to maintain the table if possible */ | |
b4a5127b | 1908 | if (interface->netdev->reg_state != NETREG_UNINITIALIZED) { |
18283cad AD |
1909 | for (i = FM10K_RETA_SIZE; i--;) { |
1910 | reta = interface->reta[i]; | |
1911 | if ((((reta << 24) >> 24) < rss_i) && | |
1912 | (((reta << 16) >> 24) < rss_i) && | |
1913 | (((reta << 8) >> 24) < rss_i) && | |
1914 | (((reta) >> 24) < rss_i)) | |
1915 | continue; | |
1916 | goto repopulate_reta; | |
1917 | } | |
1918 | ||
1919 | /* do nothing if all of the elements are in bounds */ | |
1920 | return; | |
1921 | } | |
1922 | ||
1923 | repopulate_reta: | |
1924 | /* Populate the redirection table 4 entries at a time. To do this | |
1925 | * we are generating the results for n and n+2 and then interleaving | |
1926 | * those with the results for n+1 and n+3. | |
1927 | */ | |
1928 | for (i = FM10K_RETA_SIZE; i--;) { | |
1929 | /* first pass generates n and n+2 */ | |
1930 | base = ((i * 0x00040004) + 0x00020000) * rss_i; | |
1931 | reta = (base & 0x3F803F80) >> 7; | |
1932 | ||
1933 | /* second pass generates n+1 and n+3 */ | |
1934 | base += 0x00010001 * rss_i; | |
1935 | reta |= (base & 0x3F803F80) << 1; | |
1936 | ||
1937 | interface->reta[i] = reta; | |
1938 | } | |
1939 | } | |
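/* Sketch checking the RETA packing trick above against the straightforward
 * formula (illustrative; rss_i and the 32 x 4-entry table size are
 * assumptions). Table slot n should point at queue (n * rss_i) / 128, and
 * the two passes pack four such slots into each 32-bit register.
 */
#include <stdio.h>
#include <stdint.h>

#define RETA_SIZE	32	/* assumed FM10K_RETA_SIZE */

int main(void)
{
	uint32_t rss_i = 6;	/* assumed RSS queue count */
	uint32_t i, n;
	int ok = 1;

	for (i = 0; i < RETA_SIZE; i++) {
		uint32_t base = ((i * 0x00040004) + 0x00020000) * rss_i;
		uint32_t reta = (base & 0x3F803F80) >> 7;	/* slots 4i and 4i+2 */

		base += 0x00010001 * rss_i;
		reta |= (base & 0x3F803F80) << 1;		/* slots 4i+1 and 4i+3 */

		for (n = 0; n < 4; n++) {
			if (((reta >> (8 * n)) & 0xFF) != (4 * i + n) * rss_i / 128) {
				printf("mismatch at slot %u\n", 4 * i + n);
				ok = 0;
			}
		}
	}
	if (ok)
		printf("packed table matches (n * rss_i) / 128 for all slots\n");
	return 0;
}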
1940 | ||
1941 | /** | |
1942 | * fm10k_init_queueing_scheme - Determine proper queueing scheme | |
1943 | * @interface: board private structure to initialize | |
1944 | * | |
1945 | * We determine which queueing scheme to use based on... | |
1946 | * - Hardware queue count (num_*_queues) | |
1947 | *   - defined by miscellaneous hardware support/features (RSS, etc.) | |
1948 | **/ | |
1949 | int fm10k_init_queueing_scheme(struct fm10k_intfc *interface) | |
1950 | { | |
1951 | int err; | |
1952 | ||
1953 | /* Number of supported queues */ | |
1954 | fm10k_set_num_queues(interface); | |
1955 | ||
1956 | /* Configure MSI-X capability */ | |
1957 | err = fm10k_init_msix_capability(interface); | |
1958 | if (err) { | |
1959 | dev_err(&interface->pdev->dev, | |
1960 | "Unable to initialize MSI-X capability\n"); | |
1961 | return err; | |
1962 | } | |
1963 | ||
1964 | /* Allocate memory for queues */ | |
1965 | err = fm10k_alloc_q_vectors(interface); | |
1966 | if (err) | |
1967 | return err; | |
1968 | ||
aa3ac822 AD |
1969 | /* Map rings to devices, and map devices to physical queues */ |
1970 | fm10k_assign_rings(interface); | |
1971 | ||
18283cad AD |
1972 | /* Initialize RSS redirection table */ |
1973 | fm10k_init_reta(interface); | |
1974 | ||
1975 | return 0; | |
1976 | } | |
1977 | ||
1978 | /** | |
1979 | * fm10k_clear_queueing_scheme - Clear the current queueing scheme settings | |
1980 | * @interface: board private structure to clear queueing scheme on | |
1981 | * | |
1982 | * We go through and clear queueing specific resources and reset the structure | |
1983 | * to pre-load conditions | |
1984 | **/ | |
1985 | void fm10k_clear_queueing_scheme(struct fm10k_intfc *interface) | |
1986 | { | |
1987 | fm10k_free_q_vectors(interface); | |
1988 | fm10k_reset_msix_capability(interface); | |
1989 | } |