Line | Data |
---|---|
1 | /* Intel Ethernet Switch Host Interface Driver |
2 | * Copyright(c) 2013 - 2014 Intel Corporation. | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or modify it | |
5 | * under the terms and conditions of the GNU General Public License, | |
6 | * version 2, as published by the Free Software Foundation. | |
7 | * | |
8 | * This program is distributed in the hope it will be useful, but WITHOUT | |
9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
11 | * more details. | |
12 | * | |
13 | * The full GNU General Public License is included in this distribution in | |
14 | * the file called "COPYING". | |
15 | * | |
16 | * Contact Information: | |
17 | * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | |
18 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | |
19 | */ | |
20 | ||
21 | #include <linux/types.h> | |
22 | #include <linux/module.h> | |
23 | #include <net/ipv6.h> | |
24 | #include <net/ip.h> | |
25 | #include <net/tcp.h> | |
26 | #include <linux/if_macvlan.h> | |
27 | #include <linux/prefetch.h> |
28 | |
29 | #include "fm10k.h" | |
30 | ||
31 | #define DRV_VERSION "0.12.2-k" | |
32 | const char fm10k_driver_version[] = DRV_VERSION; | |
33 | char fm10k_driver_name[] = "fm10k"; | |
34 | static const char fm10k_driver_string[] = | |
35 | "Intel(R) Ethernet Switch Host Interface Driver"; | |
36 | static const char fm10k_copyright[] = | |
37 | "Copyright (c) 2013 Intel Corporation."; | |
38 | ||
39 | MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); | |
40 | MODULE_DESCRIPTION("Intel(R) Ethernet Switch Host Interface Driver"); | |
41 | MODULE_LICENSE("GPL"); | |
42 | MODULE_VERSION(DRV_VERSION); | |
43 | ||
44 | /** |
45 | * fm10k_init_module - Driver Registration Routine | |
46 | * |
47 | * fm10k_init_module is the first routine called when the driver is | |
48 | * loaded. All it does is register with the PCI subsystem. | |
49 | **/ | |
50 | static int __init fm10k_init_module(void) | |
51 | { | |
52 | pr_info("%s - version %s\n", fm10k_driver_string, fm10k_driver_version); | |
53 | pr_info("%s\n", fm10k_copyright); | |
54 | ||
55 | fm10k_dbg_init(); |
56 | ||
57 | return fm10k_register_pci_driver(); |
58 | } | |
59 | module_init(fm10k_init_module); | |
60 | ||
61 | /** | |
62 | * fm10k_exit_module - Driver Exit Cleanup Routine | |
63 | * | |
64 | * fm10k_exit_module is called just before the driver is removed | |
65 | * from memory. | |
66 | **/ | |
67 | static void __exit fm10k_exit_module(void) | |
68 | { | |
69 | fm10k_unregister_pci_driver(); | |
70 | |
71 | fm10k_dbg_exit(); | |
72 | } |
73 | module_exit(fm10k_exit_module); | |
74 | ||
75 | static bool fm10k_alloc_mapped_page(struct fm10k_ring *rx_ring, |
76 | struct fm10k_rx_buffer *bi) | |
77 | { | |
78 | struct page *page = bi->page; | |
79 | dma_addr_t dma; | |
80 | ||
81 | /* Only page will be NULL if buffer was consumed */ | |
82 | if (likely(page)) | |
83 | return true; | |
84 | ||
85 | /* alloc new page for storage */ | |
86 | page = alloc_page(GFP_ATOMIC | __GFP_COLD); | |
87 | if (unlikely(!page)) { | |
88 | rx_ring->rx_stats.alloc_failed++; | |
89 | return false; | |
90 | } | |
91 | ||
92 | /* map page for use */ | |
93 | dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); | |
94 | ||
95 | /* if mapping failed free memory back to system since | |
96 | * there isn't much point in holding memory we can't use | |
97 | */ | |
98 | if (dma_mapping_error(rx_ring->dev, dma)) { | |
99 | __free_page(page); | |
100 | bi->page = NULL; | |
101 | ||
102 | rx_ring->rx_stats.alloc_failed++; | |
103 | return false; | |
104 | } | |
105 | ||
106 | bi->dma = dma; | |
107 | bi->page = page; | |
108 | bi->page_offset = 0; | |
109 | ||
110 | return true; | |
111 | } | |
112 | ||
113 | /** | |
114 | * fm10k_alloc_rx_buffers - Replace used receive buffers | |
115 | * @rx_ring: ring to place buffers on | |
116 | * @cleaned_count: number of buffers to replace | |
117 | **/ | |
118 | void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count) | |
119 | { | |
120 | union fm10k_rx_desc *rx_desc; | |
121 | struct fm10k_rx_buffer *bi; | |
122 | u16 i = rx_ring->next_to_use; | |
123 | ||
124 | /* nothing to do */ | |
125 | if (!cleaned_count) | |
126 | return; | |
127 | ||
128 | rx_desc = FM10K_RX_DESC(rx_ring, i); | |
129 | bi = &rx_ring->rx_buffer[i]; | |
130 | i -= rx_ring->count; | |
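| /* Note: i is biased by -count here so that it reaches zero exactly when |
| * the ring index wraps; the !i test in the loop below can then reload |
| * rx_desc, bi, and the bias using a cheap compare against zero. |
| */ |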
131 | ||
132 | do { | |
133 | if (!fm10k_alloc_mapped_page(rx_ring, bi)) | |
134 | break; | |
135 | ||
136 | /* Refresh the desc even if buffer_addrs didn't change | |
137 | * because each write-back erases this info. | |
138 | */ | |
139 | rx_desc->q.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); | |
140 | ||
141 | rx_desc++; | |
142 | bi++; | |
143 | i++; | |
144 | if (unlikely(!i)) { | |
145 | rx_desc = FM10K_RX_DESC(rx_ring, 0); | |
146 | bi = rx_ring->rx_buffer; | |
147 | i -= rx_ring->count; | |
148 | } | |
149 | ||
150 | /* clear the hdr_addr for the next_to_use descriptor */ | |
151 | rx_desc->q.hdr_addr = 0; | |
152 | ||
153 | cleaned_count--; | |
154 | } while (cleaned_count); | |
155 | ||
156 | i += rx_ring->count; | |
157 | ||
158 | if (rx_ring->next_to_use != i) { | |
159 | /* record the next descriptor to use */ | |
160 | rx_ring->next_to_use = i; | |
161 | ||
162 | /* update next to alloc since we have filled the ring */ | |
163 | rx_ring->next_to_alloc = i; | |
164 | ||
165 | /* Force memory writes to complete before letting h/w | |
166 | * know there are new descriptors to fetch. (Only | |
167 | * applicable for weak-ordered memory model archs, | |
168 | * such as IA-64). | |
169 | */ | |
170 | wmb(); | |
171 | ||
172 | /* notify hardware of new descriptors */ | |
173 | writel(i, rx_ring->tail); | |
174 | } | |
175 | } | |
176 | ||
177 | /** | |
178 | * fm10k_reuse_rx_page - page flip buffer and store it back on the ring | |
179 | * @rx_ring: rx descriptor ring to store buffers on | |
180 | * @old_buff: donor buffer to have page reused | |
181 | * | |
182 | * Synchronizes page for reuse by the interface | |
183 | **/ | |
184 | static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring, | |
185 | struct fm10k_rx_buffer *old_buff) | |
186 | { | |
187 | struct fm10k_rx_buffer *new_buff; | |
188 | u16 nta = rx_ring->next_to_alloc; | |
189 | ||
190 | new_buff = &rx_ring->rx_buffer[nta]; | |
191 | ||
192 | /* update, and store next to alloc */ | |
193 | nta++; | |
194 | rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; | |
195 | ||
196 | /* transfer page from old buffer to new buffer */ | |
197 | memcpy(new_buff, old_buff, sizeof(struct fm10k_rx_buffer)); | |
198 | ||
199 | /* sync the buffer for use by the device */ | |
200 | dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma, | |
201 | old_buff->page_offset, | |
202 | FM10K_RX_BUFSZ, | |
203 | DMA_FROM_DEVICE); | |
204 | } | |
205 | ||
206 | static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer, | |
207 | struct page *page, | |
208 | unsigned int truesize) | |
209 | { | |
210 | /* avoid re-using remote pages */ | |
211 | if (unlikely(page_to_nid(page) != numa_mem_id())) | |
212 | return false; | |
213 | ||
214 | #if (PAGE_SIZE < 8192) | |
215 | /* if we are only owner of page we can reuse it */ | |
216 | if (unlikely(page_count(page) != 1)) | |
217 | return false; | |
218 | ||
219 | /* flip page offset to other buffer */ | |
220 | rx_buffer->page_offset ^= FM10K_RX_BUFSZ; | |
221 | ||
221 | ||
222 | /* Even if we own the page, we are not allowed to use atomic_set() |
223 | * This would break get_page_unless_zero() users. | |
224 | */ |
225 | atomic_inc(&page->_count); |
226 | #else |
227 | /* move offset up to the next cache line */ | |
228 | rx_buffer->page_offset += truesize; | |
229 | ||
230 | if (rx_buffer->page_offset > (PAGE_SIZE - FM10K_RX_BUFSZ)) | |
231 | return false; | |
232 | ||
233 | /* bump ref count on page before it is given to the stack */ | |
234 | get_page(page); | |
235 | #endif | |
236 | ||
237 | return true; | |
238 | } | |
239 | ||
240 | /** | |
241 | * fm10k_add_rx_frag - Add contents of Rx buffer to sk_buff | |
242 | * @rx_ring: rx descriptor ring to transact packets on | |
243 | * @rx_buffer: buffer containing page to add | |
244 | * @rx_desc: descriptor containing length of buffer written by hardware | |
245 | * @skb: sk_buff to place the data into | |
246 | * | |
247 | * This function will add the data contained in rx_buffer->page to the skb. | |
248 | * This is done either through a direct copy if the data in the buffer is | |
249 | * less than the skb header size, otherwise it will just attach the page as | |
250 | * a frag to the skb. | |
251 | * | |
252 | * The function will then update the page offset if necessary and return | |
253 | * true if the buffer can be reused by the interface. | |
254 | **/ | |
255 | static bool fm10k_add_rx_frag(struct fm10k_ring *rx_ring, | |
256 | struct fm10k_rx_buffer *rx_buffer, | |
257 | union fm10k_rx_desc *rx_desc, | |
258 | struct sk_buff *skb) | |
259 | { | |
260 | struct page *page = rx_buffer->page; | |
261 | unsigned int size = le16_to_cpu(rx_desc->w.length); | |
262 | #if (PAGE_SIZE < 8192) | |
263 | unsigned int truesize = FM10K_RX_BUFSZ; | |
264 | #else | |
265 | unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); | |
266 | #endif | |
267 | ||
268 | if ((size <= FM10K_RX_HDR_LEN) && !skb_is_nonlinear(skb)) { | |
269 | unsigned char *va = page_address(page) + rx_buffer->page_offset; | |
270 | ||
271 | memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); | |
272 | ||
273 | /* we can reuse buffer as-is, just make sure it is local */ | |
274 | if (likely(page_to_nid(page) == numa_mem_id())) | |
275 | return true; | |
276 | ||
277 | /* this page cannot be reused so discard it */ | |
278 | put_page(page); | |
279 | return false; | |
280 | } | |
281 | ||
282 | skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, | |
283 | rx_buffer->page_offset, size, truesize); | |
284 | ||
285 | return fm10k_can_reuse_rx_page(rx_buffer, page, truesize); | |
286 | } | |
287 | ||
288 | static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring, | |
289 | union fm10k_rx_desc *rx_desc, | |
290 | struct sk_buff *skb) | |
291 | { | |
292 | struct fm10k_rx_buffer *rx_buffer; | |
293 | struct page *page; | |
294 | ||
295 | rx_buffer = &rx_ring->rx_buffer[rx_ring->next_to_clean]; | |
296 | ||
297 | page = rx_buffer->page; | |
298 | prefetchw(page); | |
299 | ||
300 | if (likely(!skb)) { | |
301 | void *page_addr = page_address(page) + | |
302 | rx_buffer->page_offset; | |
303 | ||
304 | /* prefetch first cache line of first page */ | |
305 | prefetch(page_addr); | |
306 | #if L1_CACHE_BYTES < 128 | |
307 | prefetch(page_addr + L1_CACHE_BYTES); | |
308 | #endif | |
309 | ||
310 | /* allocate a skb to store the frags */ | |
311 | skb = netdev_alloc_skb_ip_align(rx_ring->netdev, | |
312 | FM10K_RX_HDR_LEN); | |
313 | if (unlikely(!skb)) { | |
314 | rx_ring->rx_stats.alloc_failed++; | |
315 | return NULL; | |
316 | } | |
317 | ||
318 | /* we will be copying header into skb->data in | |
319 | * pskb_may_pull so it is in our interest to prefetch | |
320 | * it now to avoid a possible cache miss | |
321 | */ | |
322 | prefetchw(skb->data); | |
323 | } | |
324 | ||
325 | /* we are reusing so sync this buffer for CPU use */ | |
326 | dma_sync_single_range_for_cpu(rx_ring->dev, | |
327 | rx_buffer->dma, | |
328 | rx_buffer->page_offset, | |
329 | FM10K_RX_BUFSZ, | |
330 | DMA_FROM_DEVICE); | |
331 | ||
332 | /* pull page into skb */ | |
333 | if (fm10k_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { | |
334 | /* hand second half of page back to the ring */ | |
335 | fm10k_reuse_rx_page(rx_ring, rx_buffer); | |
336 | } else { | |
337 | /* we are not reusing the buffer so unmap it */ | |
338 | dma_unmap_page(rx_ring->dev, rx_buffer->dma, | |
339 | PAGE_SIZE, DMA_FROM_DEVICE); | |
340 | } | |
341 | ||
342 | /* clear contents of rx_buffer */ | |
343 | rx_buffer->page = NULL; | |
344 | ||
345 | return skb; | |
346 | } | |
347 | ||
348 | static inline void fm10k_rx_checksum(struct fm10k_ring *ring, |
349 | union fm10k_rx_desc *rx_desc, | |
350 | struct sk_buff *skb) | |
351 | { | |
352 | skb_checksum_none_assert(skb); | |
353 | ||
354 | /* Rx checksum disabled via ethtool */ | |
355 | if (!(ring->netdev->features & NETIF_F_RXCSUM)) | |
356 | return; | |
357 | ||
358 | /* TCP/UDP checksum error bit is set */ | |
359 | if (fm10k_test_staterr(rx_desc, | |
360 | FM10K_RXD_STATUS_L4E | | |
361 | FM10K_RXD_STATUS_L4E2 | | |
362 | FM10K_RXD_STATUS_IPE | | |
363 | FM10K_RXD_STATUS_IPE2)) { | |
364 | ring->rx_stats.csum_err++; | |
365 | return; | |
366 | } | |
367 | ||
368 | /* It must be a TCP or UDP packet with a valid checksum */ | |
369 | if (fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_L4CS2)) | |
370 | skb->encapsulation = true; | |
371 | else if (!fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_L4CS)) | |
372 | return; | |
373 | ||
374 | skb->ip_summed = CHECKSUM_UNNECESSARY; | |
375 | } | |
376 | ||
377 | #define FM10K_RSS_L4_TYPES_MASK \ | |
378 | ((1ul << FM10K_RSSTYPE_IPV4_TCP) | \ | |
379 | (1ul << FM10K_RSSTYPE_IPV4_UDP) | \ | |
380 | (1ul << FM10K_RSSTYPE_IPV6_TCP) | \ | |
381 | (1ul << FM10K_RSSTYPE_IPV6_UDP)) | |
382 | ||
383 | static inline void fm10k_rx_hash(struct fm10k_ring *ring, | |
384 | union fm10k_rx_desc *rx_desc, | |
385 | struct sk_buff *skb) | |
386 | { | |
387 | u16 rss_type; | |
388 | ||
389 | if (!(ring->netdev->features & NETIF_F_RXHASH)) | |
390 | return; | |
391 | ||
392 | rss_type = le16_to_cpu(rx_desc->w.pkt_info) & FM10K_RXD_RSSTYPE_MASK; | |
393 | if (!rss_type) | |
394 | return; | |
395 | ||
396 | skb_set_hash(skb, le32_to_cpu(rx_desc->d.rss), | |
397 | (FM10K_RSS_L4_TYPES_MASK & (1ul << rss_type)) ? | |
398 | PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); | |
399 | } | |
400 | ||
401 | static void fm10k_rx_hwtstamp(struct fm10k_ring *rx_ring, |
402 | union fm10k_rx_desc *rx_desc, | |
403 | struct sk_buff *skb) | |
404 | { | |
405 | struct fm10k_intfc *interface = rx_ring->q_vector->interface; | |
406 | ||
407 | FM10K_CB(skb)->tstamp = rx_desc->q.timestamp; | |
408 | ||
409 | if (unlikely(interface->flags & FM10K_FLAG_RX_TS_ENABLED)) | |
410 | fm10k_systime_to_hwtstamp(interface, skb_hwtstamps(skb), | |
411 | le64_to_cpu(rx_desc->q.timestamp)); | |
412 | } | |
413 | ||
414 | static void fm10k_type_trans(struct fm10k_ring *rx_ring, |
415 | union fm10k_rx_desc *rx_desc, | |
416 | struct sk_buff *skb) | |
417 | { | |
418 | struct net_device *dev = rx_ring->netdev; | |
419 | struct fm10k_l2_accel *l2_accel = rcu_dereference_bh(rx_ring->l2_accel); | |
420 | ||
421 | /* check to see if DGLORT belongs to a MACVLAN */ | |
422 | if (l2_accel) { | |
423 | u16 idx = le16_to_cpu(FM10K_CB(skb)->fi.w.dglort) - 1; | |
424 | ||
425 | idx -= l2_accel->dglort; | |
426 | if (idx < l2_accel->size && l2_accel->macvlan[idx]) | |
427 | dev = l2_accel->macvlan[idx]; | |
428 | else | |
429 | l2_accel = NULL; | |
430 | } | |
431 | ||
432 | skb->protocol = eth_type_trans(skb, dev); | |
433 | ||
434 | if (!l2_accel) | |
435 | return; | |
436 | ||
437 | /* update MACVLAN statistics */ | |
438 | macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, 1, | |
439 | !!(rx_desc->w.hdr_info & | |
440 | cpu_to_le16(FM10K_RXD_HDR_INFO_XC_MASK))); | |
441 | } | |
442 | ||
443 | /** |
444 | * fm10k_process_skb_fields - Populate skb header fields from Rx descriptor | |
445 | * @rx_ring: rx descriptor ring packet is being transacted on | |
446 | * @rx_desc: pointer to the EOP Rx descriptor | |
447 | * @skb: pointer to current skb being populated | |
448 | * | |
449 | * This function checks the ring, descriptor, and packet information in | |
450 | * order to populate the hash, checksum, VLAN, timestamp, protocol, and | |
451 | * other fields within the skb. | |
452 | **/ | |
453 | static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring, | |
454 | union fm10k_rx_desc *rx_desc, | |
455 | struct sk_buff *skb) | |
456 | { | |
457 | unsigned int len = skb->len; | |
458 | ||
459 | fm10k_rx_hash(rx_ring, rx_desc, skb); |
460 | ||
461 | fm10k_rx_checksum(rx_ring, rx_desc, skb); | |
462 | ||
463 | fm10k_rx_hwtstamp(rx_ring, rx_desc, skb); |
464 | ||
465 | FM10K_CB(skb)->fi.w.vlan = rx_desc->w.vlan; |
466 | ||
467 | skb_record_rx_queue(skb, rx_ring->queue_index); | |
468 | ||
469 | FM10K_CB(skb)->fi.d.glort = rx_desc->d.glort; | |
470 | ||
471 | if (rx_desc->w.vlan) { | |
472 | u16 vid = le16_to_cpu(rx_desc->w.vlan); | |
473 | ||
474 | if (vid != rx_ring->vid) | |
475 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); | |
476 | } | |
477 | ||
478 | fm10k_type_trans(rx_ring, rx_desc, skb); |
479 | |
480 | return len; | |
481 | } | |
482 | ||
483 | /** | |
484 | * fm10k_is_non_eop - process handling of non-EOP buffers | |
485 | * @rx_ring: Rx ring being processed | |
486 | * @rx_desc: Rx descriptor for current buffer | |
487 | * | |
488 | * This function updates next to clean. If the buffer is an EOP buffer | |
489 | * this function exits returning false, otherwise it will place the | |
490 | * sk_buff in the next buffer to be chained and return true indicating | |
491 | * that this is in fact a non-EOP buffer. | |
492 | **/ | |
493 | static bool fm10k_is_non_eop(struct fm10k_ring *rx_ring, | |
494 | union fm10k_rx_desc *rx_desc) | |
495 | { | |
496 | u32 ntc = rx_ring->next_to_clean + 1; | |
497 | ||
498 | /* fetch, update, and store next to clean */ | |
499 | ntc = (ntc < rx_ring->count) ? ntc : 0; | |
500 | rx_ring->next_to_clean = ntc; | |
501 | ||
502 | prefetch(FM10K_RX_DESC(rx_ring, ntc)); | |
503 | ||
504 | if (likely(fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_EOP))) | |
505 | return false; | |
506 | ||
507 | return true; | |
508 | } | |
509 | ||
510 | /** | |
511 | * fm10k_pull_tail - fm10k specific version of skb_pull_tail | |
512 | * @rx_ring: rx descriptor ring packet is being transacted on | |
513 | * @rx_desc: pointer to the EOP Rx descriptor | |
514 | * @skb: pointer to current skb being adjusted | |
515 | * | |
516 | * This function is an fm10k specific version of __pskb_pull_tail. The | |
517 | * main difference between this version and the original function is that | |
518 | * this function can make several assumptions about the state of things | |
519 | * that allow for significant optimizations versus the standard function. | |
520 | * As a result we can do things like drop a frag and maintain an accurate | |
521 | * truesize for the skb. | |
522 | */ | |
523 | static void fm10k_pull_tail(struct fm10k_ring *rx_ring, | |
524 | union fm10k_rx_desc *rx_desc, | |
525 | struct sk_buff *skb) | |
526 | { | |
527 | struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; | |
528 | unsigned char *va; | |
529 | unsigned int pull_len; | |
530 | ||
531 | /* it is valid to use page_address instead of kmap since we are | |
532 | * working with pages allocated out of the lowmem pool per |
533 | * alloc_page(GFP_ATOMIC) | |
534 | */ | |
535 | va = skb_frag_address(frag); | |
536 | ||
537 | /* we need the header to contain the greater of either ETH_HLEN or | |
538 | * 60 bytes if the skb->len is less than 60 for skb_pad. | |
539 | */ | |
540 | pull_len = eth_get_headlen(va, FM10K_RX_HDR_LEN); | |
541 | ||
542 | /* align pull length to size of long to optimize memcpy performance */ | |
543 | skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); | |
544 | ||
545 | /* update all of the pointers */ | |
546 | skb_frag_size_sub(frag, pull_len); | |
547 | frag->page_offset += pull_len; | |
548 | skb->data_len -= pull_len; | |
549 | skb->tail += pull_len; | |
550 | } | |
551 | ||
552 | /** | |
553 | * fm10k_cleanup_headers - Correct corrupted or empty headers | |
554 | * @rx_ring: rx descriptor ring packet is being transacted on | |
555 | * @rx_desc: pointer to the EOP Rx descriptor | |
556 | * @skb: pointer to current skb being fixed | |
557 | * | |
558 | * Address the case where we are pulling data in on pages only | |
559 | * and as such no data is present in the skb header. | |
560 | * | |
561 | * In addition if skb is not at least 60 bytes we need to pad it so that | |
562 | * it is large enough to qualify as a valid Ethernet frame. | |
563 | * | |
564 | * Returns true if an error was encountered and skb was freed. | |
565 | **/ | |
566 | static bool fm10k_cleanup_headers(struct fm10k_ring *rx_ring, | |
567 | union fm10k_rx_desc *rx_desc, | |
568 | struct sk_buff *skb) | |
569 | { | |
570 | if (unlikely((fm10k_test_staterr(rx_desc, | |
571 | FM10K_RXD_STATUS_RXE)))) { | |
572 | dev_kfree_skb_any(skb); | |
573 | rx_ring->rx_stats.errors++; | |
574 | return true; | |
575 | } | |
576 | ||
577 | /* place header in linear portion of buffer */ | |
578 | if (skb_is_nonlinear(skb)) | |
579 | fm10k_pull_tail(rx_ring, rx_desc, skb); | |
580 | ||
581 | /* if skb_pad returns an error the skb was freed */ | |
582 | if (unlikely(skb->len < 60)) { | |
583 | int pad_len = 60 - skb->len; | |
584 | ||
585 | if (skb_pad(skb, pad_len)) | |
586 | return true; | |
587 | __skb_put(skb, pad_len); | |
588 | } | |
589 | ||
590 | return false; | |
591 | } | |
592 | ||
593 | /** | |
594 | * fm10k_receive_skb - helper function to handle rx indications | |
595 | * @q_vector: structure containing interrupt and ring information | |
596 | * @skb: packet to send up | |
597 | **/ | |
598 | static void fm10k_receive_skb(struct fm10k_q_vector *q_vector, | |
599 | struct sk_buff *skb) | |
600 | { | |
601 | napi_gro_receive(&q_vector->napi, skb); | |
602 | } | |
603 | ||
604 | static bool fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector, | |
605 | struct fm10k_ring *rx_ring, | |
606 | int budget) | |
607 | { | |
608 | struct sk_buff *skb = rx_ring->skb; | |
609 | unsigned int total_bytes = 0, total_packets = 0; | |
610 | u16 cleaned_count = fm10k_desc_unused(rx_ring); | |
611 | ||
612 | do { | |
613 | union fm10k_rx_desc *rx_desc; | |
614 | ||
615 | /* return some buffers to hardware, one at a time is too slow */ | |
616 | if (cleaned_count >= FM10K_RX_BUFFER_WRITE) { | |
617 | fm10k_alloc_rx_buffers(rx_ring, cleaned_count); | |
618 | cleaned_count = 0; | |
619 | } | |
620 | ||
621 | rx_desc = FM10K_RX_DESC(rx_ring, rx_ring->next_to_clean); | |
622 | ||
623 | if (!fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_DD)) | |
624 | break; | |
625 | ||
626 | /* This memory barrier is needed to keep us from reading | |
627 | * any other fields out of the rx_desc until we know the | |
628 | * RXD_STATUS_DD bit is set | |
629 | */ | |
630 | rmb(); | |
631 | ||
632 | /* retrieve a buffer from the ring */ | |
633 | skb = fm10k_fetch_rx_buffer(rx_ring, rx_desc, skb); | |
634 | ||
635 | /* exit if we failed to retrieve a buffer */ | |
636 | if (!skb) | |
637 | break; | |
638 | ||
639 | cleaned_count++; | |
640 | ||
641 | /* fetch next buffer in frame if non-eop */ | |
642 | if (fm10k_is_non_eop(rx_ring, rx_desc)) | |
643 | continue; | |
644 | ||
645 | /* verify the packet layout is correct */ | |
646 | if (fm10k_cleanup_headers(rx_ring, rx_desc, skb)) { | |
647 | skb = NULL; | |
648 | continue; | |
649 | } | |
650 | ||
651 | /* populate checksum, timestamp, VLAN, and protocol */ | |
652 | total_bytes += fm10k_process_skb_fields(rx_ring, rx_desc, skb); | |
653 | ||
654 | fm10k_receive_skb(q_vector, skb); | |
655 | ||
656 | /* reset skb pointer */ | |
657 | skb = NULL; | |
658 | ||
659 | /* update budget accounting */ | |
660 | total_packets++; | |
661 | } while (likely(total_packets < budget)); | |
662 | ||
663 | /* place incomplete frames back on ring for completion */ | |
664 | rx_ring->skb = skb; | |
665 | ||
666 | u64_stats_update_begin(&rx_ring->syncp); | |
667 | rx_ring->stats.packets += total_packets; | |
668 | rx_ring->stats.bytes += total_bytes; | |
669 | u64_stats_update_end(&rx_ring->syncp); | |
670 | q_vector->rx.total_packets += total_packets; | |
671 | q_vector->rx.total_bytes += total_bytes; | |
672 | ||
673 | return total_packets < budget; | |
674 | } | |
675 | ||
676 | #define VXLAN_HLEN (sizeof(struct udphdr) + 8) |
677 | static struct ethhdr *fm10k_port_is_vxlan(struct sk_buff *skb) | |
678 | { | |
679 | struct fm10k_intfc *interface = netdev_priv(skb->dev); | |
680 | struct fm10k_vxlan_port *vxlan_port; | |
681 | ||
682 | /* we can only offload a vxlan if we recognize it as such */ | |
683 | vxlan_port = list_first_entry_or_null(&interface->vxlan_port, | |
684 | struct fm10k_vxlan_port, list); | |
685 | ||
686 | if (!vxlan_port) | |
687 | return NULL; | |
688 | if (vxlan_port->port != udp_hdr(skb)->dest) | |
689 | return NULL; | |
690 | ||
691 | /* return offset of udp_hdr plus 8 bytes for VXLAN header */ | |
692 | return (struct ethhdr *)(skb_transport_header(skb) + VXLAN_HLEN); | |
693 | } | |
694 | ||
695 | #define FM10K_NVGRE_RESERVED0_FLAGS htons(0x9FFF) | |
696 | #define NVGRE_TNI htons(0x2000) | |
697 | struct fm10k_nvgre_hdr { | |
698 | __be16 flags; | |
699 | __be16 proto; | |
700 | __be32 tni; | |
701 | }; | |
702 | ||
703 | static struct ethhdr *fm10k_gre_is_nvgre(struct sk_buff *skb) | |
704 | { | |
705 | struct fm10k_nvgre_hdr *nvgre_hdr; | |
706 | int hlen = ip_hdrlen(skb); | |
707 | ||
708 | /* currently only IPv4 is supported due to hlen above */ | |
709 | if (vlan_get_protocol(skb) != htons(ETH_P_IP)) | |
710 | return NULL; | |
711 | ||
712 | /* our transport header should be NVGRE */ | |
713 | nvgre_hdr = (struct fm10k_nvgre_hdr *)(skb_network_header(skb) + hlen); | |
714 | ||
715 | /* verify all reserved flags are 0 */ | |
716 | if (nvgre_hdr->flags & FM10K_NVGRE_RESERVED0_FLAGS) | |
717 | return NULL; | |
718 | ||
719 | /* verify protocol is transparent Ethernet bridging */ | |
720 | if (nvgre_hdr->proto != htons(ETH_P_TEB)) | |
721 | return NULL; | |
722 | ||
723 | /* report start of ethernet header */ | |
724 | if (nvgre_hdr->flags & NVGRE_TNI) | |
725 | return (struct ethhdr *)(nvgre_hdr + 1); | |
726 | ||
727 | return (struct ethhdr *)(&nvgre_hdr->tni); | |
728 | } | |
729 | ||
730 | static __be16 fm10k_tx_encap_offload(struct sk_buff *skb) | |
731 | { | |
732 | struct ethhdr *eth_hdr; | |
733 | u8 l4_hdr = 0; | |
734 | ||
735 | switch (vlan_get_protocol(skb)) { | |
736 | case htons(ETH_P_IP): | |
737 | l4_hdr = ip_hdr(skb)->protocol; | |
738 | break; | |
739 | case htons(ETH_P_IPV6): | |
740 | l4_hdr = ipv6_hdr(skb)->nexthdr; | |
741 | break; | |
742 | default: | |
743 | return 0; | |
744 | } | |
745 | ||
746 | switch (l4_hdr) { | |
747 | case IPPROTO_UDP: | |
748 | eth_hdr = fm10k_port_is_vxlan(skb); | |
749 | break; | |
750 | case IPPROTO_GRE: | |
751 | eth_hdr = fm10k_gre_is_nvgre(skb); | |
752 | break; | |
753 | default: | |
754 | return 0; | |
755 | } | |
756 | ||
757 | if (!eth_hdr) | |
758 | return 0; | |
759 | ||
760 | switch (eth_hdr->h_proto) { | |
761 | case htons(ETH_P_IP): | |
762 | case htons(ETH_P_IPV6): | |
763 | break; | |
764 | default: | |
765 | return 0; | |
766 | } | |
767 | ||
768 | return eth_hdr->h_proto; | |
769 | } | |
770 | ||
771 | static int fm10k_tso(struct fm10k_ring *tx_ring, | |
772 | struct fm10k_tx_buffer *first) | |
773 | { | |
774 | struct sk_buff *skb = first->skb; | |
775 | struct fm10k_tx_desc *tx_desc; | |
776 | unsigned char *th; | |
777 | u8 hdrlen; | |
778 | ||
779 | if (skb->ip_summed != CHECKSUM_PARTIAL) | |
780 | return 0; | |
781 | ||
782 | if (!skb_is_gso(skb)) | |
783 | return 0; | |
784 | ||
785 | /* compute header lengths */ | |
786 | if (skb->encapsulation) { | |
787 | if (!fm10k_tx_encap_offload(skb)) | |
788 | goto err_vxlan; | |
789 | th = skb_inner_transport_header(skb); | |
790 | } else { | |
791 | th = skb_transport_header(skb); | |
792 | } | |
793 | ||
794 | /* compute offset from SOF to transport header and add header len */ | |
795 | hdrlen = (th - skb->data) + (((struct tcphdr *)th)->doff << 2); | |
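| /* tcphdr->doff counts 32-bit words, so the << 2 converts it to bytes */ |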
796 | ||
797 | first->tx_flags |= FM10K_TX_FLAGS_CSUM; | |
798 | ||
799 | /* update gso size and bytecount with header size */ | |
800 | first->gso_segs = skb_shinfo(skb)->gso_segs; | |
801 | first->bytecount += (first->gso_segs - 1) * hdrlen; | |
802 | ||
803 | /* populate Tx descriptor header size and mss */ | |
804 | tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use); | |
805 | tx_desc->hdrlen = hdrlen; | |
806 | tx_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); | |
807 | ||
808 | return 1; | |
809 | err_vxlan: | |
810 | tx_ring->netdev->features &= ~NETIF_F_GSO_UDP_TUNNEL; | |
811 | if (!net_ratelimit()) | |
812 | netdev_err(tx_ring->netdev, | |
813 | "TSO requested for unsupported tunnel, disabling offload\n"); | |
814 | return -1; | |
815 | } | |
816 | ||
817 | static void fm10k_tx_csum(struct fm10k_ring *tx_ring, | |
818 | struct fm10k_tx_buffer *first) | |
819 | { | |
820 | struct sk_buff *skb = first->skb; | |
821 | struct fm10k_tx_desc *tx_desc; | |
822 | union { | |
823 | struct iphdr *ipv4; | |
824 | struct ipv6hdr *ipv6; | |
825 | u8 *raw; | |
826 | } network_hdr; | |
827 | __be16 protocol; | |
828 | u8 l4_hdr = 0; | |
829 | ||
830 | if (skb->ip_summed != CHECKSUM_PARTIAL) | |
831 | goto no_csum; | |
832 | ||
833 | if (skb->encapsulation) { | |
834 | protocol = fm10k_tx_encap_offload(skb); | |
835 | if (!protocol) { | |
836 | if (skb_checksum_help(skb)) { | |
837 | dev_warn(tx_ring->dev, | |
838 | "failed to offload encap csum!\n"); | |
839 | tx_ring->tx_stats.csum_err++; | |
840 | } | |
841 | goto no_csum; | |
842 | } | |
843 | network_hdr.raw = skb_inner_network_header(skb); | |
844 | } else { | |
845 | protocol = vlan_get_protocol(skb); | |
846 | network_hdr.raw = skb_network_header(skb); | |
847 | } | |
848 | ||
849 | switch (protocol) { | |
850 | case htons(ETH_P_IP): | |
851 | l4_hdr = network_hdr.ipv4->protocol; | |
852 | break; | |
853 | case htons(ETH_P_IPV6): | |
854 | l4_hdr = network_hdr.ipv6->nexthdr; | |
855 | break; | |
856 | default: | |
857 | if (unlikely(net_ratelimit())) { | |
858 | dev_warn(tx_ring->dev, | |
859 | "partial checksum but ip version=%x!\n", | |
860 | protocol); | |
861 | } | |
862 | tx_ring->tx_stats.csum_err++; | |
863 | goto no_csum; | |
864 | } | |
865 | ||
866 | switch (l4_hdr) { | |
867 | case IPPROTO_TCP: | |
868 | case IPPROTO_UDP: | |
869 | break; | |
870 | case IPPROTO_GRE: | |
871 | if (skb->encapsulation) | |
872 | break; | |
873 | default: | |
874 | if (unlikely(net_ratelimit())) { | |
875 | dev_warn(tx_ring->dev, | |
876 | "partial checksum but l4 proto=%x!\n", | |
877 | l4_hdr); | |
878 | } | |
879 | tx_ring->tx_stats.csum_err++; | |
880 | goto no_csum; | |
881 | } | |
882 | ||
883 | /* update TX checksum flag */ | |
884 | first->tx_flags |= FM10K_TX_FLAGS_CSUM; | |
885 | ||
886 | no_csum: | |
887 | /* populate Tx descriptor header size and mss */ | |
888 | tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use); | |
889 | tx_desc->hdrlen = 0; | |
890 | tx_desc->mss = 0; | |
891 | } | |
892 | ||
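| /* Note: FM10K_SET_FLAG() below maps the _flag bit of _input onto the bit |
| * position used by _result, multiplying or dividing depending on whether |
| * the destination bit sits above or below the source bit. |
| */ |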
893 | #define FM10K_SET_FLAG(_input, _flag, _result) \ | |
894 | ((_flag <= _result) ? \ | |
895 | ((u32)(_input & _flag) * (_result / _flag)) : \ | |
896 | ((u32)(_input & _flag) / (_flag / _result))) | |
897 | ||
898 | static u8 fm10k_tx_desc_flags(struct sk_buff *skb, u32 tx_flags) | |
899 | { | |
900 | /* set type for advanced descriptor with frame checksum insertion */ | |
901 | u32 desc_flags = 0; | |
902 | ||
903 | /* set timestamping bits */ |
904 | if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && | |
905 | likely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) | |
906 | desc_flags |= FM10K_TXD_FLAG_TIME; | |
907 | ||
908 | /* set checksum offload bits */ |
909 | desc_flags |= FM10K_SET_FLAG(tx_flags, FM10K_TX_FLAGS_CSUM, | |
910 | FM10K_TXD_FLAG_CSUM); | |
911 | ||
912 | return desc_flags; | |
913 | } | |
914 | ||
915 | static bool fm10k_tx_desc_push(struct fm10k_ring *tx_ring, |
916 | struct fm10k_tx_desc *tx_desc, u16 i, | |
917 | dma_addr_t dma, unsigned int size, u8 desc_flags) | |
918 | { | |
919 | /* set RS and INT for last frame in a cache line */ | |
920 | if ((++i & (FM10K_TXD_WB_FIFO_SIZE - 1)) == 0) | |
921 | desc_flags |= FM10K_TXD_FLAG_RS | FM10K_TXD_FLAG_INT; | |
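| /* the & (FM10K_TXD_WB_FIFO_SIZE - 1) mask above assumes the write-back |
| * FIFO size is a power of two, so RS/INT is requested once per |
| * FM10K_TXD_WB_FIFO_SIZE descriptors |
| */ |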
922 | ||
923 | /* record values to descriptor */ | |
924 | tx_desc->buffer_addr = cpu_to_le64(dma); | |
925 | tx_desc->flags = desc_flags; | |
926 | tx_desc->buflen = cpu_to_le16(size); | |
927 | ||
928 | /* return true if we just wrapped the ring */ | |
929 | return i == tx_ring->count; | |
930 | } | |
931 | ||
932 | static int __fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size) |
933 | { | |
934 | netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); | |
935 | ||
936 | smp_mb(); | |
937 | ||
938 | /* We need to check again in case another CPU has just |
939 | * made room available. */ | |
940 | if (likely(fm10k_desc_unused(tx_ring) < size)) | |
941 | return -EBUSY; | |
942 | ||
943 | /* A reprieve! - use start_queue because it doesn't call schedule */ | |
944 | netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); | |
945 | ++tx_ring->tx_stats.restart_queue; | |
946 | return 0; | |
947 | } | |
948 | ||
949 | static inline int fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size) | |
950 | { | |
951 | if (likely(fm10k_desc_unused(tx_ring) >= size)) | |
952 | return 0; | |
953 | return __fm10k_maybe_stop_tx(tx_ring, size); | |
954 | } | |
955 | ||
956 | static void fm10k_tx_map(struct fm10k_ring *tx_ring, |
957 | struct fm10k_tx_buffer *first) | |
958 | { | |
959 | struct sk_buff *skb = first->skb; | |
960 | struct fm10k_tx_buffer *tx_buffer; | |
961 | struct fm10k_tx_desc *tx_desc; | |
962 | struct skb_frag_struct *frag; | |
963 | unsigned char *data; | |
964 | dma_addr_t dma; | |
965 | unsigned int data_len, size; | |
966 | u32 tx_flags = first->tx_flags; |
967 | u16 i = tx_ring->next_to_use; |
968 | u8 flags = fm10k_tx_desc_flags(skb, tx_flags); |
969 | |
970 | tx_desc = FM10K_TX_DESC(tx_ring, i); | |
971 | ||
972 | /* add HW VLAN tag */ | |
973 | if (vlan_tx_tag_present(skb)) | |
974 | tx_desc->vlan = cpu_to_le16(vlan_tx_tag_get(skb)); | |
975 | else | |
976 | tx_desc->vlan = 0; | |
977 | ||
978 | size = skb_headlen(skb); | |
979 | data = skb->data; | |
980 | ||
981 | dma = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE); | |
982 | ||
983 | data_len = skb->data_len; | |
984 | tx_buffer = first; | |
985 | ||
986 | for (frag = &skb_shinfo(skb)->frags[0];; frag++) { | |
987 | if (dma_mapping_error(tx_ring->dev, dma)) | |
988 | goto dma_error; | |
989 | ||
990 | /* record length, and DMA address */ | |
991 | dma_unmap_len_set(tx_buffer, len, size); | |
992 | dma_unmap_addr_set(tx_buffer, dma, dma); | |
993 | ||
994 | while (unlikely(size > FM10K_MAX_DATA_PER_TXD)) { | |
995 | if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++, dma, | |
996 | FM10K_MAX_DATA_PER_TXD, flags)) { | |
997 | tx_desc = FM10K_TX_DESC(tx_ring, 0); | |
998 | i = 0; | |
999 | } | |
1000 | ||
1001 | dma += FM10K_MAX_DATA_PER_TXD; | |
1002 | size -= FM10K_MAX_DATA_PER_TXD; | |
1003 | } | |
1004 | ||
1005 | if (likely(!data_len)) | |
1006 | break; | |
1007 | ||
1008 | if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++, | |
1009 | dma, size, flags)) { | |
1010 | tx_desc = FM10K_TX_DESC(tx_ring, 0); | |
1011 | i = 0; | |
1012 | } | |
1013 | ||
1014 | size = skb_frag_size(frag); | |
1015 | data_len -= size; | |
1016 | ||
1017 | dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, | |
1018 | DMA_TO_DEVICE); | |
1019 | ||
1020 | tx_buffer = &tx_ring->tx_buffer[i]; | |
1021 | } | |
1022 | ||
1023 | /* write last descriptor with LAST bit set */ | |
1024 | flags |= FM10K_TXD_FLAG_LAST; | |
1025 | ||
1026 | if (fm10k_tx_desc_push(tx_ring, tx_desc, i++, dma, size, flags)) | |
1027 | i = 0; | |
1028 | ||
1029 | /* record bytecount for BQL */ | |
1030 | netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); | |
1031 | ||
1032 | /* record SW timestamp if HW timestamp is not available */ | |
1033 | skb_tx_timestamp(first->skb); | |
1034 | ||
1035 | /* Force memory writes to complete before letting h/w know there | |
1036 | * are new descriptors to fetch. (Only applicable for weak-ordered | |
1037 | * memory model archs, such as IA-64). | |
1038 | * | |
1039 | * We also need this memory barrier to make certain all of the | |
1040 | * status bits have been updated before next_to_watch is written. | |
1041 | */ | |
1042 | wmb(); | |
1043 | ||
1044 | /* set next_to_watch value indicating a packet is present */ | |
1045 | first->next_to_watch = tx_desc; | |
1046 | ||
1047 | tx_ring->next_to_use = i; | |
1048 | ||
1049 | /* Make sure there is space in the ring for the next send. */ |
1050 | fm10k_maybe_stop_tx(tx_ring, DESC_NEEDED); | |
1051 | ||
1052 | /* notify HW of packet */ |
1053 | if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { |
1054 | writel(i, tx_ring->tail); | |
1055 | ||
1056 | /* we need this if more than one processor can write to our tail |
1057 | * at a time, it synchronizes IO on IA64/Altix systems | |
1058 | */ | |
1059 | mmiowb(); | |
1060 | } | |
1061 | |
1062 | return; | |
1063 | dma_error: | |
1064 | dev_err(tx_ring->dev, "TX DMA map failed\n"); | |
1065 | ||
1066 | /* clear dma mappings for failed tx_buffer map */ | |
1067 | for (;;) { | |
1068 | tx_buffer = &tx_ring->tx_buffer[i]; | |
1069 | fm10k_unmap_and_free_tx_resource(tx_ring, tx_buffer); | |
1070 | if (tx_buffer == first) | |
1071 | break; | |
1072 | if (i == 0) | |
1073 | i = tx_ring->count; | |
1074 | i--; | |
1075 | } | |
1076 | ||
1077 | tx_ring->next_to_use = i; | |
1078 | } | |
1079 | ||
1080 | netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb, |
1081 | struct fm10k_ring *tx_ring) | |
1082 | { | |
1083 | struct fm10k_tx_buffer *first; | |
1084 | int tso; |
1085 | u32 tx_flags = 0; |
1086 | #if PAGE_SIZE > FM10K_MAX_DATA_PER_TXD | |
1087 | unsigned short f; | |
1088 | #endif | |
1089 | u16 count = TXD_USE_COUNT(skb_headlen(skb)); | |
1090 | ||
1091 | /* need: 1 descriptor per page * PAGE_SIZE/FM10K_MAX_DATA_PER_TXD, | |
1092 | * + 1 desc for skb_headlen/FM10K_MAX_DATA_PER_TXD, | |
1093 | * + 2 desc gap to keep tail from touching head | |
1094 | * otherwise try next time | |
1095 | */ | |
1096 | #if PAGE_SIZE > FM10K_MAX_DATA_PER_TXD | |
1097 | for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) | |
1098 | count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); | |
1099 | #else | |
1100 | count += skb_shinfo(skb)->nr_frags; | |
1101 | #endif | |
1102 | if (fm10k_maybe_stop_tx(tx_ring, count + 3)) { | |
1103 | tx_ring->tx_stats.tx_busy++; | |
1104 | return NETDEV_TX_BUSY; | |
1105 | } | |
1106 | ||
1107 | /* record the location of the first descriptor for this packet */ | |
1108 | first = &tx_ring->tx_buffer[tx_ring->next_to_use]; | |
1109 | first->skb = skb; | |
1110 | first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN); | |
1111 | first->gso_segs = 1; | |
1112 | ||
1113 | /* record initial flags and protocol */ | |
1114 | first->tx_flags = tx_flags; | |
1115 | ||
1115 | ||
1116 | tso = fm10k_tso(tx_ring, first); |
1117 | if (tso < 0) | |
1118 | goto out_drop; | |
1119 | else if (!tso) | |
1120 | fm10k_tx_csum(tx_ring, first); | |
1121 | ||
1122 | fm10k_tx_map(tx_ring, first); |
1123 | ||
1124 | return NETDEV_TX_OK; |
1125 | ||
1126 | out_drop: | |
1127 | dev_kfree_skb_any(first->skb); | |
1128 | first->skb = NULL; | |
1129 | ||
1130 | return NETDEV_TX_OK; |
1131 | } | |
1132 | ||
1133 | static u64 fm10k_get_tx_completed(struct fm10k_ring *ring) | |
1134 | { | |
1135 | return ring->stats.packets; | |
1136 | } | |
1137 | ||
1138 | static u64 fm10k_get_tx_pending(struct fm10k_ring *ring) | |
1139 | { | |
1140 | /* use SW head and tail until we have real hardware */ | |
1141 | u32 head = ring->next_to_clean; | |
1142 | u32 tail = ring->next_to_use; | |
1143 | ||
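| /* informative example: with count = 512, head = 500 and tail = 10 the |
| * ring has wrapped, so pending = (10 + 512) - 500 = 22 descriptors |
| */ |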
1144 | return ((head <= tail) ? tail : tail + ring->count) - head; | |
1145 | } | |
1146 | ||
1147 | bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring) | |
1148 | { | |
1149 | u32 tx_done = fm10k_get_tx_completed(tx_ring); | |
1150 | u32 tx_done_old = tx_ring->tx_stats.tx_done_old; | |
1151 | u32 tx_pending = fm10k_get_tx_pending(tx_ring); | |
1152 | ||
1153 | clear_check_for_tx_hang(tx_ring); | |
1154 | ||
1155 | /* Check for a hung queue, but be thorough. This verifies | |
1156 | * that a transmit has been completed since the previous | |
1157 | * check AND there is at least one packet pending. By | |
1158 | * requiring this to fail twice we avoid races with | |
1159 | * clearing the ARMED bit and conditions where we | |
1160 | * run the check_tx_hang logic with a transmit completion | |
1161 | * pending but without time to complete it yet. | |
1162 | */ | |
1163 | if (!tx_pending || (tx_done_old != tx_done)) { | |
1164 | /* update completed stats and continue */ | |
1165 | tx_ring->tx_stats.tx_done_old = tx_done; | |
1166 | /* reset the countdown */ | |
1167 | clear_bit(__FM10K_HANG_CHECK_ARMED, &tx_ring->state); | |
1168 | ||
1169 | return false; | |
1170 | } | |
1171 | ||
1172 | /* make sure it is true for two checks in a row */ | |
1173 | return test_and_set_bit(__FM10K_HANG_CHECK_ARMED, &tx_ring->state); | |
1174 | } | |
1175 | ||
1176 | /** | |
1177 | * fm10k_tx_timeout_reset - initiate reset due to Tx timeout | |
1178 | * @interface: driver private struct | |
1179 | **/ | |
1180 | void fm10k_tx_timeout_reset(struct fm10k_intfc *interface) | |
1181 | { | |
1182 | /* Do the reset outside of interrupt context */ | |
1183 | if (!test_bit(__FM10K_DOWN, &interface->state)) { | |
1184 | netdev_err(interface->netdev, "Reset interface\n"); | |
1185 | interface->tx_timeout_count++; | |
1186 | interface->flags |= FM10K_FLAG_RESET_REQUESTED; | |
1187 | fm10k_service_event_schedule(interface); | |
1188 | } | |
1189 | } | |
1190 | ||
1191 | /** | |
1192 | * fm10k_clean_tx_irq - Reclaim resources after transmit completes | |
1193 | * @q_vector: structure containing interrupt and ring information | |
1194 | * @tx_ring: tx ring to clean | |
1195 | **/ | |
1196 | static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector, | |
1197 | struct fm10k_ring *tx_ring) | |
1198 | { | |
1199 | struct fm10k_intfc *interface = q_vector->interface; | |
1200 | struct fm10k_tx_buffer *tx_buffer; | |
1201 | struct fm10k_tx_desc *tx_desc; | |
1202 | unsigned int total_bytes = 0, total_packets = 0; | |
1203 | unsigned int budget = q_vector->tx.work_limit; | |
1204 | unsigned int i = tx_ring->next_to_clean; | |
1205 | ||
1206 | if (test_bit(__FM10K_DOWN, &interface->state)) | |
1207 | return true; | |
1208 | ||
1209 | tx_buffer = &tx_ring->tx_buffer[i]; | |
1210 | tx_desc = FM10K_TX_DESC(tx_ring, i); | |
1211 | i -= tx_ring->count; | |
1212 | ||
1213 | do { | |
1214 | struct fm10k_tx_desc *eop_desc = tx_buffer->next_to_watch; | |
1215 | ||
1216 | /* if next_to_watch is not set then there is no work pending */ | |
1217 | if (!eop_desc) | |
1218 | break; | |
1219 | ||
1220 | /* prevent any other reads prior to eop_desc */ | |
1221 | read_barrier_depends(); | |
1222 | ||
1223 | /* if DD is not set pending work has not been completed */ | |
1224 | if (!(eop_desc->flags & FM10K_TXD_FLAG_DONE)) | |
1225 | break; | |
1226 | ||
1227 | /* clear next_to_watch to prevent false hangs */ | |
1228 | tx_buffer->next_to_watch = NULL; | |
1229 | ||
1230 | /* update the statistics for this packet */ | |
1231 | total_bytes += tx_buffer->bytecount; | |
1232 | total_packets += tx_buffer->gso_segs; | |
1233 | ||
1234 | /* free the skb */ | |
1235 | dev_consume_skb_any(tx_buffer->skb); | |
1236 | ||
1237 | /* unmap skb header data */ | |
1238 | dma_unmap_single(tx_ring->dev, | |
1239 | dma_unmap_addr(tx_buffer, dma), | |
1240 | dma_unmap_len(tx_buffer, len), | |
1241 | DMA_TO_DEVICE); | |
1242 | ||
1243 | /* clear tx_buffer data */ | |
1244 | tx_buffer->skb = NULL; | |
1245 | dma_unmap_len_set(tx_buffer, len, 0); | |
1246 | ||
1247 | /* unmap remaining buffers */ | |
1248 | while (tx_desc != eop_desc) { | |
1249 | tx_buffer++; | |
1250 | tx_desc++; | |
1251 | i++; | |
1252 | if (unlikely(!i)) { | |
1253 | i -= tx_ring->count; | |
1254 | tx_buffer = tx_ring->tx_buffer; | |
1255 | tx_desc = FM10K_TX_DESC(tx_ring, 0); | |
1256 | } | |
1257 | ||
1258 | /* unmap any remaining paged data */ | |
1259 | if (dma_unmap_len(tx_buffer, len)) { | |
1260 | dma_unmap_page(tx_ring->dev, | |
1261 | dma_unmap_addr(tx_buffer, dma), | |
1262 | dma_unmap_len(tx_buffer, len), | |
1263 | DMA_TO_DEVICE); | |
1264 | dma_unmap_len_set(tx_buffer, len, 0); | |
1265 | } | |
1266 | } | |
1267 | ||
1268 | /* move us one more past the eop_desc for start of next pkt */ | |
1269 | tx_buffer++; | |
1270 | tx_desc++; | |
1271 | i++; | |
1272 | if (unlikely(!i)) { | |
1273 | i -= tx_ring->count; | |
1274 | tx_buffer = tx_ring->tx_buffer; | |
1275 | tx_desc = FM10K_TX_DESC(tx_ring, 0); | |
1276 | } | |
1277 | ||
1278 | /* issue prefetch for next Tx descriptor */ | |
1279 | prefetch(tx_desc); | |
1280 | ||
1281 | /* update budget accounting */ | |
1282 | budget--; | |
1283 | } while (likely(budget)); | |
1284 | ||
1285 | i += tx_ring->count; | |
1286 | tx_ring->next_to_clean = i; | |
1287 | u64_stats_update_begin(&tx_ring->syncp); | |
1288 | tx_ring->stats.bytes += total_bytes; | |
1289 | tx_ring->stats.packets += total_packets; | |
1290 | u64_stats_update_end(&tx_ring->syncp); | |
1291 | q_vector->tx.total_bytes += total_bytes; | |
1292 | q_vector->tx.total_packets += total_packets; | |
1293 | ||
1294 | if (check_for_tx_hang(tx_ring) && fm10k_check_tx_hang(tx_ring)) { | |
1295 | /* schedule immediate reset if we believe we hung */ | |
1296 | struct fm10k_hw *hw = &interface->hw; | |
1297 | ||
1298 | netif_err(interface, drv, tx_ring->netdev, | |
1299 | "Detected Tx Unit Hang\n" | |
1300 | " Tx Queue <%d>\n" | |
1301 | " TDH, TDT <%x>, <%x>\n" | |
1302 | " next_to_use <%x>\n" | |
1303 | " next_to_clean <%x>\n", | |
1304 | tx_ring->queue_index, | |
1305 | fm10k_read_reg(hw, FM10K_TDH(tx_ring->reg_idx)), | |
1306 | fm10k_read_reg(hw, FM10K_TDT(tx_ring->reg_idx)), | |
1307 | tx_ring->next_to_use, i); | |
1308 | ||
1309 | netif_stop_subqueue(tx_ring->netdev, | |
1310 | tx_ring->queue_index); | |
1311 | ||
1312 | netif_info(interface, probe, tx_ring->netdev, | |
1313 | "tx hang %d detected on queue %d, resetting interface\n", | |
1314 | interface->tx_timeout_count + 1, | |
1315 | tx_ring->queue_index); | |
1316 | ||
1317 | fm10k_tx_timeout_reset(interface); | |
1318 | ||
1319 | /* the netdev is about to reset, no point in enabling stuff */ | |
1320 | return true; | |
1321 | } | |
1322 | ||
1323 | /* notify netdev of completed buffers */ | |
1324 | netdev_tx_completed_queue(txring_txq(tx_ring), | |
1325 | total_packets, total_bytes); | |
1326 | ||
1327 | #define TX_WAKE_THRESHOLD min_t(u16, FM10K_MIN_TXD - 1, DESC_NEEDED * 2) | |
1328 | if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && | |
1329 | (fm10k_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { | |
1330 | /* Make sure that anybody stopping the queue after this | |
1331 | * sees the new next_to_clean. | |
1332 | */ | |
1333 | smp_mb(); | |
1334 | if (__netif_subqueue_stopped(tx_ring->netdev, | |
1335 | tx_ring->queue_index) && | |
1336 | !test_bit(__FM10K_DOWN, &interface->state)) { | |
1337 | netif_wake_subqueue(tx_ring->netdev, | |
1338 | tx_ring->queue_index); | |
1339 | ++tx_ring->tx_stats.restart_queue; | |
1340 | } | |
1341 | } | |
1342 | ||
1343 | return !!budget; | |
1344 | } | |
1345 | ||
1346 | /** |
1347 | * fm10k_update_itr - update the dynamic ITR value based on packet size | |
1348 | * | |
1349 | * Stores a new ITR value based strictly on packet size. The |
1350 | * divisors and thresholds used by this function were determined based | |
1351 | * on theoretical maximum wire speed and testing data, in order to | |
1352 | * minimize response time while increasing bulk throughput. | |
1353 | * | |
1354 | * @ring_container: Container for rings to have ITR updated | |
1355 | **/ | |
1356 | static void fm10k_update_itr(struct fm10k_ring_container *ring_container) | |
1357 | { | |
1358 | unsigned int avg_wire_size, packets; | |
1359 | ||
1360 | /* Only update ITR if we are using adaptive setting */ | |
1361 | if (!(ring_container->itr & FM10K_ITR_ADAPTIVE)) | |
1362 | goto clear_counts; | |
1363 | ||
1364 | packets = ring_container->total_packets; | |
1365 | if (!packets) | |
1366 | goto clear_counts; | |
1367 | ||
1368 | avg_wire_size = ring_container->total_bytes / packets; | |
1369 | ||
1370 | /* Add 24 bytes to size to account for CRC, preamble, and gap */ | |
1371 | avg_wire_size += 24; | |
1372 | ||
1373 | /* Don't starve jumbo frames */ | |
1374 | if (avg_wire_size > 3000) | |
1375 | avg_wire_size = 3000; | |
1376 | ||
1377 | /* Give a little boost to mid-size frames */ | |
1378 | if ((avg_wire_size > 300) && (avg_wire_size < 1200)) | |
1379 | avg_wire_size /= 3; | |
1380 | else | |
1381 | avg_wire_size /= 2; | |
1382 | ||
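| /* informative example: a 1024-byte average frame yields (1024 + 24) / 3 |
| * = 349, while full-size 1500-byte frames yield (1500 + 24) / 2 = 762 |
| */ |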
1383 | /* write back value and retain adaptive flag */ | |
1384 | ring_container->itr = avg_wire_size | FM10K_ITR_ADAPTIVE; | |
1385 | ||
1386 | clear_counts: | |
1387 | ring_container->total_bytes = 0; | |
1388 | ring_container->total_packets = 0; | |
1389 | } | |
1390 | ||
1391 | static void fm10k_qv_enable(struct fm10k_q_vector *q_vector) | |
1392 | { | |
1393 | /* Enable auto-mask and clear the current mask */ | |
1394 | u32 itr = FM10K_ITR_ENABLE; | |
1395 | ||
1396 | /* Update Tx ITR */ | |
1397 | fm10k_update_itr(&q_vector->tx); | |
1398 | ||
1399 | /* Update Rx ITR */ | |
1400 | fm10k_update_itr(&q_vector->rx); | |
1401 | ||
1402 | /* Store Tx itr in timer slot 0 */ | |
1403 | itr |= (q_vector->tx.itr & FM10K_ITR_MAX); | |
1404 | ||
1405 | /* Shift Rx itr to timer slot 1 */ | |
1406 | itr |= (q_vector->rx.itr & FM10K_ITR_MAX) << FM10K_ITR_INTERVAL1_SHIFT; | |
1407 | ||
1408 | /* Write the final value to the ITR register */ | |
1409 | writel(itr, q_vector->itr); | |
1410 | } | |
1411 | ||
1412 | static int fm10k_poll(struct napi_struct *napi, int budget) | |
1413 | { | |
1414 | struct fm10k_q_vector *q_vector = | |
1415 | container_of(napi, struct fm10k_q_vector, napi); | |
1416 | struct fm10k_ring *ring; |
1417 | int per_ring_budget; | |
1418 | bool clean_complete = true; | |
1419 | ||
1420 | fm10k_for_each_ring(ring, q_vector->tx) | |
1421 | clean_complete &= fm10k_clean_tx_irq(q_vector, ring); | |
1422 | ||
1423 | /* attempt to distribute budget to each queue fairly, but don't | |
1424 | * allow the budget to go below 1 because we'll exit polling | |
1425 | */ | |
1426 | if (q_vector->rx.count > 1) | |
1427 | per_ring_budget = max(budget/q_vector->rx.count, 1); | |
1428 | else | |
1429 | per_ring_budget = budget; | |
1430 | ||
1431 | fm10k_for_each_ring(ring, q_vector->rx) | |
1432 | clean_complete &= fm10k_clean_rx_irq(q_vector, ring, | |
1433 | per_ring_budget); | |
1434 | ||
1435 | /* If all work not completed, return budget and keep polling */ | |
1436 | if (!clean_complete) | |
1437 | return budget; | |
1438 | |
1439 | /* all work done, exit the polling mode */ | |
1440 | napi_complete(napi); | |
1441 | ||
1442 | /* re-enable the q_vector */ | |
1443 | fm10k_qv_enable(q_vector); | |
1444 | ||
1445 | return 0; | |
1446 | } | |
1447 | ||
1448 | /** |
1449 | * fm10k_set_qos_queues: Allocate queues for a QoS-enabled device |
1450 | * @interface: board private structure to initialize | |
1451 | * | |
1452 | * When QoS (Quality of Service) is enabled, allocate queues for | |
1453 | * each traffic class. If multiqueue isn't available, then abort QoS |
1454 | * initialization. |
1455 | * |
1456 | * This function handles all combinations of QoS and RSS. |
1457 | * | |
1458 | **/ | |
1459 | static bool fm10k_set_qos_queues(struct fm10k_intfc *interface) | |
1460 | { | |
1461 | struct net_device *dev = interface->netdev; | |
1462 | struct fm10k_ring_feature *f; | |
1463 | int rss_i, i; | |
1464 | int pcs; | |
1465 | ||
1466 | /* Map queue offset and counts onto allocated tx queues */ | |
1467 | pcs = netdev_get_num_tc(dev); | |
1468 | ||
1469 | if (pcs <= 1) | |
1470 | return false; | |
1471 | ||
1472 | /* set QoS mask and indices */ | |
1473 | f = &interface->ring_feature[RING_F_QOS]; | |
1474 | f->indices = pcs; | |
1475 | f->mask = (1 << fls(pcs - 1)) - 1; | |
1476 | ||
1477 | /* determine the upper limit for our current DCB mode */ | |
1478 | rss_i = interface->hw.mac.max_queues / pcs; | |
1479 | rss_i = 1 << (fls(rss_i) - 1); | |
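| /* fls() returns the highest set bit, so this rounds rss_i down to the |
| * nearest power of two |
| */ |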
1480 | ||
1481 | /* set RSS mask and indices */ | |
1482 | f = &interface->ring_feature[RING_F_RSS]; | |
1483 | rss_i = min_t(u16, rss_i, f->limit); | |
1484 | f->indices = rss_i; | |
1485 | f->mask = (1 << fls(rss_i - 1)) - 1; | |
1486 | ||
1487 | /* configure pause class to queue mapping */ | |
1488 | for (i = 0; i < pcs; i++) | |
1489 | netdev_set_tc_queue(dev, i, rss_i, rss_i * i); | |
1490 | ||
1491 | interface->num_rx_queues = rss_i * pcs; | |
1492 | interface->num_tx_queues = rss_i * pcs; | |
1493 | ||
1494 | return true; | |
1495 | } | |
1496 | ||
1497 | /** | |
1498 | * fm10k_set_rss_queues: Allocate queues for RSS | |
1499 | * @interface: board private structure to initialize | |
1500 | * | |
1501 | * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try | |
1502 | * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU. | |
1503 | * | |
1504 | **/ | |
1505 | static bool fm10k_set_rss_queues(struct fm10k_intfc *interface) | |
1506 | { | |
1507 | struct fm10k_ring_feature *f; | |
1508 | u16 rss_i; | |
1509 | ||
1510 | f = &interface->ring_feature[RING_F_RSS]; | |
1511 | rss_i = min_t(u16, interface->hw.mac.max_queues, f->limit); | |
1512 | ||
1513 | /* record indices and power of 2 mask for RSS */ | |
1514 | f->indices = rss_i; | |
1515 | f->mask = (1 << fls(rss_i - 1)) - 1; | |
1516 | ||
1517 | interface->num_rx_queues = rss_i; | |
1518 | interface->num_tx_queues = rss_i; | |
1519 | ||
1520 | return true; | |
1521 | } | |
1522 | ||
1523 | /** |
1524 | * fm10k_set_num_queues: Allocate queues for device, feature dependent | |
1525 | * @interface: board private structure to initialize | |
1526 | * | |
1527 | * This is the top level queue allocation routine. The order here is very | |
1528 | * important, starting with the most features turned on at once, |
1529 | * and ending with the smallest set of features. This way large combinations | |
1530 | * can be allocated if they're turned on, and smaller combinations are the | |
1531 | * fallthrough conditions. | |
1532 | * | |
1533 | **/ | |
1534 | static void fm10k_set_num_queues(struct fm10k_intfc *interface) | |
1535 | { | |
1536 | /* Start with base case */ | |
1537 | interface->num_rx_queues = 1; | |
1538 | interface->num_tx_queues = 1; | |
1539 | |
1540 | if (fm10k_set_qos_queues(interface)) | |
1541 | return; | |
1542 | ||
1543 | fm10k_set_rss_queues(interface); | |
1544 | } |
1545 | ||
1546 | /** | |
1547 | * fm10k_alloc_q_vector - Allocate memory for a single interrupt vector | |
1548 | * @interface: board private structure to initialize | |
1549 | * @v_count: q_vectors allocated on interface, used for ring interleaving | |
1550 | * @v_idx: index of vector in interface struct | |
1551 | * @txr_count: total number of Tx rings to allocate | |
1552 | * @txr_idx: index of first Tx ring to allocate | |
1553 | * @rxr_count: total number of Rx rings to allocate | |
1554 | * @rxr_idx: index of first Rx ring to allocate | |
1555 | * | |
1556 | * We allocate one q_vector. If allocation fails we return -ENOMEM. | |
1557 | **/ | |
1558 | static int fm10k_alloc_q_vector(struct fm10k_intfc *interface, | |
1559 | unsigned int v_count, unsigned int v_idx, | |
1560 | unsigned int txr_count, unsigned int txr_idx, | |
1561 | unsigned int rxr_count, unsigned int rxr_idx) | |
1562 | { | |
1563 | struct fm10k_q_vector *q_vector; | |
1564 | struct fm10k_ring *ring; |
1565 | int ring_count, size; |
1566 | ||
1567 | ring_count = txr_count + rxr_count; | |
1568 | size = sizeof(struct fm10k_q_vector) + |
1569 | (sizeof(struct fm10k_ring) * ring_count); | |
1570 | |
1571 | /* allocate q_vector and rings */ | |
1572 | q_vector = kzalloc(size, GFP_KERNEL); | |
1573 | if (!q_vector) | |
1574 | return -ENOMEM; | |
1575 | ||
1576 | /* initialize NAPI */ | |
1577 | netif_napi_add(interface->netdev, &q_vector->napi, | |
1578 | fm10k_poll, NAPI_POLL_WEIGHT); | |
1579 | ||
1580 | /* tie q_vector and interface together */ | |
1581 | interface->q_vector[v_idx] = q_vector; | |
1582 | q_vector->interface = interface; | |
1583 | q_vector->v_idx = v_idx; | |
1584 | ||
e27ef599 AD |
1585 | /* initialize pointer to rings */ |
1586 | ring = q_vector->ring; | |
1587 | ||
18283cad | 1588 | /* save Tx ring container info */ |
e27ef599 AD |
1589 | q_vector->tx.ring = ring; |
1590 | q_vector->tx.work_limit = FM10K_DEFAULT_TX_WORK; | |
18283cad AD |
1591 | q_vector->tx.itr = interface->tx_itr; |
1592 | q_vector->tx.count = txr_count; | |
1593 | ||
e27ef599 AD |
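/* The rings owned by this vector live in the memory allocated just after
 * the q_vector itself.  Queue indices are strided by v_count, so with,
 * say, 4 q_vectors, vector 0 picks up Tx queues 0, 4, 8, ... while
 * vector 1 picks up 1, 5, 9, ... and so on.
 */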
1594 | while (txr_count) { |
1595 | /* assign generic ring traits */ | |
1596 | ring->dev = &interface->pdev->dev; | |
1597 | ring->netdev = interface->netdev; | |
1598 | ||
1599 | /* configure backlink on ring */ | |
1600 | ring->q_vector = q_vector; | |
1601 | ||
1602 | /* apply Tx specific ring traits */ | |
1603 | ring->count = interface->tx_ring_count; | |
1604 | ring->queue_index = txr_idx; | |
1605 | ||
1606 | /* assign ring to interface */ | |
1607 | interface->tx_ring[txr_idx] = ring; | |
1608 | ||
1609 | /* update count and index */ | |
1610 | txr_count--; | |
1611 | txr_idx += v_count; | |
1612 | ||
1613 | /* push pointer to next ring */ | |
1614 | ring++; | |
1615 | } | |
1616 | ||
18283cad | 1617 | /* save Rx ring container info */ |
e27ef599 | 1618 | q_vector->rx.ring = ring; |
18283cad AD |
1619 | q_vector->rx.itr = interface->rx_itr; |
1620 | q_vector->rx.count = rxr_count; | |
1621 | ||
e27ef599 AD |
1622 | while (rxr_count) { |
1623 | /* assign generic ring traits */ | |
1624 | ring->dev = &interface->pdev->dev; | |
1625 | ring->netdev = interface->netdev; | |
5cd5e2e9 | 1626 | rcu_assign_pointer(ring->l2_accel, interface->l2_accel); |
e27ef599 AD |
1627 | |
1628 | /* configure backlink on ring */ | |
1629 | ring->q_vector = q_vector; | |
1630 | ||
1631 | /* apply Rx specific ring traits */ | |
1632 | ring->count = interface->rx_ring_count; | |
1633 | ring->queue_index = rxr_idx; | |
1634 | ||
1635 | /* assign ring to interface */ | |
1636 | interface->rx_ring[rxr_idx] = ring; | |
1637 | ||
1638 | /* update count and index */ | |
1639 | rxr_count--; | |
1640 | rxr_idx += v_count; | |
1641 | ||
1642 | /* push pointer to next ring */ | |
1643 | ring++; | |
1644 | } | |
1645 | ||
7461fd91 AD |
1646 | fm10k_dbg_q_vector_init(q_vector); |
1647 | ||
18283cad AD |
1648 | return 0; |
1649 | } | |
1650 | ||
1651 | /** | |
1652 | * fm10k_free_q_vector - Free memory allocated for specific interrupt vector | |
1653 | * @interface: board private structure to initialize | |
1654 | * @v_idx: Index of vector to be freed | |
1655 | * | |
1656 | * This function frees the memory allocated to the q_vector. In addition if | |
1657 | * NAPI is enabled it will delete any references to the NAPI struct prior | |
1658 | * to freeing the q_vector. | |
1659 | **/ | |
1660 | static void fm10k_free_q_vector(struct fm10k_intfc *interface, int v_idx) | |
1661 | { | |
1662 | struct fm10k_q_vector *q_vector = interface->q_vector[v_idx]; | |
e27ef599 AD |
1663 | struct fm10k_ring *ring; |
1664 | ||
7461fd91 AD |
1665 | fm10k_dbg_q_vector_exit(q_vector); |
1666 | ||
e27ef599 AD |
1667 | fm10k_for_each_ring(ring, q_vector->tx) |
1668 | interface->tx_ring[ring->queue_index] = NULL; | |
1669 | ||
1670 | fm10k_for_each_ring(ring, q_vector->rx) | |
1671 | interface->rx_ring[ring->queue_index] = NULL; | |
18283cad AD |
1672 | |
1673 | interface->q_vector[v_idx] = NULL; | |
1674 | netif_napi_del(&q_vector->napi); | |
1675 | kfree_rcu(q_vector, rcu); | |
1676 | } | |
1677 | ||
1678 | /** | |
1679 | * fm10k_alloc_q_vectors - Allocate memory for interrupt vectors | |
1680 | * @interface: board private structure to initialize | |
1681 | * | |
1682 | * We allocate one q_vector per queue interrupt. If allocation fails we | |
1683 | * return -ENOMEM. | |
1684 | **/ | |
1685 | static int fm10k_alloc_q_vectors(struct fm10k_intfc *interface) | |
1686 | { | |
1687 | unsigned int q_vectors = interface->num_q_vectors; | |
1688 | unsigned int rxr_remaining = interface->num_rx_queues; | |
1689 | unsigned int txr_remaining = interface->num_tx_queues; | |
1690 | unsigned int rxr_idx = 0, txr_idx = 0, v_idx = 0; | |
1691 | int err; | |
1692 | ||
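/* If there are at least as many vectors as rings, give each Rx ring a
 * dedicated vector first; otherwise the loop below spreads the remaining
 * rings as evenly as possible, e.g. 16 Rx and 16 Tx rings on 8 vectors
 * works out to 2 Rx and 2 Tx rings per vector.
 */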
1693 | if (q_vectors >= (rxr_remaining + txr_remaining)) { | |
1694 | for (; rxr_remaining; v_idx++) { | |
1695 | err = fm10k_alloc_q_vector(interface, q_vectors, v_idx, | |
1696 | 0, 0, 1, rxr_idx); | |
1697 | if (err) | |
1698 | goto err_out; | |
1699 | ||
1700 | /* update counts and index */ | |
1701 | rxr_remaining--; | |
1702 | rxr_idx++; | |
1703 | } | |
1704 | } | |
1705 | ||
1706 | for (; v_idx < q_vectors; v_idx++) { | |
1707 | int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); | |
1708 | int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); | |
1709 | ||
1710 | err = fm10k_alloc_q_vector(interface, q_vectors, v_idx, | |
1711 | tqpv, txr_idx, | |
1712 | rqpv, rxr_idx); | |
1713 | ||
1714 | if (err) | |
1715 | goto err_out; | |
1716 | ||
1717 | /* update counts and index */ | |
1718 | rxr_remaining -= rqpv; | |
1719 | txr_remaining -= tqpv; | |
1720 | rxr_idx++; | |
1721 | txr_idx++; | |
1722 | } | |
1723 | ||
1724 | return 0; | |
1725 | ||
1726 | err_out: | |
1727 | interface->num_tx_queues = 0; | |
1728 | interface->num_rx_queues = 0; | |
1729 | interface->num_q_vectors = 0; | |
1730 | ||
1731 | while (v_idx--) | |
1732 | fm10k_free_q_vector(interface, v_idx); | |
1733 | ||
1734 | return -ENOMEM; | |
1735 | } | |
1736 | ||
1737 | /** | |
1738 | * fm10k_free_q_vectors - Free memory allocated for interrupt vectors | |
1739 | * @interface: board private structure to initialize | |
1740 | * | |
1741 | * This function frees the memory allocated to the q_vectors. In addition if | |
1742 | * NAPI is enabled it will delete any references to the NAPI struct prior | |
1743 | * to freeing the q_vector. | |
1744 | **/ | |
1745 | static void fm10k_free_q_vectors(struct fm10k_intfc *interface) | |
1746 | { | |
1747 | int v_idx = interface->num_q_vectors; | |
1748 | ||
1749 | interface->num_tx_queues = 0; | |
1750 | interface->num_rx_queues = 0; | |
1751 | interface->num_q_vectors = 0; | |
1752 | ||
1753 | while (v_idx--) | |
1754 | fm10k_free_q_vector(interface, v_idx); | |
1755 | } | |
1756 | ||
1757 | /** | |
1758 | * fm10k_reset_msix_capability - reset MSI-X capability | |
1759 | * @interface: board private structure to initialize | |
1760 | * | |
1761 | * Reset the MSI-X capability back to its starting state | |
1762 | **/ | |
1763 | static void fm10k_reset_msix_capability(struct fm10k_intfc *interface) | |
1764 | { | |
1765 | pci_disable_msix(interface->pdev); | |
1766 | kfree(interface->msix_entries); | |
1767 | interface->msix_entries = NULL; | |
1768 | } | |
1769 | ||
1770 | /** | |
1771 | * fm10k_init_msix_capability - configure MSI-X capability | |
1772 | * @interface: board private structure to initialize | |
1773 | * | |
1774 | * Attempt to configure the interrupts using the best available | |
1775 | * capabilities of the hardware and the kernel. | |
1776 | **/ | |
1777 | static int fm10k_init_msix_capability(struct fm10k_intfc *interface) | |
1778 | { | |
1779 | struct fm10k_hw *hw = &interface->hw; | |
1780 | int v_budget, vector; | |
1781 | ||
1782 | /* It's easy to be greedy for MSI-X vectors, but it really | |
1783 | * doesn't do us much good if we have a lot more vectors | |
1784 | * than CPUs. So let's be conservative and only ask for | |
1785 | * (roughly) the same number of vectors as there are CPUs. | |
1786 | * The default is to use pairs of vectors. | |
1787 | */ | |
1788 | v_budget = max(interface->num_rx_queues, interface->num_tx_queues); | |
1789 | v_budget = min_t(u16, v_budget, num_online_cpus()); | |
1790 | ||
1791 | /* account for vectors not related to queues */ | |
1792 | v_budget += NON_Q_VECTORS(hw); | |
1793 | ||
1794 | /* At the same time, hardware can only support a maximum of | |
1795 | * hw->mac.max_msix_vectors vectors. With features | |
1796 | * such as RSS and VMDq, we can easily surpass the number of Rx and Tx | |
1797 | * descriptor queues supported by our device. Thus, we cap it off in | |
1798 | * those rare cases where the CPU count also exceeds our vector limit. | |
1799 | */ | |
1800 | v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors); | |
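/* As a rough example: 16 Rx and 16 Tx queues on an 8-CPU system yields
 * v_budget = 8 queue vectors plus the non-queue vectors, provided that
 * total still fits within hw->mac.max_msix_vectors.
 */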
1801 | ||
1802 | /* A failure in MSI-X entry allocation is fatal. */ | |
1803 | interface->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), | |
1804 | GFP_KERNEL); | |
1805 | if (!interface->msix_entries) | |
1806 | return -ENOMEM; | |
1807 | ||
1808 | /* populate entry values */ | |
1809 | for (vector = 0; vector < v_budget; vector++) | |
1810 | interface->msix_entries[vector].entry = vector; | |
1811 | ||
1812 | /* Attempt to enable MSI-X with requested value */ | |
1813 | v_budget = pci_enable_msix_range(interface->pdev, | |
1814 | interface->msix_entries, | |
1815 | MIN_MSIX_COUNT(hw), | |
1816 | v_budget); | |
1817 | if (v_budget < 0) { | |
1818 | kfree(interface->msix_entries); | |
1819 | interface->msix_entries = NULL; | |
1820 | return -ENOMEM; | |
1821 | } | |
1822 | ||
1823 | /* record the number of queues available for q_vectors */ | |
1824 | interface->num_q_vectors = v_budget - NON_Q_VECTORS(hw); | |
1825 | ||
1826 | return 0; | |
1827 | } | |
1828 | ||
aa3ac822 AD |
1829 | /** |
1830 | * fm10k_cache_ring_qos - Descriptor ring to register mapping for QoS | |
1831 | * @interface: Interface structure containing rings and devices | |
1832 | * | |
1833 | * Cache the descriptor ring offsets for QoS | |
1834 | **/ | |
1835 | static bool fm10k_cache_ring_qos(struct fm10k_intfc *interface) | |
1836 | { | |
1837 | struct net_device *dev = interface->netdev; | |
1838 | int pc, offset, rss_i, i, q_idx; | |
1839 | u16 pc_stride = interface->ring_feature[RING_F_QOS].mask + 1; | |
1840 | u8 num_pcs = netdev_get_num_tc(dev); | |
1841 | ||
1842 | if (num_pcs <= 1) | |
1843 | return false; | |
1844 | ||
1845 | rss_i = interface->ring_feature[RING_F_RSS].indices; | |
1846 | ||
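/* Illustrative mapping: with 4 traffic classes, rss_i = 4 and a pc_stride
 * of 4, TC 0 lands on register queues 0, 4, 8, 12 and TC 1 on 1, 5, 9, 13,
 * so the traffic classes interleave across the hardware queues.
 */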
1847 | for (pc = 0, offset = 0; pc < num_pcs; pc++, offset += rss_i) { | |
1848 | q_idx = pc; | |
1849 | for (i = 0; i < rss_i; i++) { | |
1850 | interface->tx_ring[offset + i]->reg_idx = q_idx; | |
1851 | interface->tx_ring[offset + i]->qos_pc = pc; | |
1852 | interface->rx_ring[offset + i]->reg_idx = q_idx; | |
1853 | interface->rx_ring[offset + i]->qos_pc = pc; | |
1854 | q_idx += pc_stride; | |
1855 | } | |
1856 | } | |
1857 | ||
1858 | return true; | |
1859 | } | |
1860 | ||
1861 | /** | |
1862 | * fm10k_cache_ring_rss - Descriptor ring to register mapping for RSS | |
1863 | * @interface: Interface structure containing rings and devices | |
1864 | * | |
1865 | * Cache the descriptor ring offsets for RSS | |
1866 | **/ | |
1867 | static void fm10k_cache_ring_rss(struct fm10k_intfc *interface) | |
1868 | { | |
1869 | int i; | |
1870 | ||
1871 | for (i = 0; i < interface->num_rx_queues; i++) | |
1872 | interface->rx_ring[i]->reg_idx = i; | |
1873 | ||
1874 | for (i = 0; i < interface->num_tx_queues; i++) | |
1875 | interface->tx_ring[i]->reg_idx = i; | |
1876 | } | |
1877 | ||
1878 | /** | |
1879 | * fm10k_assign_rings - Map rings to network devices | |
1880 | * @interface: Interface structure containing rings and devices | |
1881 | * | |
1882 | * This function is meant to go through and configure both the network | |
1883 | * devices so that they contain rings, and configure the rings so that | |
1884 | * they function with their network devices. | |
1885 | **/ | |
1886 | static void fm10k_assign_rings(struct fm10k_intfc *interface) | |
1887 | { | |
1888 | if (fm10k_cache_ring_qos(interface)) | |
1889 | return; | |
1890 | ||
1891 | fm10k_cache_ring_rss(interface); | |
1892 | } | |
1893 | ||
18283cad AD |
1894 | static void fm10k_init_reta(struct fm10k_intfc *interface) |
1895 | { | |
1896 | u16 i, rss_i = interface->ring_feature[RING_F_RSS].indices; | |
1897 | u32 reta, base; | |
1898 | ||
1899 | /* If the netdev is initialized we have to maintain the table if possible */ | |
1900 | if (interface->netdev->reg_state) { | |
1901 | for (i = FM10K_RETA_SIZE; i--;) { | |
1902 | reta = interface->reta[i]; | |
1903 | if ((((reta << 24) >> 24) < rss_i) && | |
1904 | (((reta << 16) >> 24) < rss_i) && | |
1905 | (((reta << 8) >> 24) < rss_i) && | |
1906 | (((reta) >> 24) < rss_i)) | |
1907 | continue; | |
1908 | goto repopulate_reta; | |
1909 | } | |
1910 | ||
1911 | /* do nothing if all of the elements are in bounds */ | |
1912 | return; | |
1913 | } | |
1914 | ||
1915 | repopulate_reta: | |
1916 | /* Populate the redirection table 4 entries at a time. To do this | |
1917 | * we are generating the results for n and n+2 and then interleaving | |
1918 | * those with the results for n+1 and n+3. | |
1919 | */ | |
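/* Net effect of the loop below: byte n of the table ends up as
 * (n * rss_i) / 128, so with rss_i = 4 entries 0-31 select queue 0,
 * entries 32-63 select queue 1, and so on across the active queues.
 */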
1920 | for (i = FM10K_RETA_SIZE; i--;) { | |
1921 | /* first pass generates n and n+2 */ | |
1922 | base = ((i * 0x00040004) + 0x00020000) * rss_i; | |
1923 | reta = (base & 0x3F803F80) >> 7; | |
1924 | ||
1925 | /* second pass generates n+1 and n+3 */ | |
1926 | base += 0x00010001 * rss_i; | |
1927 | reta |= (base & 0x3F803F80) << 1; | |
1928 | ||
1929 | interface->reta[i] = reta; | |
1930 | } | |
1931 | } | |
1932 | ||
1933 | /** | |
1934 | * fm10k_init_queueing_scheme - Determine proper queueing scheme | |
1935 | * @interface: board private structure to initialize | |
1936 | * | |
1937 | * We determine which queueing scheme to use based on... | |
1938 | *  - Hardware queue count (num_*_queues) | |
1939 | *    - defined by miscellaneous hardware support/features (RSS, etc.) | |
1940 | **/ | |
1941 | int fm10k_init_queueing_scheme(struct fm10k_intfc *interface) | |
1942 | { | |
1943 | int err; | |
1944 | ||
1945 | /* Number of supported queues */ | |
1946 | fm10k_set_num_queues(interface); | |
1947 | ||
1948 | /* Configure MSI-X capability */ | |
1949 | err = fm10k_init_msix_capability(interface); | |
1950 | if (err) { | |
1951 | dev_err(&interface->pdev->dev, | |
1952 | "Unable to initialize MSI-X capability\n"); | |
1953 | return err; | |
1954 | } | |
1955 | ||
1956 | /* Allocate memory for queues */ | |
1957 | err = fm10k_alloc_q_vectors(interface); | |
1958 | if (err) | |
1959 | return err; | |
1960 | ||
aa3ac822 AD |
1961 | /* Map rings to devices, and map devices to physical queues */ |
1962 | fm10k_assign_rings(interface); | |
1963 | ||
18283cad AD |
1964 | /* Initialize RSS redirection table */ |
1965 | fm10k_init_reta(interface); | |
1966 | ||
1967 | return 0; | |
1968 | } | |
1969 | ||
1970 | /** | |
1971 | * fm10k_clear_queueing_scheme - Clear the current queueing scheme settings | |
1972 | * @interface: board private structure to clear queueing scheme on | |
1973 | * | |
1974 | * We go through and clear queueing specific resources and reset the structure | |
1975 | * to pre-load conditions | |
1976 | **/ | |
1977 | void fm10k_clear_queueing_scheme(struct fm10k_intfc *interface) | |
1978 | { | |
1979 | fm10k_free_q_vectors(interface); | |
1980 | fm10k_reset_msix_capability(interface); | |
1981 | } |