/* Intel Ethernet Switch Host Interface Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 */

#include "fm10k.h"
#include <linux/vmalloc.h>

/**
 * fm10k_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int fm10k_setup_tx_resources(struct fm10k_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int size;

	size = sizeof(struct fm10k_tx_buffer) * tx_ring->count;

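	/* the tx_buffer array can be fairly large, so use vmalloc-backed
	 * zeroed memory (vzalloc) rather than requiring physically
	 * contiguous pages
	 */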
	tx_ring->tx_buffer = vzalloc(size);
	if (!tx_ring->tx_buffer)
		goto err;

	u64_stats_init(&tx_ring->syncp);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct fm10k_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc)
		goto err;

	return 0;

err:
	vfree(tx_ring->tx_buffer);
	tx_ring->tx_buffer = NULL;
	return -ENOMEM;
}

/**
 * fm10k_setup_all_tx_resources - allocate all queues Tx resources
 * @interface: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int fm10k_setup_all_tx_resources(struct fm10k_intfc *interface)
{
	int i, err = 0;

	for (i = 0; i < interface->num_tx_queues; i++) {
		err = fm10k_setup_tx_resources(interface->tx_ring[i]);
		if (!err)
			continue;

		netif_err(interface, probe, interface->netdev,
			  "Allocation for Tx Queue %u failed\n", i);
		goto err_setup_tx;
	}

	return 0;
err_setup_tx:
	/* rewind the index, freeing the rings as we go */
	while (i--)
		fm10k_free_tx_resources(interface->tx_ring[i]);
	return err;
}

/**
 * fm10k_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int fm10k_setup_rx_resources(struct fm10k_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int size;

	size = sizeof(struct fm10k_rx_buffer) * rx_ring->count;

	rx_ring->rx_buffer = vzalloc(size);
	if (!rx_ring->rx_buffer)
		goto err;

	u64_stats_init(&rx_ring->syncp);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union fm10k_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);
	if (!rx_ring->desc)
		goto err;

	return 0;
err:
	vfree(rx_ring->rx_buffer);
	rx_ring->rx_buffer = NULL;
	return -ENOMEM;
}

/**
 * fm10k_setup_all_rx_resources - allocate all queues Rx resources
 * @interface: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int fm10k_setup_all_rx_resources(struct fm10k_intfc *interface)
{
	int i, err = 0;

	for (i = 0; i < interface->num_rx_queues; i++) {
		err = fm10k_setup_rx_resources(interface->rx_ring[i]);
		if (!err)
			continue;

		netif_err(interface, probe, interface->netdev,
			  "Allocation for Rx Queue %u failed\n", i);
		goto err_setup_rx;
	}

	return 0;
err_setup_rx:
	/* rewind the index, freeing the rings as we go */
	while (i--)
		fm10k_free_rx_resources(interface->rx_ring[i]);
	return err;
}

void fm10k_unmap_and_free_tx_resource(struct fm10k_ring *ring,
				      struct fm10k_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}
	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}

/**
 * fm10k_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void fm10k_clean_tx_ring(struct fm10k_ring *tx_ring)
{
	struct fm10k_tx_buffer *tx_buffer;
	unsigned long size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_buffer)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		tx_buffer = &tx_ring->tx_buffer[i];
		fm10k_unmap_and_free_tx_resource(tx_ring, tx_buffer);
	}

	/* reset BQL values */
	netdev_tx_reset_queue(txring_txq(tx_ring));

	size = sizeof(struct fm10k_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);
}

/**
 * fm10k_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void fm10k_free_tx_resources(struct fm10k_ring *tx_ring)
{
	fm10k_clean_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer);
	tx_ring->tx_buffer = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size,
			  tx_ring->desc, tx_ring->dma);
	tx_ring->desc = NULL;
}

/**
 * fm10k_clean_all_tx_rings - Free Tx Buffers for all queues
 * @interface: board private structure
 **/
void fm10k_clean_all_tx_rings(struct fm10k_intfc *interface)
{
	int i;

	for (i = 0; i < interface->num_tx_queues; i++)
		fm10k_clean_tx_ring(interface->tx_ring[i]);
}

/**
 * fm10k_free_all_tx_resources - Free Tx Resources for All Queues
 * @interface: board private structure
 *
 * Free all transmit software resources
 **/
static void fm10k_free_all_tx_resources(struct fm10k_intfc *interface)
{
	int i = interface->num_tx_queues;

	while (i--)
		fm10k_free_tx_resources(interface->tx_ring[i]);
}

/**
 * fm10k_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
static void fm10k_clean_rx_ring(struct fm10k_ring *rx_ring)
{
	unsigned long size;
	u16 i;

	if (!rx_ring->rx_buffer)
		return;

	if (rx_ring->skb)
		dev_kfree_skb(rx_ring->skb);
	rx_ring->skb = NULL;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct fm10k_rx_buffer *buffer = &rx_ring->rx_buffer[i];
		/* clean-up will only set page pointer to NULL */
		if (!buffer->page)
			continue;

		dma_unmap_page(rx_ring->dev, buffer->dma,
			       PAGE_SIZE, DMA_FROM_DEVICE);
		__free_page(buffer->page);

		buffer->page = NULL;
	}

	size = sizeof(struct fm10k_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * fm10k_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void fm10k_free_rx_resources(struct fm10k_ring *rx_ring)
{
	fm10k_clean_rx_ring(rx_ring);

	vfree(rx_ring->rx_buffer);
	rx_ring->rx_buffer = NULL;

	/* if not set, then don't free */
	if (!rx_ring->desc)
		return;

	dma_free_coherent(rx_ring->dev, rx_ring->size,
			  rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * fm10k_clean_all_rx_rings - Free Rx Buffers for all queues
 * @interface: board private structure
 **/
void fm10k_clean_all_rx_rings(struct fm10k_intfc *interface)
{
	int i;

	for (i = 0; i < interface->num_rx_queues; i++)
		fm10k_clean_rx_ring(interface->rx_ring[i]);
}

/**
 * fm10k_free_all_rx_resources - Free Rx Resources for All Queues
 * @interface: board private structure
 *
 * Free all receive software resources
 **/
static void fm10k_free_all_rx_resources(struct fm10k_intfc *interface)
{
	int i = interface->num_rx_queues;

	while (i--)
		fm10k_free_rx_resources(interface->rx_ring[i]);
}

/**
 * fm10k_request_glort_range - Request GLORTs for use in configuring rules
 * @interface: board private structure
 *
 * This function allocates a range of glorts for this interface to use.
 **/
static void fm10k_request_glort_range(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	u16 mask = (~hw->mac.dglort_map) >> FM10K_DGLORTMAP_MASK_SHIFT;

	/* establish GLORT base */
	interface->glort = hw->mac.dglort_map & FM10K_DGLORTMAP_NONE;
	interface->glort_count = 0;

	/* nothing we can do until mask is allocated */
	if (hw->mac.dglort_map == FM10K_DGLORTMAP_NONE)
		return;

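	/* the upper bits of dglort_map carry an inverted size mask for the
	 * GLORT range assigned to this port, so the number of usable GLORTs
	 * is simply mask + 1
	 */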
	interface->glort_count = mask + 1;
}

/**
 * fm10k_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
int fm10k_open(struct net_device *netdev)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	int err;

	/* allocate transmit descriptors */
	err = fm10k_setup_all_tx_resources(interface);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = fm10k_setup_all_rx_resources(interface);
	if (err)
		goto err_setup_rx;

	/* allocate interrupt resources */
	err = fm10k_qv_request_irq(interface);
	if (err)
		goto err_req_irq;

	/* setup GLORT assignment for this port */
	fm10k_request_glort_range(interface);

	/* Notify the stack of the actual queue counts */
	err = netif_set_real_num_rx_queues(netdev,
					   interface->num_rx_queues);
	if (err)
		goto err_set_queues;

	fm10k_up(interface);

	return 0;

err_set_queues:
	fm10k_qv_free_irq(interface);
err_req_irq:
	fm10k_free_all_rx_resources(interface);
err_setup_rx:
	fm10k_free_all_tx_resources(interface);
err_setup_tx:
	return err;
}

/**
 * fm10k_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
int fm10k_close(struct net_device *netdev)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);

	fm10k_down(interface);

	fm10k_qv_free_irq(interface);

	fm10k_free_all_tx_resources(interface);
	fm10k_free_all_rx_resources(interface);

	return 0;
}

static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	unsigned int r_idx = 0;
	int err;

	if ((skb->protocol == htons(ETH_P_8021Q)) &&
	    !vlan_tx_tag_present(skb)) {
		/* FM10K only supports hardware tagging, any tags in frame
		 * are considered 2nd level or "outer" tags
		 */
		struct vlan_hdr *vhdr;
		__be16 proto;

		/* make sure skb is not shared */
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (!skb)
			return NETDEV_TX_OK;

		/* make sure there is enough room to move the ethernet header */
		if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
			return NETDEV_TX_OK;

		/* verify the skb head is not shared */
		err = skb_cow_head(skb, 0);
		if (err)
			return NETDEV_TX_OK;

		/* locate vlan header */
		vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);

		/* pull the 2 key pieces of data out of it */
		__vlan_hwaccel_put_tag(skb,
				       htons(ETH_P_8021Q),
				       ntohs(vhdr->h_vlan_TCI));
		proto = vhdr->h_vlan_encapsulated_proto;
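		/* 1536 (0x0600) is the smallest valid EtherType; anything
		 * below that in this field is an 802.3 length, so fall back
		 * to the generic 802.2 protocol value
		 */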
		skb->protocol = (ntohs(proto) >= 1536) ? proto :
							 htons(ETH_P_802_2);

		/* squash it by moving the ethernet addresses up 4 bytes */
		memmove(skb->data + VLAN_HLEN, skb->data, 12);
		__skb_pull(skb, VLAN_HLEN);
		skb_reset_mac_header(skb);
	}

	/* The minimum packet size for a single buffer is 17B so pad the skb
	 * in order to meet this minimum size requirement.
	 */
	if (unlikely(skb->len < 17)) {
		int pad_len = 17 - skb->len;

		if (skb_pad(skb, pad_len))
			return NETDEV_TX_OK;
		__skb_put(skb, pad_len);
	}

	if (r_idx >= interface->num_tx_queues)
		r_idx %= interface->num_tx_queues;

	err = fm10k_xmit_frame_ring(skb, interface->tx_ring[r_idx]);

	return err;
}

static int fm10k_change_mtu(struct net_device *dev, int new_mtu)
{
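	/* 68 bytes is the minimum IPv4 MTU, so refuse anything smaller */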
	if (new_mtu < 68 || new_mtu > FM10K_MAX_JUMBO_FRAME_SIZE)
		return -EINVAL;

	dev->mtu = new_mtu;

	return 0;
}

/**
 * fm10k_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void fm10k_tx_timeout(struct net_device *netdev)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	bool real_tx_hang = false;
	int i;

#define TX_TIMEO_LIMIT 16000
	for (i = 0; i < interface->num_tx_queues; i++) {
		struct fm10k_ring *tx_ring = interface->tx_ring[i];

		if (check_for_tx_hang(tx_ring) && fm10k_check_tx_hang(tx_ring))
			real_tx_hang = true;
	}

	if (real_tx_hang) {
		fm10k_tx_timeout_reset(interface);
	} else {
		netif_info(interface, drv, netdev,
			   "Fake Tx hang detected with timeout of %d seconds\n",
			   netdev->watchdog_timeo / HZ);

		/* fake Tx hang - increase the kernel timeout */
		if (netdev->watchdog_timeo < TX_TIMEO_LIMIT)
			netdev->watchdog_timeo *= 2;
	}
}

static int fm10k_uc_vlan_unsync(struct net_device *netdev,
				const unsigned char *uc_addr)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_hw *hw = &interface->hw;
	u16 glort = interface->glort;
	u16 vid = interface->vid;
	bool set = !!(vid / VLAN_N_VID);
	int err;

	/* drop any leading bits on the VLAN ID */
	vid &= VLAN_N_VID - 1;

	err = hw->mac.ops.update_uc_addr(hw, glort, uc_addr, vid, set, 0);
	if (err)
		return err;

	/* return non-zero value as we are only doing a partial sync/unsync */
	return 1;
}

static int fm10k_mc_vlan_unsync(struct net_device *netdev,
				const unsigned char *mc_addr)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_hw *hw = &interface->hw;
	u16 glort = interface->glort;
	u16 vid = interface->vid;
	bool set = !!(vid / VLAN_N_VID);
	int err;

	/* drop any leading bits on the VLAN ID */
	vid &= VLAN_N_VID - 1;

	err = hw->mac.ops.update_mc_addr(hw, glort, mc_addr, vid, set);
	if (err)
		return err;

	/* return non-zero value as we are only doing a partial sync/unsync */
	return 1;
}

static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_hw *hw = &interface->hw;
	s32 err;

	/* updates do not apply to VLAN 0 */
	if (!vid)
		return 0;

	if (vid >= VLAN_N_VID)
		return -EINVAL;

	/* Verify we have permission to add VLANs */
	if (hw->mac.vlan_override)
		return -EACCES;

	/* if default VLAN is already present do nothing */
	if (vid == hw->mac.default_vid)
		return -EBUSY;

	/* update active_vlans bitmask */
	set_bit(vid, interface->active_vlans);
	if (!set)
		clear_bit(vid, interface->active_vlans);

	fm10k_mbx_lock(interface);

	/* only need to update the VLAN if not in promiscuous mode */
	if (!(netdev->flags & IFF_PROMISC)) {
		err = hw->mac.ops.update_vlan(hw, vid, 0, set);
		if (err)
			goto err_out;
	}

	/* update our base MAC address */
	err = hw->mac.ops.update_uc_addr(hw, interface->glort, hw->mac.addr,
					 vid, set, 0);
	if (err)
		goto err_out;

	/* set vid prior to syncing/unsyncing the VLAN */
	interface->vid = vid + (set ? VLAN_N_VID : 0);
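	/* the add/remove flag is folded in above VLAN_N_VID so that the
	 * fm10k_uc_vlan_unsync/fm10k_mc_vlan_unsync callbacks can recover it
	 * with vid / VLAN_N_VID and the VLAN ID with vid & (VLAN_N_VID - 1)
	 */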

	/* Update the unicast and multicast address list to add/drop VLAN */
	__dev_uc_unsync(netdev, fm10k_uc_vlan_unsync);
	__dev_mc_unsync(netdev, fm10k_mc_vlan_unsync);

err_out:
	fm10k_mbx_unlock(interface);

	return err;
}

static int fm10k_vlan_rx_add_vid(struct net_device *netdev,
				 __always_unused __be16 proto, u16 vid)
{
	/* update VLAN and address table based on changes */
	return fm10k_update_vid(netdev, vid, true);
}

static int fm10k_vlan_rx_kill_vid(struct net_device *netdev,
				  __always_unused __be16 proto, u16 vid)
{
	/* update VLAN and address table based on changes */
	return fm10k_update_vid(netdev, vid, false);
}

static u16 fm10k_find_next_vlan(struct fm10k_intfc *interface, u16 vid)
{
	struct fm10k_hw *hw = &interface->hw;
	u16 default_vid = hw->mac.default_vid;
	u16 vid_limit = vid < default_vid ? default_vid : VLAN_N_VID;

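	/* while searching below default_vid the scan stops at default_vid,
	 * so the caller's loop visits the default VLAN exactly once before
	 * the bitmap search resumes above it
	 */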
	vid = find_next_bit(interface->active_vlans, vid_limit, ++vid);

	return vid;
}

static void fm10k_clear_unused_vlans(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	u32 vid, prev_vid;

	/* loop through and find any gaps in the table */
	for (vid = 0, prev_vid = 0;
	     prev_vid < VLAN_N_VID;
	     prev_vid = vid + 1, vid = fm10k_find_next_vlan(interface, vid)) {
		if (prev_vid == vid)
			continue;

		/* send request to clear multiple bits at a time */
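		/* the gap length (vid - prev_vid - 1) is packed into the
		 * upper bits of the VLAN argument via FM10K_VLAN_LENGTH_SHIFT
		 * so a single update_vlan() request covers the whole range
		 */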
		prev_vid += (vid - prev_vid - 1) << FM10K_VLAN_LENGTH_SHIFT;
		hw->mac.ops.update_vlan(hw, prev_vid, 0, false);
	}
}

static int __fm10k_uc_sync(struct net_device *dev,
			   const unsigned char *addr, bool sync)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	struct fm10k_hw *hw = &interface->hw;
	u16 vid, glort = interface->glort;
	s32 err;

	if (!is_valid_ether_addr(addr))
		return -EADDRNOTAVAIL;

	/* update table with current entries */
	for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 0;
	     vid < VLAN_N_VID;
	     vid = fm10k_find_next_vlan(interface, vid)) {
		err = hw->mac.ops.update_uc_addr(hw, glort, addr,
						 vid, sync, 0);
		if (err)
			return err;
	}

	return 0;
}

static int fm10k_uc_sync(struct net_device *dev,
			 const unsigned char *addr)
{
	return __fm10k_uc_sync(dev, addr, true);
}

static int fm10k_uc_unsync(struct net_device *dev,
			   const unsigned char *addr)
{
	return __fm10k_uc_sync(dev, addr, false);
}

static int fm10k_set_mac(struct net_device *dev, void *p)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	struct fm10k_hw *hw = &interface->hw;
	struct sockaddr *addr = p;
	s32 err = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (dev->flags & IFF_UP) {
		/* setting MAC address requires mailbox */
		fm10k_mbx_lock(interface);

		err = fm10k_uc_sync(dev, addr->sa_data);
		if (!err)
			fm10k_uc_unsync(dev, hw->mac.addr);

		fm10k_mbx_unlock(interface);
	}

	if (!err) {
		ether_addr_copy(dev->dev_addr, addr->sa_data);
		ether_addr_copy(hw->mac.addr, addr->sa_data);
		dev->addr_assign_type &= ~NET_ADDR_RANDOM;
	}

	/* if we had a mailbox error suggest trying again */
	return err ? -EAGAIN : 0;
}

static int __fm10k_mc_sync(struct net_device *dev,
			   const unsigned char *addr, bool sync)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	struct fm10k_hw *hw = &interface->hw;
	u16 vid, glort = interface->glort;
	s32 err;

	if (!is_multicast_ether_addr(addr))
		return -EADDRNOTAVAIL;

	/* update table with current entries */
	for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 0;
	     vid < VLAN_N_VID;
	     vid = fm10k_find_next_vlan(interface, vid)) {
		err = hw->mac.ops.update_mc_addr(hw, glort, addr, vid, sync);
		if (err)
			return err;
	}

	return 0;
}

static int fm10k_mc_sync(struct net_device *dev,
			 const unsigned char *addr)
{
	return __fm10k_mc_sync(dev, addr, true);
}

static int fm10k_mc_unsync(struct net_device *dev,
			   const unsigned char *addr)
{
	return __fm10k_mc_sync(dev, addr, false);
}

static void fm10k_set_rx_mode(struct net_device *dev)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	struct fm10k_hw *hw = &interface->hw;
	int xcast_mode;

	/* no need to update the hardware if we are not running */
	if (!(dev->flags & IFF_UP))
		return;

	/* determine new mode based on flags */
	xcast_mode = (dev->flags & IFF_PROMISC) ? FM10K_XCAST_MODE_PROMISC :
		     (dev->flags & IFF_ALLMULTI) ? FM10K_XCAST_MODE_ALLMULTI :
		     (dev->flags & (IFF_BROADCAST | IFF_MULTICAST)) ?
		     FM10K_XCAST_MODE_MULTI : FM10K_XCAST_MODE_NONE;

	fm10k_mbx_lock(interface);

	/* synchronize all of the addresses */
	if (xcast_mode != FM10K_XCAST_MODE_PROMISC) {
		__dev_uc_sync(dev, fm10k_uc_sync, fm10k_uc_unsync);
		if (xcast_mode != FM10K_XCAST_MODE_ALLMULTI)
			__dev_mc_sync(dev, fm10k_mc_sync, fm10k_mc_unsync);
	}

	/* only touch the hardware if the mode is actually changing */
	if (interface->xcast_mode != xcast_mode) {
		/* update VLAN table */
		if (xcast_mode == FM10K_XCAST_MODE_PROMISC)
			hw->mac.ops.update_vlan(hw, FM10K_VLAN_ALL, 0, true);
		if (interface->xcast_mode == FM10K_XCAST_MODE_PROMISC)
			fm10k_clear_unused_vlans(interface);

		/* update xcast mode */
		hw->mac.ops.update_xcast_mode(hw, interface->glort, xcast_mode);

		/* record updated xcast mode state */
		interface->xcast_mode = xcast_mode;
	}

	fm10k_mbx_unlock(interface);
}

void fm10k_restore_rx_state(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	int xcast_mode;
	u16 vid, glort;

	/* record glort for this interface */
	glort = interface->glort;

	/* convert interface flags to xcast mode */
	if (netdev->flags & IFF_PROMISC)
		xcast_mode = FM10K_XCAST_MODE_PROMISC;
	else if (netdev->flags & IFF_ALLMULTI)
		xcast_mode = FM10K_XCAST_MODE_ALLMULTI;
	else if (netdev->flags & (IFF_BROADCAST | IFF_MULTICAST))
		xcast_mode = FM10K_XCAST_MODE_MULTI;
	else
		xcast_mode = FM10K_XCAST_MODE_NONE;

	fm10k_mbx_lock(interface);

	/* Enable logical port */
	hw->mac.ops.update_lport_state(hw, glort, interface->glort_count, true);

	/* update VLAN table */
	hw->mac.ops.update_vlan(hw, FM10K_VLAN_ALL, 0,
				xcast_mode == FM10K_XCAST_MODE_PROMISC);

	/* Add filter for VLAN 0 */
	hw->mac.ops.update_vlan(hw, 0, 0, true);

	/* update table with current entries */
	for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 0;
	     vid < VLAN_N_VID;
	     vid = fm10k_find_next_vlan(interface, vid)) {
		hw->mac.ops.update_vlan(hw, vid, 0, true);
		hw->mac.ops.update_uc_addr(hw, glort, hw->mac.addr,
					   vid, true, 0);
	}

	/* synchronize all of the addresses */
	if (xcast_mode != FM10K_XCAST_MODE_PROMISC) {
		__dev_uc_sync(netdev, fm10k_uc_sync, fm10k_uc_unsync);
		if (xcast_mode != FM10K_XCAST_MODE_ALLMULTI)
			__dev_mc_sync(netdev, fm10k_mc_sync, fm10k_mc_unsync);
	}

	/* update xcast mode */
	hw->mac.ops.update_xcast_mode(hw, glort, xcast_mode);

	fm10k_mbx_unlock(interface);

	/* record updated xcast mode state */
	interface->xcast_mode = xcast_mode;
}

void fm10k_reset_rx_state(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;

	fm10k_mbx_lock(interface);

	/* clear the logical port state on lower device */
	hw->mac.ops.update_lport_state(hw, interface->glort,
				       interface->glort_count, false);

	fm10k_mbx_unlock(interface);

	/* reset flags to default state */
	interface->xcast_mode = FM10K_XCAST_MODE_NONE;

	/* clear the sync flag since the lport has been dropped */
	__dev_uc_unsync(netdev, NULL);
	__dev_mc_unsync(netdev, NULL);
}

/**
 * fm10k_get_stats64 - Get System Network Statistics
 * @netdev: network interface device structure
 * @stats: storage space for 64bit statistics
 *
 * Returns 64bit statistics, for use in the ndo_get_stats64 callback. This
 * function replaces fm10k_get_stats for kernels which support it.
 */
static struct rtnl_link_stats64 *fm10k_get_stats64(struct net_device *netdev,
						   struct rtnl_link_stats64 *stats)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_ring *ring;
	unsigned int start, i;
	u64 bytes, packets;

	rcu_read_lock();

	for (i = 0; i < interface->num_rx_queues; i++) {
		ring = ACCESS_ONCE(interface->rx_ring[i]);

		if (!ring)
			continue;

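		/* reread the counters if the writer updated them mid-read;
		 * u64_stats_fetch_retry_irq() guards against a torn 64-bit
		 * read on 32-bit systems
		 */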
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			packets = ring->stats.packets;
			bytes = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes += bytes;
	}

	for (i = 0; i < interface->num_tx_queues; i++) {
		ring = ACCESS_ONCE(interface->tx_ring[i]);

		if (!ring)
			continue;

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			packets = ring->stats.packets;
			bytes = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

		stats->tx_packets += packets;
		stats->tx_bytes += bytes;
	}

	rcu_read_unlock();

	/* following stats updated by fm10k_service_task() */
	stats->rx_missed_errors = netdev->stats.rx_missed_errors;

	return stats;
}

0e7b3644 | 976 | static const struct net_device_ops fm10k_netdev_ops = { |
504c5eac AD |
977 | .ndo_open = fm10k_open, |
978 | .ndo_stop = fm10k_close, | |
0e7b3644 AD |
979 | .ndo_validate_addr = eth_validate_addr, |
980 | .ndo_start_xmit = fm10k_xmit_frame, | |
981 | .ndo_set_mac_address = fm10k_set_mac, | |
982 | .ndo_change_mtu = fm10k_change_mtu, | |
b101c962 | 983 | .ndo_tx_timeout = fm10k_tx_timeout, |
8f5e20d4 AD |
984 | .ndo_vlan_rx_add_vid = fm10k_vlan_rx_add_vid, |
985 | .ndo_vlan_rx_kill_vid = fm10k_vlan_rx_kill_vid, | |
0e7b3644 | 986 | .ndo_set_rx_mode = fm10k_set_rx_mode, |
e27ef599 | 987 | .ndo_get_stats64 = fm10k_get_stats64, |
0e7b3644 AD |
988 | }; |
989 | ||
990 | #define DEFAULT_DEBUG_LEVEL_SHIFT 3 | |
991 | ||
992 | struct net_device *fm10k_alloc_netdev(void) | |
993 | { | |
994 | struct fm10k_intfc *interface; | |
995 | struct net_device *dev; | |
996 | ||
e27ef599 | 997 | dev = alloc_etherdev_mq(sizeof(struct fm10k_intfc), MAX_QUEUES); |
0e7b3644 AD |
998 | if (!dev) |
999 | return NULL; | |
1000 | ||
1001 | /* set net device and ethtool ops */ | |
1002 | dev->netdev_ops = &fm10k_netdev_ops; | |
1003 | ||
1004 | /* configure default debug level */ | |
1005 | interface = netdev_priv(dev); | |
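	/* (1 << 3) - 1 == 0x7, which enables the NETIF_MSG_DRV,
	 * NETIF_MSG_PROBE and NETIF_MSG_LINK message classes by default
	 */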
	interface->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;

	/* configure default features */
	dev->features |= NETIF_F_SG;

	/* all features defined to this point should be changeable */
	dev->hw_features |= dev->features;

	/* configure VLAN features */
	dev->vlan_features |= dev->features;

	/* configure tunnel offloads */
	dev->hw_enc_features = NETIF_F_SG;

	/* we want to leave these both on as we cannot disable VLAN tag
	 * insertion or stripping on the hardware since it is contained
	 * in the FTAG and not in the frame itself.
	 */
	dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
			 NETIF_F_HW_VLAN_CTAG_RX |
			 NETIF_F_HW_VLAN_CTAG_FILTER;

	dev->priv_flags |= IFF_UNICAST_FLT;

	return dev;
}