/* Intel Ethernet Switch Host Interface Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 */

#include "fm10k.h"
#include <linux/vmalloc.h>
#if IS_ENABLED(CONFIG_VXLAN)
#include <net/vxlan.h>
#endif /* CONFIG_VXLAN */

/**
 * fm10k_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int fm10k_setup_tx_resources(struct fm10k_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int size;

	size = sizeof(struct fm10k_tx_buffer) * tx_ring->count;

	tx_ring->tx_buffer = vzalloc(size);
	if (!tx_ring->tx_buffer)
		goto err;

	u64_stats_init(&tx_ring->syncp);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct fm10k_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc)
		goto err;

	return 0;

err:
	vfree(tx_ring->tx_buffer);
	tx_ring->tx_buffer = NULL;
	return -ENOMEM;
}
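
/* Illustrative sizing walk-through (a sketch, not part of the driver):
 * assuming the default ring of 512 descriptors and a 16-byte
 * struct fm10k_tx_desc, the arithmetic above gives
 *
 *	size = 512 * 16 = 8192;	ALIGN(8192, 4096) == 8192
 *
 * while a count of 384 would give 6144 bytes, rounded up to 8192, so the
 * coherent DMA block handed to hardware always spans whole 4K pages.
 */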

/**
 * fm10k_setup_all_tx_resources - allocate all queues Tx resources
 * @interface: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int fm10k_setup_all_tx_resources(struct fm10k_intfc *interface)
{
	int i, err = 0;

	for (i = 0; i < interface->num_tx_queues; i++) {
		err = fm10k_setup_tx_resources(interface->tx_ring[i]);
		if (!err)
			continue;

		netif_err(interface, probe, interface->netdev,
			  "Allocation for Tx Queue %u failed\n", i);
		goto err_setup_tx;
	}

	return 0;
err_setup_tx:
	/* rewind the index freeing the rings as we go */
	while (i--)
		fm10k_free_tx_resources(interface->tx_ring[i]);
	return err;
}

/**
 * fm10k_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int fm10k_setup_rx_resources(struct fm10k_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int size;

	size = sizeof(struct fm10k_rx_buffer) * rx_ring->count;

	rx_ring->rx_buffer = vzalloc(size);
	if (!rx_ring->rx_buffer)
		goto err;

	u64_stats_init(&rx_ring->syncp);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union fm10k_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);
	if (!rx_ring->desc)
		goto err;

	return 0;

err:
	vfree(rx_ring->rx_buffer);
	rx_ring->rx_buffer = NULL;
	return -ENOMEM;
}

/**
 * fm10k_setup_all_rx_resources - allocate all queues Rx resources
 * @interface: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int fm10k_setup_all_rx_resources(struct fm10k_intfc *interface)
{
	int i, err = 0;

	for (i = 0; i < interface->num_rx_queues; i++) {
		err = fm10k_setup_rx_resources(interface->rx_ring[i]);
		if (!err)
			continue;

		netif_err(interface, probe, interface->netdev,
			  "Allocation for Rx Queue %u failed\n", i);
		goto err_setup_rx;
	}

	return 0;
err_setup_rx:
	/* rewind the index freeing the rings as we go */
	while (i--)
		fm10k_free_rx_resources(interface->rx_ring[i]);
	return err;
}

void fm10k_unmap_and_free_tx_resource(struct fm10k_ring *ring,
				      struct fm10k_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}
	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}

/**
 * fm10k_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void fm10k_clean_tx_ring(struct fm10k_ring *tx_ring)
{
	struct fm10k_tx_buffer *tx_buffer;
	unsigned long size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_buffer)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		tx_buffer = &tx_ring->tx_buffer[i];
		fm10k_unmap_and_free_tx_resource(tx_ring, tx_buffer);
	}

	/* reset BQL values */
	netdev_tx_reset_queue(txring_txq(tx_ring));

	size = sizeof(struct fm10k_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);
}

/**
 * fm10k_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void fm10k_free_tx_resources(struct fm10k_ring *tx_ring)
{
	fm10k_clean_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer);
	tx_ring->tx_buffer = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size,
			  tx_ring->desc, tx_ring->dma);
	tx_ring->desc = NULL;
}

/**
 * fm10k_clean_all_tx_rings - Free Tx Buffers for all queues
 * @interface: board private structure
 **/
void fm10k_clean_all_tx_rings(struct fm10k_intfc *interface)
{
	int i;

	for (i = 0; i < interface->num_tx_queues; i++)
		fm10k_clean_tx_ring(interface->tx_ring[i]);

	/* remove any stale timestamp buffers and free them */
	skb_queue_purge(&interface->ts_tx_skb_queue);
}

/**
 * fm10k_free_all_tx_resources - Free Tx Resources for All Queues
 * @interface: board private structure
 *
 * Free all transmit software resources
 **/
static void fm10k_free_all_tx_resources(struct fm10k_intfc *interface)
{
	int i = interface->num_tx_queues;

	while (i--)
		fm10k_free_tx_resources(interface->tx_ring[i]);
}

/**
 * fm10k_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
static void fm10k_clean_rx_ring(struct fm10k_ring *rx_ring)
{
	unsigned long size;
	u16 i;

	if (!rx_ring->rx_buffer)
		return;

	if (rx_ring->skb)
		dev_kfree_skb(rx_ring->skb);
	rx_ring->skb = NULL;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct fm10k_rx_buffer *buffer = &rx_ring->rx_buffer[i];
		/* clean-up will only set page pointer to NULL */
		if (!buffer->page)
			continue;

		dma_unmap_page(rx_ring->dev, buffer->dma,
			       PAGE_SIZE, DMA_FROM_DEVICE);
		__free_page(buffer->page);

		buffer->page = NULL;
	}

	size = sizeof(struct fm10k_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * fm10k_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void fm10k_free_rx_resources(struct fm10k_ring *rx_ring)
{
	fm10k_clean_rx_ring(rx_ring);

	vfree(rx_ring->rx_buffer);
	rx_ring->rx_buffer = NULL;

	/* if not set, then don't free */
	if (!rx_ring->desc)
		return;

	dma_free_coherent(rx_ring->dev, rx_ring->size,
			  rx_ring->desc, rx_ring->dma);
	rx_ring->desc = NULL;
}

/**
 * fm10k_clean_all_rx_rings - Free Rx Buffers for all queues
 * @interface: board private structure
 **/
void fm10k_clean_all_rx_rings(struct fm10k_intfc *interface)
{
	int i;

	for (i = 0; i < interface->num_rx_queues; i++)
		fm10k_clean_rx_ring(interface->rx_ring[i]);
}

/**
 * fm10k_free_all_rx_resources - Free Rx Resources for All Queues
 * @interface: board private structure
 *
 * Free all receive software resources
 **/
static void fm10k_free_all_rx_resources(struct fm10k_intfc *interface)
{
	int i = interface->num_rx_queues;

	while (i--)
		fm10k_free_rx_resources(interface->rx_ring[i]);
}

/**
 * fm10k_request_glort_range - Request GLORTs for use in configuring rules
 * @interface: board private structure
 *
 * This function allocates a range of glorts for this interface to use.
 **/
static void fm10k_request_glort_range(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	u16 mask = (~hw->mac.dglort_map) >> FM10K_DGLORTMAP_MASK_SHIFT;

	/* establish GLORT base */
	interface->glort = hw->mac.dglort_map & FM10K_DGLORTMAP_NONE;
	interface->glort_count = 0;

	/* nothing we can do until mask is allocated */
	if (hw->mac.dglort_map == FM10K_DGLORTMAP_NONE)
		return;

	/* we support 3 possible GLORT configurations.
	 * 1: VFs consume all but the last 1
	 * 2: VFs and PF split glorts with possible gap between
	 * 3: VFs allocated first 64, all others belong to PF
	 */
	if (mask <= hw->iov.total_vfs) {
		interface->glort_count = 1;
		interface->glort += mask;
	} else if (mask < 64) {
		interface->glort_count = (mask + 1) / 2;
		interface->glort += interface->glort_count;
	} else {
		interface->glort_count = mask - 63;
		interface->glort += 64;
	}
}
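
/* Worked example of the three configurations above (illustrative values
 * only): assuming total_vfs = 16,
 *
 *	mask = 10  (mask <= total_vfs):	glort_count = 1, the PF keeps
 *					only the last glort (base + 10)
 *	mask = 63  (mask < 64):		glort_count = (63 + 1) / 2 = 32,
 *					the PF range starts at base + 32
 *	mask = 255 (otherwise):		glort_count = 255 - 63 = 192,
 *					VFs own the first 64 glorts
 */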

/**
 * fm10k_del_vxlan_port_all
 * @interface: board private structure
 *
 * This function frees the entire vxlan_port list
 **/
static void fm10k_del_vxlan_port_all(struct fm10k_intfc *interface)
{
	struct fm10k_vxlan_port *vxlan_port;

	/* flush all entries from list */
	vxlan_port = list_first_entry_or_null(&interface->vxlan_port,
					      struct fm10k_vxlan_port, list);
	while (vxlan_port) {
		list_del(&vxlan_port->list);
		kfree(vxlan_port);
		vxlan_port = list_first_entry_or_null(&interface->vxlan_port,
						      struct fm10k_vxlan_port,
						      list);
	}
}

/**
 * fm10k_restore_vxlan_port
 * @interface: board private structure
 *
 * This function restores the value in the tunnel_cfg register after reset
 **/
static void fm10k_restore_vxlan_port(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_vxlan_port *vxlan_port;

	/* only the PF supports configuring tunnels */
	if (hw->mac.type != fm10k_mac_pf)
		return;

	vxlan_port = list_first_entry_or_null(&interface->vxlan_port,
					      struct fm10k_vxlan_port, list);

	/* restore tunnel configuration register */
	fm10k_write_reg(hw, FM10K_TUNNEL_CFG,
			(vxlan_port ? ntohs(vxlan_port->port) : 0) |
			(ETH_P_TEB << FM10K_TUNNEL_CFG_NVGRE_SHIFT));
}

/**
 * fm10k_add_vxlan_port
 * @netdev: network interface device structure
 * @sa_family: Address family of new port
 * @port: port number used for VXLAN
 *
 * This function is called when a new VXLAN interface has added a new port
 * number to the range that is currently in use for VXLAN. The new port
 * number is always added to the tail so that the port number list should
 * match the order in which the ports were allocated. The head of the list
 * is always used as the VXLAN port number for offloads.
 **/
static void fm10k_add_vxlan_port(struct net_device *dev,
				 sa_family_t sa_family, __be16 port) {
	struct fm10k_intfc *interface = netdev_priv(dev);
	struct fm10k_vxlan_port *vxlan_port;

	/* only the PF supports configuring tunnels */
	if (interface->hw.mac.type != fm10k_mac_pf)
		return;

	/* existing ports are pulled out so our new entry is always last */
	fm10k_vxlan_port_for_each(vxlan_port, interface) {
		if ((vxlan_port->port == port) &&
		    (vxlan_port->sa_family == sa_family)) {
			list_del(&vxlan_port->list);
			goto insert_tail;
		}
	}

	/* allocate memory to track ports */
	vxlan_port = kmalloc(sizeof(*vxlan_port), GFP_ATOMIC);
	if (!vxlan_port)
		return;
	vxlan_port->port = port;
	vxlan_port->sa_family = sa_family;

insert_tail:
	/* add new port value to list */
	list_add_tail(&vxlan_port->list, &interface->vxlan_port);

	fm10k_restore_vxlan_port(interface);
}
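
/* Example of the ordering rule described above (hypothetical ports):
 * add(4789), add(8472), add(4789) leaves the list as 8472 -> 4789, because
 * a re-added port is unlinked and re-appended at the tail.  The list head
 * (8472 here) is what fm10k_restore_vxlan_port() programs into
 * FM10K_TUNNEL_CFG, so the oldest still-active port wins the offload.
 */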

/**
 * fm10k_del_vxlan_port
 * @netdev: network interface device structure
 * @sa_family: Address family of freed port
 * @port: port number used for VXLAN
 *
 * This function is called when a new VXLAN interface has freed a port
 * number from the range that is currently in use for VXLAN. The freed
 * port is removed from the list and the new head is used to determine
 * the port number for offloads.
 **/
static void fm10k_del_vxlan_port(struct net_device *dev,
				 sa_family_t sa_family, __be16 port) {
	struct fm10k_intfc *interface = netdev_priv(dev);
	struct fm10k_vxlan_port *vxlan_port;

	if (interface->hw.mac.type != fm10k_mac_pf)
		return;

	/* find the port in the list and free it */
	fm10k_vxlan_port_for_each(vxlan_port, interface) {
		if ((vxlan_port->port == port) &&
		    (vxlan_port->sa_family == sa_family)) {
			list_del(&vxlan_port->list);
			kfree(vxlan_port);
			break;
		}
	}

	fm10k_restore_vxlan_port(interface);
}

/**
 * fm10k_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
int fm10k_open(struct net_device *netdev)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	int err;

	/* allocate transmit descriptors */
	err = fm10k_setup_all_tx_resources(interface);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = fm10k_setup_all_rx_resources(interface);
	if (err)
		goto err_setup_rx;

	/* allocate interrupt resources */
	err = fm10k_qv_request_irq(interface);
	if (err)
		goto err_req_irq;

	/* setup GLORT assignment for this port */
	fm10k_request_glort_range(interface);

	/* Notify the stack of the actual queue counts */
	err = netif_set_real_num_tx_queues(netdev,
					   interface->num_tx_queues);
	if (err)
		goto err_set_queues;

	err = netif_set_real_num_rx_queues(netdev,
					   interface->num_rx_queues);
	if (err)
		goto err_set_queues;

#if IS_ENABLED(CONFIG_VXLAN)
	/* update VXLAN port configuration */
	vxlan_get_rx_port(netdev);

#endif
	fm10k_up(interface);

	return 0;

err_set_queues:
	fm10k_qv_free_irq(interface);
err_req_irq:
	fm10k_free_all_rx_resources(interface);
err_setup_rx:
	fm10k_free_all_tx_resources(interface);
err_setup_tx:
	return err;
}

/**
 * fm10k_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
int fm10k_close(struct net_device *netdev)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);

	fm10k_down(interface);

	fm10k_qv_free_irq(interface);

	fm10k_del_vxlan_port_all(interface);

	fm10k_free_all_tx_resources(interface);
	fm10k_free_all_rx_resources(interface);

	return 0;
}

static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	unsigned int r_idx = 0;
	int err;

	if ((skb->protocol == htons(ETH_P_8021Q)) &&
	    !vlan_tx_tag_present(skb)) {
		/* FM10K only supports hardware tagging, any tags in frame
		 * are considered 2nd level or "outer" tags
		 */
		struct vlan_hdr *vhdr;
		__be16 proto;

		/* make sure skb is not shared */
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (!skb)
			return NETDEV_TX_OK;

		/* make sure there is enough room to move the ethernet header */
		if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
			return NETDEV_TX_OK;

		/* verify the skb head is not shared */
		err = skb_cow_head(skb, 0);
		if (err)
			return NETDEV_TX_OK;

		/* locate vlan header */
		vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);

		/* pull the 2 key pieces of data out of it */
		__vlan_hwaccel_put_tag(skb,
				       htons(ETH_P_8021Q),
				       ntohs(vhdr->h_vlan_TCI));
		proto = vhdr->h_vlan_encapsulated_proto;
		skb->protocol = (ntohs(proto) >= 1536) ? proto :
							 htons(ETH_P_802_2);

		/* squash it by moving the ethernet addresses up 4 bytes */
		memmove(skb->data + VLAN_HLEN, skb->data, 12);
		__skb_pull(skb, VLAN_HLEN);
		skb_reset_mac_header(skb);
	}

	/* The minimum packet size for a single buffer is 17B so pad the skb
	 * in order to meet this minimum size requirement.
	 */
	if (unlikely(skb->len < 17)) {
		int pad_len = 17 - skb->len;

		if (skb_pad(skb, pad_len))
			return NETDEV_TX_OK;
		__skb_put(skb, pad_len);
	}

	/* prepare packet for hardware time stamping */
	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		fm10k_ts_tx_enqueue(interface, skb);

	if (r_idx >= interface->num_tx_queues)
		r_idx %= interface->num_tx_queues;

	err = fm10k_xmit_frame_ring(skb, interface->tx_ring[r_idx]);

	return err;
}
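
/* Sketch of the outer-tag squash performed above (12 == 2 * ETH_ALEN):
 *
 *	before:	| dst | src | 0x8100 | TCI | encap proto | payload |
 *	after:	| dst | src | encap proto | payload |
 *
 * memmove() slides the two MAC addresses VLAN_HLEN (4) bytes toward the
 * payload, __skb_pull() drops the leading duplicate, and the TCI survives
 * only in the skb's hwaccel tag for insertion via the FTAG on transmit.
 */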

static int fm10k_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < 68 || new_mtu > FM10K_MAX_JUMBO_FRAME_SIZE)
		return -EINVAL;

	dev->mtu = new_mtu;

	return 0;
}

/**
 * fm10k_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void fm10k_tx_timeout(struct net_device *netdev)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	bool real_tx_hang = false;
	int i;

#define TX_TIMEO_LIMIT 16000
	for (i = 0; i < interface->num_tx_queues; i++) {
		struct fm10k_ring *tx_ring = interface->tx_ring[i];

		if (check_for_tx_hang(tx_ring) && fm10k_check_tx_hang(tx_ring))
			real_tx_hang = true;
	}

	if (real_tx_hang) {
		fm10k_tx_timeout_reset(interface);
	} else {
		netif_info(interface, drv, netdev,
			   "Fake Tx hang detected with timeout of %d seconds\n",
			   netdev->watchdog_timeo/HZ);

		/* fake Tx hang - increase the kernel timeout */
		if (netdev->watchdog_timeo < TX_TIMEO_LIMIT)
			netdev->watchdog_timeo *= 2;
	}
}

static int fm10k_uc_vlan_unsync(struct net_device *netdev,
				const unsigned char *uc_addr)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_hw *hw = &interface->hw;
	u16 glort = interface->glort;
	u16 vid = interface->vid;
	bool set = !!(vid / VLAN_N_VID);
	s32 err;

	/* drop any leading bits on the VLAN ID */
	vid &= VLAN_N_VID - 1;

	err = hw->mac.ops.update_uc_addr(hw, glort, uc_addr, vid, set, 0);
	if (err)
		return err;

	/* return non-zero value as we are only doing a partial sync/unsync */
	return 1;
}

static int fm10k_mc_vlan_unsync(struct net_device *netdev,
				const unsigned char *mc_addr)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_hw *hw = &interface->hw;
	u16 glort = interface->glort;
	u16 vid = interface->vid;
	bool set = !!(vid / VLAN_N_VID);
	s32 err;

	/* drop any leading bits on the VLAN ID */
	vid &= VLAN_N_VID - 1;

	err = hw->mac.ops.update_mc_addr(hw, glort, mc_addr, vid, set);
	if (err)
		return err;

	/* return non-zero value as we are only doing a partial sync/unsync */
	return 1;
}
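
/* The unsync callbacks above receive only (netdev, addr), so the VLAN being
 * walked and the add/remove flag are both packed into interface->vid by
 * fm10k_update_vid(), which adds VLAN_N_VID (4096) when setting.  Decode
 * example:
 *
 *	interface->vid == 4196:	set = !!(4196 / 4096) = true,  vid = 100
 *	interface->vid ==  100:	set = false,                   vid = 100
 */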

static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_hw *hw = &interface->hw;
	s32 err;

	/* updates do not apply to VLAN 0 */
	if (!vid)
		return 0;

	if (vid >= VLAN_N_VID)
		return -EINVAL;

	/* Verify we have permission to add VLANs */
	if (hw->mac.vlan_override)
		return -EACCES;

	/* if default VLAN is already present do nothing */
	if (vid == hw->mac.default_vid)
		return -EBUSY;

	/* update active_vlans bitmask */
	set_bit(vid, interface->active_vlans);
	if (!set)
		clear_bit(vid, interface->active_vlans);

	fm10k_mbx_lock(interface);

	/* only need to update the VLAN if not in promiscuous mode */
	if (!(netdev->flags & IFF_PROMISC)) {
		err = hw->mac.ops.update_vlan(hw, vid, 0, set);
		if (err)
			goto mbx_unlock;
	}

	/* update our base MAC address */
	err = hw->mac.ops.update_uc_addr(hw, interface->glort, hw->mac.addr,
					 vid, set, 0);
	if (err)
		goto mbx_unlock;

	/* set vid prior to syncing/unsyncing the VLAN */
	interface->vid = vid + (set ? VLAN_N_VID : 0);

	/* Update the unicast and multicast address list to add/drop VLAN */
	__dev_uc_unsync(netdev, fm10k_uc_vlan_unsync);
	__dev_mc_unsync(netdev, fm10k_mc_vlan_unsync);

mbx_unlock:
	fm10k_mbx_unlock(interface);

	return err;
}

static int fm10k_vlan_rx_add_vid(struct net_device *netdev,
				 __always_unused __be16 proto, u16 vid)
{
	/* update VLAN and address table based on changes */
	return fm10k_update_vid(netdev, vid, true);
}

static int fm10k_vlan_rx_kill_vid(struct net_device *netdev,
				  __always_unused __be16 proto, u16 vid)
{
	/* update VLAN and address table based on changes */
	return fm10k_update_vid(netdev, vid, false);
}

static u16 fm10k_find_next_vlan(struct fm10k_intfc *interface, u16 vid)
{
	struct fm10k_hw *hw = &interface->hw;
	u16 default_vid = hw->mac.default_vid;
	u16 vid_limit = vid < default_vid ? default_vid : VLAN_N_VID;

	vid = find_next_bit(interface->active_vlans, vid_limit, ++vid);

	return vid;
}

static void fm10k_clear_unused_vlans(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	u32 vid, prev_vid;

	/* loop through and find any gaps in the table */
	for (vid = 0, prev_vid = 0;
	     prev_vid < VLAN_N_VID;
	     prev_vid = vid + 1, vid = fm10k_find_next_vlan(interface, vid)) {
		if (prev_vid == vid)
			continue;

		/* send request to clear multiple bits at a time */
		prev_vid += (vid - prev_vid - 1) << FM10K_VLAN_LENGTH_SHIFT;
		hw->mac.ops.update_vlan(hw, prev_vid, 0, false);
	}
}
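
/* Worked example of the run-length encoding above: with only VLANs 10 and
 * 20 set in active_vlans, the loop issues
 *
 *	update_vlan(hw,  1 + (   8 << FM10K_VLAN_LENGTH_SHIFT), 0, false)
 *	update_vlan(hw, 11 + (   8 << FM10K_VLAN_LENGTH_SHIFT), 0, false)
 *	update_vlan(hw, 21 + (4074 << FM10K_VLAN_LENGTH_SHIFT), 0, false)
 *
 * clearing 1-9, 11-19 and 21-4095: the upper bits carry (entries - 1), so
 * a single mailbox request can cover an entire gap in the table.
 */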

static int __fm10k_uc_sync(struct net_device *dev,
			   const unsigned char *addr, bool sync)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	struct fm10k_hw *hw = &interface->hw;
	u16 vid, glort = interface->glort;
	s32 err;

	if (!is_valid_ether_addr(addr))
		return -EADDRNOTAVAIL;

	/* update table with current entries */
	for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 0;
	     vid < VLAN_N_VID;
	     vid = fm10k_find_next_vlan(interface, vid)) {
		err = hw->mac.ops.update_uc_addr(hw, glort, addr,
						 vid, sync, 0);
		if (err)
			return err;
	}

	return 0;
}

static int fm10k_uc_sync(struct net_device *dev,
			 const unsigned char *addr)
{
	return __fm10k_uc_sync(dev, addr, true);
}

static int fm10k_uc_unsync(struct net_device *dev,
			   const unsigned char *addr)
{
	return __fm10k_uc_sync(dev, addr, false);
}

static int fm10k_set_mac(struct net_device *dev, void *p)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	struct fm10k_hw *hw = &interface->hw;
	struct sockaddr *addr = p;
	s32 err = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (dev->flags & IFF_UP) {
		/* setting MAC address requires mailbox */
		fm10k_mbx_lock(interface);

		err = fm10k_uc_sync(dev, addr->sa_data);
		if (!err)
			fm10k_uc_unsync(dev, hw->mac.addr);

		fm10k_mbx_unlock(interface);
	}

	if (!err) {
		ether_addr_copy(dev->dev_addr, addr->sa_data);
		ether_addr_copy(hw->mac.addr, addr->sa_data);
		dev->addr_assign_type &= ~NET_ADDR_RANDOM;
	}

	/* if we had a mailbox error suggest trying again */
	return err ? -EAGAIN : 0;
}

static int __fm10k_mc_sync(struct net_device *dev,
			   const unsigned char *addr, bool sync)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	struct fm10k_hw *hw = &interface->hw;
	u16 vid, glort = interface->glort;
	s32 err;

	if (!is_multicast_ether_addr(addr))
		return -EADDRNOTAVAIL;

	/* update table with current entries */
	for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 0;
	     vid < VLAN_N_VID;
	     vid = fm10k_find_next_vlan(interface, vid)) {
		err = hw->mac.ops.update_mc_addr(hw, glort, addr, vid, sync);
		if (err)
			return err;
	}

	return 0;
}

static int fm10k_mc_sync(struct net_device *dev,
			 const unsigned char *addr)
{
	return __fm10k_mc_sync(dev, addr, true);
}

static int fm10k_mc_unsync(struct net_device *dev,
			   const unsigned char *addr)
{
	return __fm10k_mc_sync(dev, addr, false);
}

static void fm10k_set_rx_mode(struct net_device *dev)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	struct fm10k_hw *hw = &interface->hw;
	int xcast_mode;

	/* no need to update the hardware if we are not running */
	if (!(dev->flags & IFF_UP))
		return;

	/* determine new mode based on flags */
	xcast_mode = (dev->flags & IFF_PROMISC) ? FM10K_XCAST_MODE_PROMISC :
		     (dev->flags & IFF_ALLMULTI) ? FM10K_XCAST_MODE_ALLMULTI :
		     (dev->flags & (IFF_BROADCAST | IFF_MULTICAST)) ?
		     FM10K_XCAST_MODE_MULTI : FM10K_XCAST_MODE_NONE;

	fm10k_mbx_lock(interface);

	/* synchronize all of the addresses */
	if (xcast_mode != FM10K_XCAST_MODE_PROMISC) {
		__dev_uc_sync(dev, fm10k_uc_sync, fm10k_uc_unsync);
		if (xcast_mode != FM10K_XCAST_MODE_ALLMULTI)
			__dev_mc_sync(dev, fm10k_mc_sync, fm10k_mc_unsync);
	}

	/* if we aren't changing modes there is nothing to do */
	if (interface->xcast_mode != xcast_mode) {
		/* update VLAN table */
		if (xcast_mode == FM10K_XCAST_MODE_PROMISC)
			hw->mac.ops.update_vlan(hw, FM10K_VLAN_ALL, 0, true);
		if (interface->xcast_mode == FM10K_XCAST_MODE_PROMISC)
			fm10k_clear_unused_vlans(interface);

		/* update xcast mode */
		hw->mac.ops.update_xcast_mode(hw, interface->glort, xcast_mode);

		/* record updated xcast mode state */
		interface->xcast_mode = xcast_mode;
	}

	fm10k_mbx_unlock(interface);
}

void fm10k_restore_rx_state(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	int xcast_mode;
	u16 vid, glort;

	/* restore our address if perm_addr is set */
	if (hw->mac.type == fm10k_mac_vf) {
		if (is_valid_ether_addr(hw->mac.perm_addr)) {
			ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
			ether_addr_copy(netdev->perm_addr, hw->mac.perm_addr);
			ether_addr_copy(netdev->dev_addr, hw->mac.perm_addr);
			netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
		}

		if (hw->mac.vlan_override)
			netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
		else
			netdev->features |= NETIF_F_HW_VLAN_CTAG_RX;
	}

	/* record glort for this interface */
	glort = interface->glort;

	/* convert interface flags to xcast mode */
	if (netdev->flags & IFF_PROMISC)
		xcast_mode = FM10K_XCAST_MODE_PROMISC;
	else if (netdev->flags & IFF_ALLMULTI)
		xcast_mode = FM10K_XCAST_MODE_ALLMULTI;
	else if (netdev->flags & (IFF_BROADCAST | IFF_MULTICAST))
		xcast_mode = FM10K_XCAST_MODE_MULTI;
	else
		xcast_mode = FM10K_XCAST_MODE_NONE;

	fm10k_mbx_lock(interface);

	/* Enable logical port */
	hw->mac.ops.update_lport_state(hw, glort, interface->glort_count, true);

	/* update VLAN table */
	hw->mac.ops.update_vlan(hw, FM10K_VLAN_ALL, 0,
				xcast_mode == FM10K_XCAST_MODE_PROMISC);

	/* Add filter for VLAN 0 */
	hw->mac.ops.update_vlan(hw, 0, 0, true);

	/* update table with current entries */
	for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 0;
	     vid < VLAN_N_VID;
	     vid = fm10k_find_next_vlan(interface, vid)) {
		hw->mac.ops.update_vlan(hw, vid, 0, true);
		hw->mac.ops.update_uc_addr(hw, glort, hw->mac.addr,
					   vid, true, 0);
	}

	/* synchronize all of the addresses */
	if (xcast_mode != FM10K_XCAST_MODE_PROMISC) {
		__dev_uc_sync(netdev, fm10k_uc_sync, fm10k_uc_unsync);
		if (xcast_mode != FM10K_XCAST_MODE_ALLMULTI)
			__dev_mc_sync(netdev, fm10k_mc_sync, fm10k_mc_unsync);
	}

	/* update xcast mode */
	hw->mac.ops.update_xcast_mode(hw, glort, xcast_mode);

	fm10k_mbx_unlock(interface);

	/* record updated xcast mode state */
	interface->xcast_mode = xcast_mode;

	/* Restore tunnel configuration */
	fm10k_restore_vxlan_port(interface);
}

void fm10k_reset_rx_state(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;

	fm10k_mbx_lock(interface);

	/* clear the logical port state on lower device */
	hw->mac.ops.update_lport_state(hw, interface->glort,
				       interface->glort_count, false);

	fm10k_mbx_unlock(interface);

	/* reset flags to default state */
	interface->xcast_mode = FM10K_XCAST_MODE_NONE;

	/* clear the sync flag since the lport has been dropped */
	__dev_uc_unsync(netdev, NULL);
	__dev_mc_unsync(netdev, NULL);
}

/**
 * fm10k_get_stats64 - Get System Network Statistics
 * @netdev: network interface device structure
 * @stats: storage space for 64bit statistics
 *
 * Returns 64bit statistics, for use in the ndo_get_stats64 callback. This
 * function replaces fm10k_get_stats for kernels which support it.
 **/
static struct rtnl_link_stats64 *fm10k_get_stats64(struct net_device *netdev,
						   struct rtnl_link_stats64 *stats)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_ring *ring;
	unsigned int start, i;
	u64 bytes, packets;

	rcu_read_lock();

	for (i = 0; i < interface->num_rx_queues; i++) {
		ring = ACCESS_ONCE(interface->rx_ring[i]);

		if (!ring)
			continue;

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			packets = ring->stats.packets;
			bytes = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes += bytes;
	}

	for (i = 0; i < interface->num_tx_queues; i++) {
		ring = ACCESS_ONCE(interface->tx_ring[i]);

		if (!ring)
			continue;

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			packets = ring->stats.packets;
			bytes = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

		stats->tx_packets += packets;
		stats->tx_bytes += bytes;
	}

	rcu_read_unlock();

	/* following stats updated by fm10k_service_task() */
	stats->rx_missed_errors = netdev->stats.rx_missed_errors;

	return stats;
}
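
/* Sketch of the writer side the fetch/retry loop above pairs with; the
 * Tx/Rx cleanup paths update ring counters roughly as
 *
 *	u64_stats_update_begin(&ring->syncp);
 *	ring->stats.packets += total_packets;
 *	ring->stats.bytes += total_bytes;
 *	u64_stats_update_end(&ring->syncp);
 *
 * so on 32-bit kernels, where a 64-bit counter cannot be read atomically,
 * the reader simply retries if the sequence count moved underneath it.
 */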

int fm10k_setup_tc(struct net_device *dev, u8 tc)
{
	struct fm10k_intfc *interface = netdev_priv(dev);

	/* Currently only the PF supports priority classes */
	if (tc && (interface->hw.mac.type != fm10k_mac_pf))
		return -EINVAL;

	/* Hardware supports up to 8 traffic classes */
	if (tc > 8)
		return -EINVAL;

	/* Hardware has to reinitialize queues to match packet
	 * buffer alignment. Unfortunately, the hardware is not
	 * flexible enough to do this dynamically.
	 */
	if (netif_running(dev))
		fm10k_close(dev);

	fm10k_mbx_free_irq(interface);

	fm10k_clear_queueing_scheme(interface);

	/* we expect the prio_tc map to be repopulated later */
	netdev_reset_tc(dev);
	netdev_set_num_tc(dev, tc);

	fm10k_init_queueing_scheme(interface);

	fm10k_mbx_request_irq(interface);

	if (netif_running(dev))
		fm10k_open(dev);

	/* flag to indicate SWPRI has yet to be updated */
	interface->flags |= FM10K_FLAG_SWPRI_CONFIG;

	return 0;
}

static int fm10k_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGHWTSTAMP:
		return fm10k_get_ts_config(netdev, ifr);
	case SIOCSHWTSTAMP:
		return fm10k_set_ts_config(netdev, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

static void fm10k_assign_l2_accel(struct fm10k_intfc *interface,
				  struct fm10k_l2_accel *l2_accel)
{
	struct fm10k_ring *ring;
	int i;

	for (i = 0; i < interface->num_rx_queues; i++) {
		ring = interface->rx_ring[i];
		rcu_assign_pointer(ring->l2_accel, l2_accel);
	}

	interface->l2_accel = l2_accel;
}

static void *fm10k_dfwd_add_station(struct net_device *dev,
				    struct net_device *sdev)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	struct fm10k_l2_accel *l2_accel = interface->l2_accel;
	struct fm10k_l2_accel *old_l2_accel = NULL;
	struct fm10k_dglort_cfg dglort = { 0 };
	struct fm10k_hw *hw = &interface->hw;
	int size = 0, i;
	u16 glort;

	/* allocate l2 accel structure if it is not available */
	if (!l2_accel) {
		/* verify there is enough free GLORTs to support l2_accel */
		if (interface->glort_count < 7)
			return ERR_PTR(-EBUSY);

		size = offsetof(struct fm10k_l2_accel, macvlan[7]);
		l2_accel = kzalloc(size, GFP_KERNEL);
		if (!l2_accel)
			return ERR_PTR(-ENOMEM);

		l2_accel->size = 7;
		l2_accel->dglort = interface->glort;

		/* update pointers */
		fm10k_assign_l2_accel(interface, l2_accel);
	/* do not expand if we are at our limit */
	} else if ((l2_accel->count == FM10K_MAX_STATIONS) ||
		   (l2_accel->count == (interface->glort_count - 1))) {
		return ERR_PTR(-EBUSY);
	/* expand if we have hit the size limit */
	} else if (l2_accel->count == l2_accel->size) {
		old_l2_accel = l2_accel;
		size = offsetof(struct fm10k_l2_accel,
				macvlan[(l2_accel->size * 2) + 1]);
		l2_accel = kzalloc(size, GFP_KERNEL);
		if (!l2_accel)
			return ERR_PTR(-ENOMEM);

		memcpy(l2_accel, old_l2_accel,
		       offsetof(struct fm10k_l2_accel,
				macvlan[old_l2_accel->size]));

		l2_accel->size = (old_l2_accel->size * 2) + 1;

		/* update pointers */
		fm10k_assign_l2_accel(interface, l2_accel);
		kfree_rcu(old_l2_accel, rcu);
	}

	/* add macvlan to accel table, and record GLORT for position */
	for (i = 0; i < l2_accel->size; i++) {
		if (!l2_accel->macvlan[i])
			break;
	}

	/* record station */
	l2_accel->macvlan[i] = sdev;
	l2_accel->count++;

	/* configure default DGLORT mapping for RSS/DCB */
	dglort.idx = fm10k_dglort_pf_rss;
	dglort.inner_rss = 1;
	dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask);
	dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask);
	dglort.glort = interface->glort;
	dglort.shared_l = fls(l2_accel->size);
	hw->mac.ops.configure_dglort_map(hw, &dglort);

	/* Add rules for this specific dglort to the switch */
	fm10k_mbx_lock(interface);

	glort = l2_accel->dglort + 1 + i;
	hw->mac.ops.update_xcast_mode(hw, glort, FM10K_XCAST_MODE_MULTI);
	hw->mac.ops.update_uc_addr(hw, glort, sdev->dev_addr, 0, true, 0);

	fm10k_mbx_unlock(interface);

	return sdev;
}
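
/* Example of the GLORT layout this builds (illustrative base value): with
 * dglort = 0x40 and the initial table size of 7, shared_l = fls(7) = 3
 * reserves 0x41-0x47 for stations while 0x40 stays the PF's own RSS/DCB
 * dglort; a station stored at macvlan[2] answers at 0x40 + 1 + 2 = 0x43.
 */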

static void fm10k_dfwd_del_station(struct net_device *dev, void *priv)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	struct fm10k_l2_accel *l2_accel = ACCESS_ONCE(interface->l2_accel);
	struct fm10k_dglort_cfg dglort = { 0 };
	struct fm10k_hw *hw = &interface->hw;
	struct net_device *sdev = priv;
	int i;
	u16 glort;

	if (!l2_accel)
		return;

	/* search table for matching interface */
	for (i = 0; i < l2_accel->size; i++) {
		if (l2_accel->macvlan[i] == sdev)
			break;
	}

	/* exit if macvlan not found */
	if (i == l2_accel->size)
		return;

	/* Remove any rules specific to this dglort */
	fm10k_mbx_lock(interface);

	glort = l2_accel->dglort + 1 + i;
	hw->mac.ops.update_xcast_mode(hw, glort, FM10K_XCAST_MODE_NONE);
	hw->mac.ops.update_uc_addr(hw, glort, sdev->dev_addr, 0, false, 0);

	fm10k_mbx_unlock(interface);

	/* record removal */
	l2_accel->macvlan[i] = NULL;
	l2_accel->count--;

	/* configure default DGLORT mapping for RSS/DCB */
	dglort.idx = fm10k_dglort_pf_rss;
	dglort.inner_rss = 1;
	dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask);
	dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask);
	dglort.glort = interface->glort;
	if (l2_accel)
		dglort.shared_l = fls(l2_accel->size);
	hw->mac.ops.configure_dglort_map(hw, &dglort);

	/* If table is empty remove it */
	if (l2_accel->count == 0) {
		fm10k_assign_l2_accel(interface, NULL);
		kfree_rcu(l2_accel, rcu);
	}
}

static const struct net_device_ops fm10k_netdev_ops = {
	.ndo_open		= fm10k_open,
	.ndo_stop		= fm10k_close,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_start_xmit		= fm10k_xmit_frame,
	.ndo_set_mac_address	= fm10k_set_mac,
	.ndo_change_mtu		= fm10k_change_mtu,
	.ndo_tx_timeout		= fm10k_tx_timeout,
	.ndo_vlan_rx_add_vid	= fm10k_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= fm10k_vlan_rx_kill_vid,
	.ndo_set_rx_mode	= fm10k_set_rx_mode,
	.ndo_get_stats64	= fm10k_get_stats64,
	.ndo_setup_tc		= fm10k_setup_tc,
	.ndo_set_vf_mac		= fm10k_ndo_set_vf_mac,
	.ndo_set_vf_vlan	= fm10k_ndo_set_vf_vlan,
	.ndo_set_vf_rate	= fm10k_ndo_set_vf_bw,
	.ndo_get_vf_config	= fm10k_ndo_get_vf_config,
	.ndo_add_vxlan_port	= fm10k_add_vxlan_port,
	.ndo_del_vxlan_port	= fm10k_del_vxlan_port,
	.ndo_do_ioctl		= fm10k_ioctl,
	.ndo_dfwd_add_station	= fm10k_dfwd_add_station,
	.ndo_dfwd_del_station	= fm10k_dfwd_del_station,
};

#define DEFAULT_DEBUG_LEVEL_SHIFT 3

struct net_device *fm10k_alloc_netdev(void)
{
	struct fm10k_intfc *interface;
	struct net_device *dev;

	dev = alloc_etherdev_mq(sizeof(struct fm10k_intfc), MAX_QUEUES);
	if (!dev)
		return NULL;

	/* set net device and ethtool ops */
	dev->netdev_ops = &fm10k_netdev_ops;
	fm10k_set_ethtool_ops(dev);

	/* configure default debug level */
	interface = netdev_priv(dev);
	interface->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;

	/* configure default features */
	dev->features |= NETIF_F_IP_CSUM |
			 NETIF_F_IPV6_CSUM |
			 NETIF_F_SG |
			 NETIF_F_TSO |
			 NETIF_F_TSO6 |
			 NETIF_F_TSO_ECN |
			 NETIF_F_GSO_UDP_TUNNEL |
			 NETIF_F_RXHASH |
			 NETIF_F_RXCSUM;

	/* all features defined to this point should be changeable */
	dev->hw_features |= dev->features;

	/* allow user to enable L2 forwarding acceleration */
	dev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD;

	/* configure VLAN features */
	dev->vlan_features |= dev->features;

	/* configure tunnel offloads */
	dev->hw_enc_features = NETIF_F_IP_CSUM |
			       NETIF_F_TSO |
			       NETIF_F_TSO6 |
			       NETIF_F_TSO_ECN |
			       NETIF_F_GSO_UDP_TUNNEL |
			       NETIF_F_IPV6_CSUM |
			       NETIF_F_SG;

	/* we want to leave these both on as we cannot disable VLAN tag
	 * insertion or stripping on the hardware since it is contained
	 * in the FTAG and not in the frame itself.
	 */
	dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
			 NETIF_F_HW_VLAN_CTAG_RX |
			 NETIF_F_HW_VLAN_CTAG_FILTER;

	dev->priv_flags |= IFF_UNICAST_FLT;

	return dev;
}
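
/* Typical use from probe (a sketch; the real call site lives in
 * fm10k_pci.c):
 *
 *	netdev = fm10k_alloc_netdev();
 *	if (!netdev)
 *		return -ENOMEM;
 *	SET_NETDEV_DEV(netdev, &pdev->dev);
 *	interface = netdev_priv(netdev);
 *	...
 *	err = register_netdev(netdev);
 */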