/* Copyright (C) 2005 - 2015 Emulex
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Costa Mesa, CA 92626
 */
#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>
MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");
/* num_vfs module param is obsolete.
 * Use sysfs method to enable/disable VFs.
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	/* ... descriptor strings elided ... */
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	/* ... descriptor strings elided ... */
};
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
			  mem->dma);
}
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (be_check_error(adapter, BE_ERROR_EEH))
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}
static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped,
			 u32 eq_delay_mult_enc)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
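/* Illustrative note (not in the original source): the EQ doorbell packs
 * several fields into one 32-bit write -- the ring id in the low bits,
 * then single-bit re-arm/clear/event flags, the count of processed
 * entries, and the R2I delay encoding at their respective shifts. For
 * example, re-arming queue 5 after popping 3 events ORs together
 * (5 & DB_EQ_RING_ID_MASK), (1 << DB_EQ_REARM_SHIFT),
 * (1 << DB_EQ_EVNT_SHIFT) and (3 << DB_EQ_NUM_POPPED_SHIFT) before the
 * single iowrite32() to adapter->db + DB_EQ_OFFSET.
 */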
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* if device is not running, copy MAC to netdev->dev_addr */
	if (!netif_running(netdev))
		goto done;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}
done:
	ether_addr_copy(netdev->dev_addr, addr->sa_data);
	dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else if (BE3_chip(adapter)) {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}
/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else if (BE3_chip(adapter)) {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}
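/* Illustrative example (not in the original source): suppose the
 * accumulated value is 0x0001fffe (hi part 0x00010000, lo part 0xfffe)
 * and the 16-bit HW counter now reads 3. Since 3 < 0xfffe the counter
 * wrapped, so newacc = 0x00010000 + 3 + 65536 = 0x00020003 -- the hi
 * part absorbs the wrap while the lo part tracks the HW reading.
 */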
static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo, u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
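/* Note (added for clarity, not in the original source): the
 * u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() pairs above
 * re-read each per-queue counter pair until a consistent snapshot is
 * seen, which keeps 64-bit counters tear-free on 32-bit hosts without
 * taking a lock on the hot Tx/Rx paths.
 */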
void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if (link_status)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);

	netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
}
static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_bytes += skb->len;
	stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
	u64_stats_update_end(&stats->sync);
}
/* Returns number of WRBs needed for the skb */
static u32 skb_wrb_cnt(struct sk_buff *skb)
{
	/* +1 for the header wrb */
	return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
}
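/* Illustrative example (not in the original source): an skb with a
 * linear header area and three page fragments needs
 * 1 (header WRB) + 1 (linear data) + 3 (frags) = 5 WRBs; a purely
 * linear skb needs just 2.
 */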
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
	wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
	wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
}

/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
 * to avoid the swap and shift/mask operations in wrb_fill().
 */
static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
{
	memset(wrb, 0, sizeof(*wrb));
}
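/* Illustrative note (not in the original source): the 64-bit DMA address
 * is split across two little-endian 32-bit WRB fields, e.g. for
 * addr == 0x0000001234567890, frag_pa_hi holds 0x00000012 and
 * frag_pa_lo holds 0x34567890; unmap_tx_frag() below reassembles it
 * with the mirror-image shift-and-OR.
 */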
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = skb_vlan_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
			   adapter->recommended_prio;

	return vlan_tag;
}
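/* Illustrative example (not in the original source): with VLAN TCI
 * 0xa005 the priority bits are (0xa005 & VLAN_PRIO_MASK) >> 13 = 5.
 * If bit 5 is clear in adapter->vlan_prio_bmap, the priority field is
 * masked out and replaced with the adapter's recommended priority
 * while the VID bits (0x005) are preserved.
 */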
/* Used only for IP tunnel packets */
static u16 skb_inner_ip_proto(struct sk_buff *skb)
{
	return (inner_ip_hdr(skb)->version == 4) ?
		inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
}

static u16 skb_ip_proto(struct sk_buff *skb)
{
	return (ip_hdr(skb)->version == 4) ?
		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}
static inline bool be_is_txq_full(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
}

static inline bool be_can_txq_wake(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) < txo->q.len / 2;
}

static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
}
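/* Note (added for clarity, not in the original source): the queue is
 * treated as full once the next worst-case packet could no longer fit,
 * i.e. used + BE_MAX_TX_FRAG_COUNT would reach the ring length, and it
 * is only woken again once it drains below half capacity -- the gap
 * between the two thresholds avoids rapid stop/wake flapping.
 */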
static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
				       struct sk_buff *skb,
				       struct be_wrb_params *wrb_params)
{
	u16 proto;

	if (skb_is_gso(skb)) {
		BE_WRB_F_SET(wrb_params->features, LSO, 1);
		wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			BE_WRB_F_SET(wrb_params->features, LSO6, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			BE_WRB_F_SET(wrb_params->features, IPCS, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
		else if (proto == IPPROTO_UDP)
			BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		BE_WRB_F_SET(wrb_params->features, VLAN, 1);
		wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
	}

	BE_WRB_F_SET(wrb_params->features, CRC, 1);
}
static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	 * hack is not needed, the evt bit is set while ringing DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
	SET_TX_WRB_HDR_BITS(mgmt, hdr,
			    BE_WRB_F_GET(wrb_params->features, OS2BMC));
}
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;
	u32 frag_len = le32_to_cpu(wrb->frag_len);

	dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
		(u64)le32_to_cpu(wrb->frag_pa_lo);
	if (frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
	}
}
/* Grab a WRB header for xmit */
static u16 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
{
	u16 head = txo->q.head;

	queue_head_inc(&txo->q);
	return head;
}
/* Set up the WRB header for xmit */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	u32 num_frags = skb_wrb_cnt(skb);
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(num_frags, &txq->used);
	txo->last_req_wrb_cnt = num_frags;
	txo->pend_wrb_cnt += num_frags;
}
/* Setup a WRB fragment (buffer descriptor) for xmit */
static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
				 int len)
{
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	wrb = queue_head_node(txq);
	wrb_fill(wrb, busaddr, len);
	queue_head_inc(txq);
}
/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u16 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	txq->head = head;
}
/* Enqueue the given packet for transmit. This routine allocates WRBs for the
 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
 * of WRBs used up by the packet.
 */
static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
			   struct sk_buff *skb,
			   struct be_wrb_params *wrb_params)
{
	u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
	struct device *dev = &adapter->pdev->dev;
	struct be_queue_info *txq = &txo->q;
	bool map_single = false;
	u16 head = txq->head;
	dma_addr_t busaddr;
	int len;

	head = be_tx_get_wrb_hdr(txo);

	if (skb->len > skb->data_len) {
		len = skb_headlen(skb);

		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

		len = skb_frag_size(frag);

		busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);

	be_tx_stats_update(txo, skb);
	return wrb_cnt;

dma_err:
	adapter->drv_stats.dma_map_errors++;
	be_xmit_restore(adapter, txo, head, map_single, copied);
	return 0;
}
static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     struct be_wrb_params *wrb_params)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (skb_vlan_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	if (vlan_tag) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	return skb;
}
static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *)(skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}
static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  struct be_wrb_params
						  *wrb_params)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   struct be_wrb_params *wrb_params)
{
	/* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
	 * less may cause a transmit stall on that port. So the work-around is
	 * to pad short packets (<= 32 bytes) to a 36-byte length.
	 */
	if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
		if (skb_put_padto(skb, 36))
			return NULL;
	}

	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
		if (!skb)
			return NULL;
	}

	return skb;
}
static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
		txo->last_req_wrb_cnt++;
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}
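/* Note (added for clarity, not in the original source): on non-Lancer
 * chips the number of posted WRBs must be even, so an odd pending count
 * is padded with one all-zero dummy WRB and the num_wrb field of the
 * last real header WRB is rewritten to cover it, e.g. a 3-WRB packet is
 * flushed as 4 WRBs.
 */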
/* OS2BMC related */

#define DHCP_CLIENT_PORT	68
#define DHCP_SERVER_PORT	67
#define NET_BIOS_PORT1		137
#define NET_BIOS_PORT2		138
#define DHCPV6_RAS_PORT		547

#define is_mc_allowed_on_bmc(adapter, eh)	\
	(!is_multicast_filt_enabled(adapter) &&	\
	 is_multicast_ether_addr(eh->h_dest) &&	\
	 !is_broadcast_ether_addr(eh->h_dest))

#define is_bc_allowed_on_bmc(adapter, eh)	\
	(!is_broadcast_filt_enabled(adapter) &&	\
	 is_broadcast_ether_addr(eh->h_dest))

#define is_arp_allowed_on_bmc(adapter, skb)	\
	(is_arp(skb) && is_arp_filt_enabled(adapter))

#define is_broadcast_packet(eh, adapter)	\
	(is_multicast_ether_addr(eh->h_dest) &&	\
	 !compare_ether_addr(eh->h_dest, adapter->netdev->broadcast))

#define is_arp(skb)	(skb->protocol == htons(ETH_P_ARP))

#define is_arp_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))

#define is_dhcp_client_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)

#define is_dhcp_srvr_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)

#define is_nbios_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)

#define is_ipv6_na_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)

#define is_ipv6_ra_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)

#define is_ipv6_ras_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)

#define is_broadcast_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST)

#define is_multicast_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST)
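/* Note (added for clarity, not in the original source): each
 * is_*_filt_enabled() helper tests one bit of adapter->bmc_filt_mask,
 * the per-traffic-type filter configuration reported by FW; the
 * "allowed on bmc" macros combine such a bit with the destination MAC
 * class of the frame to decide whether be_send_pkt_to_bmc() below
 * should mirror the packet to the BMC.
 */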
static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
			       struct sk_buff **skb)
{
	struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
	bool os2bmc = false;

	if (!be_is_os2bmc_enabled(adapter))
		goto done;

	if (!is_multicast_ether_addr(eh->h_dest))
		goto done;

	if (is_mc_allowed_on_bmc(adapter, eh) ||
	    is_bc_allowed_on_bmc(adapter, eh) ||
	    is_arp_allowed_on_bmc(adapter, (*skb))) {
		os2bmc = true;
		goto done;
	}

	if ((*skb)->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *hdr = ipv6_hdr((*skb));
		u8 nexthdr = hdr->nexthdr;

		if (nexthdr == IPPROTO_ICMPV6) {
			struct icmp6hdr *icmp6 = icmp6_hdr((*skb));

			switch (icmp6->icmp6_type) {
			case NDISC_ROUTER_ADVERTISEMENT:
				os2bmc = is_ipv6_ra_filt_enabled(adapter);
				goto done;
			case NDISC_NEIGHBOUR_ADVERTISEMENT:
				os2bmc = is_ipv6_na_filt_enabled(adapter);
				goto done;
			default:
				break;
			}
		}
	}

	if (is_udp_pkt((*skb))) {
		struct udphdr *udp = udp_hdr((*skb));

		switch (udp->dest) {
		case DHCP_CLIENT_PORT:
			os2bmc = is_dhcp_client_filt_enabled(adapter);
			goto done;
		case DHCP_SERVER_PORT:
			os2bmc = is_dhcp_srvr_filt_enabled(adapter);
			goto done;
		case NET_BIOS_PORT1:
		case NET_BIOS_PORT2:
			os2bmc = is_nbios_filt_enabled(adapter);
			goto done;
		case DHCPV6_RAS_PORT:
			os2bmc = is_ipv6_ras_filt_enabled(adapter);
			goto done;
		default:
			break;
		}
	}
done:
	/* For packets over a vlan, which are destined
	 * to BMC, asic expects the vlan to be inline in the packet.
	 */
	if (os2bmc)
		*skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);

	return os2bmc;
}
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_wrb_params wrb_params = { 0 };
	bool flush = !skb->xmit_more;
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
	if (unlikely(!skb))
		goto drop;

	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
	if (unlikely(!wrb_cnt)) {
		dev_kfree_skb_any(skb);
		goto drop;
	}

	/* if os2bmc is enabled and if the pkt is destined to bmc,
	 * enqueue the pkt a 2nd time with mgmt bit set.
	 */
	if (be_send_pkt_to_bmc(adapter, &skb)) {
		BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
		wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
		if (unlikely(!wrb_cnt))
			goto drop;
		else
			skb_get(skb);
	}

	if (be_is_txq_full(txo)) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}
static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;

	if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
		dev_info(dev, "MTU must be between %d and %d bytes\n",
			 BE_MIN_MTU, BE_MAX_MTU);
		return -EINVAL;
	}

	dev_info(dev, "MTU changed from %d to %d bytes\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}
static inline bool be_in_all_promisc(struct be_adapter *adapter)
{
	return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
			BE_IF_FLAGS_ALL_PROMISCUOUS;
}
static int be_set_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
	if (!status) {
		dev_info(dev, "Enabled VLAN promiscuous mode\n");
		adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
	} else {
		dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
	}
	return status;
}

static int be_clear_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
	if (!status) {
		dev_info(dev, "Disabling VLAN promiscuous mode\n");
		adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
	}
	return status;
}
/* A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (be_in_all_promisc(adapter))
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
		    addl_status(status) ==
				MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	} else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		status = be_clear_vlan_promisc(adapter);
	}
	return status;
}
static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return status;

	if (test_bit(vid, adapter->vids))
		return status;

	set_bit(vid, adapter->vids);
	adapter->vlans_added++;

	status = be_vid_config(adapter);
	if (status) {
		adapter->vlans_added--;
		clear_bit(vid, adapter->vids);
	}

	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return 0;

	clear_bit(vid, adapter->vids);
	adapter->vlans_added--;

	return be_vid_config(adapter);
}
static void be_clear_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
	adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static void be_set_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
	adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static void be_set_mc_promisc(struct be_adapter *adapter)
{
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
		return;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
	if (!status)
		adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
}

static void be_set_mc_list(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
	if (!status)
		adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
	else
		be_set_mc_promisc(adapter);
}
static void be_set_uc_list(struct be_adapter *adapter)
{
	struct netdev_hw_addr *ha;
	int i = 1; /* First slot is claimed by the Primary MAC */

	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);

	if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
		be_set_all_promisc(adapter);
		return;
	}

	netdev_for_each_uc_addr(ha, adapter->netdev) {
		adapter->uc_macs++; /* First slot is for Primary MAC */
		be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
				&adapter->pmac_id[adapter->uc_macs], 0);
	}
}

static void be_clear_uc_list(struct be_adapter *adapter)
{
	int i;

	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;
}
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_set_all_promisc(adapter);
		return;
	}

	/* Interface was previously in promiscuous mode; disable it */
	if (be_in_all_promisc(adapter)) {
		be_clear_all_promisc(adapter);
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_set_mc_promisc(adapter);
		return;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs)
		be_set_uc_list(adapter);

	be_set_mc_list(adapter);
}
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if user provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}
static int be_get_vf_config(struct net_device *netdev, int vf,
			    struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->max_tx_rate = vf_cfg->tx_rate;
	vi->min_tx_rate = 0;
	vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
	vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
	vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
	vi->spoofchk = adapter->vf_cfg[vf].spoofchk;

	return 0;
}
static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	int vf_if_id = vf_cfg->if_handle;
	int status;

	/* Enable Transparent VLAN Tagging */
	status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
	if (status)
		return status;

	/* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
	vids[0] = 0;
	status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
	if (!status)
		dev_info(&adapter->pdev->dev,
			 "Cleared guest VLANs on VF%d", vf);

	/* After TVT is enabled, disallow VFs to program VLAN filters */
	if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
						  ~BE_PRIV_FILTMGMT, vf + 1);
		if (!status)
			vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
	}
	return 0;
}

static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* Reset Transparent VLAN Tagging. */
	status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
				       vf_cfg->if_handle, 0, 0);
	if (status)
		return status;

	/* Allow VFs to program VLAN filtering */
	if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
						  BE_PRIV_FILTMGMT, vf + 1);
		if (!status) {
			vf_cfg->privileges |= BE_PRIV_FILTMGMT;
			dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
		}
	}

	dev_info(dev,
		 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
	return 0;
}
static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		vlan |= qos << VLAN_PRIO_SHIFT;
		status = be_set_vf_tvt(adapter, vf, vlan);
	} else {
		status = be_clear_vf_tvt(adapter, vf);
	}

	if (status) {
		dev_err(&adapter->pdev->dev,
			"VLAN %d config on VF %d failed : %#x\n", vlan, vf,
			status);
		return be_cmd_status(status);
	}

	vf_cfg->vlan_tag = vlan;
	return 0;
}
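/* Illustrative example (not in the original source): the 802.1Q TCI is
 * rebuilt here from its parts -- for vlan = 100 and qos = 5 the stored
 * tag becomes 100 | (5 << VLAN_PRIO_SHIFT) = 0xa064, matching how
 * be_get_vf_config() splits it back apart with VLAN_VID_MASK and
 * VLAN_PRIO_SHIFT.
 */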
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}
static int be_set_vf_link_state(struct net_device *netdev, int vf,
				int link_state)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	status = be_cmd_set_logical_link_config(adapter, link_state, vf + 1);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Link state change on VF %d failed: %#x\n", vf, status);
		return be_cmd_status(status);
	}

	adapter->vf_cfg[vf].plink_tracking = link_state;

	return 0;
}
static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	u8 spoofchk;
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (BEx_chip(adapter))
		return -EOPNOTSUPP;

	if (enable == vf_cfg->spoofchk)
		return 0;

	spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;

	status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
				       0, spoofchk);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Spoofchk change on VF %d failed: %#x\n", vf, status);
		return be_cmd_status(status);
	}

	vf_cfg->spoofchk = enable;
	return 0;
}
static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
			  ulong now)
{
	aic->rx_pkts_prev = rx_pkts;
	aic->tx_reqs_prev = tx_pkts;
	aic->jiffies = now;
}
static int be_get_new_eqd(struct be_eq_obj *eqo)
{
	struct be_adapter *adapter = eqo->adapter;
	int eqd, start;
	struct be_aic_obj *aic;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts = 0, tx_pkts = 0;
	ulong now;
	u32 pps, delta;
	int i;

	aic = &adapter->aic_obj[eqo->idx];

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts += rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
	}

	for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts += txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
	}

	/* Skip, if wrapped around or first calculation */
	now = jiffies;
	if (!aic->jiffies || time_before(now, aic->jiffies) ||
	    rx_pkts < aic->rx_pkts_prev ||
	    tx_pkts < aic->tx_reqs_prev) {
		be_aic_update(aic, rx_pkts, tx_pkts, now);
		return aic->prev_eqd;
	}

	delta = jiffies_to_msecs(now - aic->jiffies);
	if (!delta)
		return aic->prev_eqd;

	pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
		(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
	eqd = (pps / 15000) << 2;

	eqd = min_t(u32, eqd, aic->max_eqd);
	eqd = max_t(u32, eqd, aic->min_eqd);

	be_aic_update(aic, rx_pkts, tx_pkts, now);

	return eqd;
}
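/* Illustrative example (not in the original source): at a combined rate
 * of 600k pkts/s, eqd = (600000 / 15000) << 2 = 160, which is then
 * clamped into the [aic->min_eqd, aic->max_eqd] window -- the interrupt
 * delay grows roughly linearly with packet rate until it saturates at
 * the configured maximum.
 */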
/* For Skyhawk-R only */
static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
{
	struct be_adapter *adapter = eqo->adapter;
	struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
	ulong now = jiffies;
	int eqd;
	u32 mult_enc;

	if (time_before_eq(now, aic->jiffies) ||
	    jiffies_to_msecs(now - aic->jiffies) < 1)
		eqd = aic->prev_eqd;
	else
		eqd = be_get_new_eqd(eqo);

	if (eqd > 100)
		mult_enc = R2I_DLY_ENC_1;
	else if (eqd > 60)
		mult_enc = R2I_DLY_ENC_2;
	else if (eqd > 20)
		mult_enc = R2I_DLY_ENC_3;
	else
		mult_enc = R2I_DLY_ENC_0;

	aic->prev_eqd = eqd;

	return mult_enc;
}
void be_eqd_update(struct be_adapter *adapter, bool force_update)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	int i, num = 0, eqd;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		eqd = be_get_new_eqd(eqo);
		if (force_update || eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65) / 100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
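/* Note (added for clarity, not in the original source): the delay
 * written to FW is scaled to 65% of the computed eqd
 * ((eqd * 65) / 100), and only queues whose value actually changed
 * (or all of them when force_update is set) are batched into the
 * single MODIFY_EQ_DELAY command.
 */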
static void be_rx_stats_update(struct be_rx_obj *rxo,
			       struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}
static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts
	 */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
		(rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
}
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
}
/* skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
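/* The first BE_HDR_LEN bytes of a frame are always copied into the skb's
 * linear area so the stack can parse headers without touching page memory;
 * anything beyond that stays in page fragments attached via skb_shinfo().
 */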
/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
	rxcp->tunneled =
		GET_RX_COMPL_V1_BITS(tunneled, compl);
}

static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
	rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
}
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
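/* The valid bit is the producer/consumer handshake with HW: the device sets
 * it when it writes a completion entry and the driver clears it after
 * parsing, so a stale entry seen after ring wrap-around is never re-consumed.
 */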
static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0, notify = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				adapter->drv_stats.dma_map_errors++;
				break;
			}
			page_offset = 0;
		} else {
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		do {
			notify = min(MAX_NUM_POST_ERX_DB, posted);
			be_rxq_notify(adapter, rxq->id, notify);
			posted -= notify;
		} while (posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
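/* Worked example: with rx_frag_size == 2048 and a 4K big_page_size each page
 * is carved into two RX fragments.  Only the fragment flagged last_frag
 * stores the page-wide DMA address, so the page is unmapped exactly once,
 * when that final fragment is consumed by get_rx_page_info().
 */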
static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_tx_compl_info *txcp = &txo->txcp;
	struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);

	if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Ensure load ordering of valid bit dword and other dwords below */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	txcp->status = GET_TX_COMPL_BITS(status, compl);
	txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);

	compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
	queue_tail_inc(tx_cq);
	return txcp;
}

static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct be_queue_info *txq = &txo->q;
	u16 frag_index, num_wrbs = 0;
	struct sk_buff *skb = NULL;
	bool unmap_skb_hdr = false;
	struct be_eth_wrb *wrb;

	do {
		if (sent_skbs[txq->tail]) {
			/* Free skb from prev req */
			if (skb)
				dev_consume_skb_any(skb);
			skb = sent_skbs[txq->tail];
			sent_skbs[txq->tail] = NULL;
			queue_tail_inc(txq);	/* skip hdr wrb */
			num_wrbs++;
			unmap_skb_hdr = true;
		}
		wrb = queue_tail_node(txq);
		frag_index = txq->tail;
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(skb)));
		unmap_skb_hdr = false;
		queue_tail_inc(txq);
		num_wrbs++;
	} while (frag_index != last_index);
	dev_consume_skb_any(skb);

	return num_wrbs;
}
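/* A transmitted skb occupies one header WRB followed by one WRB per DMA
 * fragment.  be_tx_compl_process() walks the TX ring from the header WRB
 * through last_index, unmapping each fragment and freeing the skb once all
 * of its WRBs have been reclaimed; the count of freed WRBs is returned.
 */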
/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}

/* Leaves the EQ in a disarmed state */
static void be_eq_clean(struct be_eq_obj *eqo)
{
	int num = events_get(eqo);

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
}

/* Free posted rx buffers that were not used */
static void be_rxq_clean(struct be_rx_obj *rxo)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;

	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = 0;
	rxq->head = 0;
}

static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			if (flush_wait++ > 50 ||
			    be_check_error(adapter,
					   BE_ERROR_HW)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);
}
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct device *dev = &adapter->pdev->dev;
	struct be_tx_compl_info *txcp;
	struct be_queue_info *txq;
	struct be_tx_obj *txo;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(txo))) {
				num_wrbs +=
					be_tx_compl_process(adapter, txo,
							    txcp->end_index);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				timeo = 0;
			}
			if (!be_is_tx_compl_pending(txo))
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 ||
		    be_check_error(adapter, BE_ERROR_HW))
			break;

		mdelay(1);
	} while (true);

	/* Free enqueued TX that was never notified to HW */
	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;

		if (atomic_read(&txq->used)) {
			dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
				 i, atomic_read(&txq->used));
			notified_idx = txq->tail;
			end_idx = txq->tail;
			index_adv(&end_idx, atomic_read(&txq->used) - 1,
				  txq->len);
			/* Use the tx-compl process logic to handle requests
			 * that were not sent to the HW.
			 */
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
			BUG_ON(atomic_read(&txq->used));
			txo->pend_wrb_cnt = 0;
			/* Since hw was never notified of these requests,
			 * reset TXQ indices
			 */
			txq->head = notified_idx;
			txq->tail = notified_idx;
		}
	}
}
static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
			napi_hash_del(&eqo->napi);
			netif_napi_del(&eqo->napi);
		}
		free_cpumask_var(eqo->affinity_mask);
		be_queue_free(adapter, &eqo->q);
	}
}

static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		int numa_node = dev_to_node(&adapter->pdev->dev);

		if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
			return -ENOMEM;
		cpumask_set_cpu(cpumask_local_spread(i, numa_node),
				eqo->affinity_mask);
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		napi_hash_add(&eqo->napi);
		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->idx = i;
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}
/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
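/* The unwind labels mirror creation order: a failure at any step frees only
 * what was successfully set up before it, the idiomatic goto-ladder pattern
 * for multi-stage initialization.
 */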
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	int i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}

static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq;
	struct be_tx_obj *txo;
	struct be_eq_obj *eqo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
		status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;

		netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
				    eqo->idx);
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}

static void be_rx_cqs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}

static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rss_qs = adapter->num_evt_qs;

	/* We'll use RSS only if at least 2 RSS rings are supported. */
	if (adapter->num_rss_qs <= 1)
		adapter->num_rss_qs = 0;

	adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;

	/* When the interface is not capable of RSS rings (and there is no
	 * need to create a default RXQ) we'll still need one RXQ
	 */
	if (adapter->num_rx_qs == 0)
		adapter->num_rx_qs = 1;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RX queue(s)\n", adapter->num_rx_qs);
	return 0;
}
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return rxcp->tcpf && !rxcp->err && rxcp->l4_csum;
}
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;
	u32 frags_consumed = 0;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some SKUs
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		frags_consumed += rxcp->num_rcvd;
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC,
					 max_t(u32, MAX_RX_POST,
					       frags_consumed));
	}

	return work_done;
}
static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
{
	switch (status) {
	case BE_TX_COMP_HDR_PARSE_ERR:
		tx_stats(txo)->tx_hdr_parse_err++;
		break;
	case BE_TX_COMP_NDMA_ERR:
		tx_stats(txo)->tx_dma_err++;
		break;
	case BE_TX_COMP_ACL_ERR:
		tx_stats(txo)->tx_spoof_check_err++;
		break;
	}
}

static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
{
	switch (status) {
	case LANCER_TX_COMP_LSO_ERR:
		tx_stats(txo)->tx_tso_err++;
		break;
	case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
	case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
		tx_stats(txo)->tx_spoof_check_err++;
		break;
	case LANCER_TX_COMP_QINQ_ERR:
		tx_stats(txo)->tx_qinq_err++;
		break;
	case LANCER_TX_COMP_PARITY_ERR:
		tx_stats(txo)->tx_internal_parity_err++;
		break;
	case LANCER_TX_COMP_DMA_ERR:
		tx_stats(txo)->tx_dma_err++;
		break;
	}
}

static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int idx)
{
	int num_wrbs = 0, work_done = 0;
	struct be_tx_compl_info *txcp;

	while ((txcp = be_tx_compl_get(txo))) {
		num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
		work_done++;

		if (txcp->status) {
			if (lancer_chip(adapter))
				lancer_update_tx_err(txo, txcp->status);
			else
				be_update_tx_err(txo, txcp->status);
		}
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    be_can_txq_wake(txo)) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
}
#ifdef CONFIG_NET_RX_BUSY_POLL
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock(&eqo->lock); /* BH is already disabled */
	if (eqo->state & BE_EQ_LOCKED) {
		WARN_ON(eqo->state & BE_EQ_NAPI);
		eqo->state |= BE_EQ_NAPI_YIELD;
		status = false;
	} else {
		eqo->state = BE_EQ_NAPI;
	}
	spin_unlock(&eqo->lock);
	return status;
}

static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
	spin_lock(&eqo->lock); /* BH is already disabled */

	WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
	eqo->state = BE_EQ_IDLE;

	spin_unlock(&eqo->lock);
}

static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock_bh(&eqo->lock);
	if (eqo->state & BE_EQ_LOCKED) {
		eqo->state |= BE_EQ_POLL_YIELD;
		status = false;
	} else {
		eqo->state |= BE_EQ_POLL;
	}
	spin_unlock_bh(&eqo->lock);
	return status;
}

static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_bh(&eqo->lock);

	WARN_ON(eqo->state & (BE_EQ_NAPI));
	eqo->state = BE_EQ_IDLE;

	spin_unlock_bh(&eqo->lock);
}

static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_init(&eqo->lock);
	eqo->state = BE_EQ_IDLE;
}

static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
	local_bh_disable();

	/* It's enough to just acquire napi lock on the eqo to stop
	 * be_busy_poll() from processing any queues.
	 */
	while (!be_lock_napi(eqo))
		mdelay(1);

	local_bh_enable();
}

#else /* CONFIG_NET_RX_BUSY_POLL */

static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	return true;
}

static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
}

static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	return false;
}

static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u32 mult_enc = 0;

	num_evts = events_get(eqo);

	for_all_tx_queues_on_eq(adapter, eqo, txo, i)
		be_process_tx(adapter, txo, i);

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);

		/* Skyhawk EQ_DB has a provision to set the rearm to interrupt
		 * delay via a delay multiplier encoding value
		 */
		if (skyhawk_chip(adapter))
			mult_enc = be_get_eq_delay_mult_enc(eqo);

		be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
			     mult_enc);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
	}
	return max_work;
}
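/* NAPI contract: completing with less than the budget re-arms the EQ for
 * interrupts; consuming the whole budget keeps the EQ unarmed so the core
 * polls again, which is why events are only counted and cleared, not
 * re-armed, in that branch.
 */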
#ifdef CONFIG_NET_RX_BUSY_POLL
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;
	struct device *dev = &adapter->pdev->dev;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			be_set_error(adapter, BE_ERROR_UE);
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
			/* Do not log error messages if it's a FW reset */
			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
				dev_info(dev, "Firmware update in progress\n");
			} else {
				dev_err(dev, "Error detected in the card\n");
				dev_err(dev, "ERR: sliport status 0x%x\n",
					sliport_status);
				dev_err(dev, "ERR: sliport error1 0x%x\n",
					sliport_err1);
				dev_err(dev, "ERR: sliport error2 0x%x\n",
					sliport_err2);
			}
		}
	} else {
		ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
		ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
		ue_lo_mask = ioread32(adapter->pcicfg +
				      PCICFG_UE_STATUS_LOW_MASK);
		ue_hi_mask = ioread32(adapter->pcicfg +
				      PCICFG_UE_STATUS_HI_MASK);

		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);

		/* On certain platforms BE hardware can indicate spurious UEs.
		 * Allow HW to stop working completely in case of a real UE.
		 * Hence not setting the hw_error for UE detection.
		 */

		if (ue_lo || ue_hi) {
			dev_err(dev,
				"Unrecoverable Error detected in the adapter");
			dev_err(dev, "Please reboot server to recover");
			if (skyhawk_chip(adapter))
				be_set_error(adapter, BE_ERROR_UE);

			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
				if (ue_lo & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_low_desc[i]);
			}
			for (i = 0; ue_hi; ue_hi >>= 1, i++) {
				if (ue_hi & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_hi_desc[i]);
			}
		}
	}
}
static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
		adapter->num_msix_roce_vec = 0;
	}
}

static int be_msix_enable(struct be_adapter *adapter)
{
	int i, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCE. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					MIN_MSIX_VECTORS, num_vec);
	if (num_vec < 0)
		goto fail;

	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;

fail:
	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (be_virtfn(adapter))
		return num_vec;
	return 0;
}
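/* Worked example: with RoCE supported, 8 EQs and 16 online CPUs the driver
 * asks for min(2 * 8, 2 * 16) = 16 vectors; on success num_vec / 2 = 8 are
 * set aside for RoCE and the NIC keeps the remaining 8.
 */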
static inline int be_msix_vec_get(struct be_adapter *adapter,
				  struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->msix_idx].vector;
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;

		irq_set_affinity_hint(vec, eqo->affinity_mask);
	}

	return 0;
err_msix:
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (be_virtfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i, vec;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, &adapter->eq_obj[0]);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i) {
		vec = be_msix_vec_get(adapter, eqo);
		irq_set_affinity_hint(vec, NULL);
		free_irq(vec, eqo);
	}

done:
	adapter->isr_registered = false;
}
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			/* If RXQs are destroyed while in an "out of buffer"
			 * state, there is a possibility of an HW stall on
			 * Lancer. So, post 64 buffers to each queue to relieve
			 * the "out of buffer" condition.
			 * Make sure there's space in the RXQ before posting.
			 */
			if (lancer_chip(adapter)) {
				be_rx_cq_clean(rxo);
				if (atomic_read(&q->used) == 0)
					be_post_rx_frags(rxo, GFP_KERNEL,
							 MAX_RX_POST);
			}

			be_cmd_rxq_destroy(adapter, q);
			be_rx_cq_clean(rxo);
			be_rxq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}

static void be_disable_if_filters(struct be_adapter *adapter)
{
	be_cmd_pmac_del(adapter, adapter->if_handle,
			adapter->pmac_id[0], 0);

	be_clear_uc_list(adapter);

	/* The IFACE flags are enabled in the open path and cleared
	 * in the close path. When a VF gets detached from the host and
	 * assigned to a VM the following happens:
	 *	- VF's IFACE flags get cleared in the detach path
	 *	- IFACE create is issued by the VF in the attach path
	 * Due to a bug in the BE3/Skyhawk-R FW
	 * (Lancer FW doesn't have the bug), the IFACE capability flags
	 * specified along with the IFACE create cmd issued by a VF are not
	 * honoured by FW. As a consequence, if a *new* driver
	 * (that enables/disables IFACE flags in open/close)
	 * is loaded in the host and an *old* driver is used by a VM/VF,
	 * the IFACE gets created *without* the needed flags.
	 * To avoid this, disable RX-filter flags only for Lancer.
	 */
	if (lancer_chip(adapter)) {
		be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
		adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
	}
}

static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	be_disable_if_filters(adapter);

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct rss_info *rss = &adapter->rss_info;
	u8 rss_key[RSS_HASH_KEY_LEN];
	struct be_rx_obj *rxo;
	int rc, i, j;

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	if (adapter->need_def_rxq || !adapter->num_rss_qs) {
		rxo = default_rxo(adapter);
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       false, &rxo->rss_id);
		if (rc)
			return rc;
	}

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= RSS_INDIR_TABLE_LEN)
					break;
				rss->rsstable[j + i] = rxo->rss_id;
				rss->rss_queue[j + i] = i;
			}
		}
		rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
				 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		if (!BEx_chip(adapter))
			rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
					  RSS_ENABLE_UDP_IPV6;
	} else {
		/* Disable RSS, if only default RX Q is created */
		rss->rss_flags = RSS_ENABLE_NONE;
	}

	netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
	rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
			       RSS_INDIR_TABLE_LEN, rss_key);
	if (rc) {
		rss->rss_flags = RSS_ENABLE_NONE;
		return rc;
	}

	memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);

	/* Post 1 less than RXQ-len to avoid head being equal to tail,
	 * which is a queue empty condition
	 */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);

	return 0;
}
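/* Worked example: with 4 RSS rings the indirection table is filled
 * round-robin (ring 0, 1, 2, 3, 0, 1, ...), so each ring owns an equal
 * share of the table slots and therefore of the RSS-hashed flows.
 */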
static int be_enable_if_filters(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_EN_FLAGS, ON);
	if (status)
		return status;

	/* For BE3 VFs, the PF programs the initial MAC address */
	if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
		status = be_cmd_pmac_add(adapter, adapter->netdev->dev_addr,
					 adapter->if_handle,
					 &adapter->pmac_id[0], 0);
		if (status)
			return status;
	}

	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	return 0;
}

static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_enable_if_filters(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);

#ifdef CONFIG_BE2NET_VXLAN
	if (skyhawk_chip(adapter))
		vxlan_get_rx_port(netdev);
#endif

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	u8 mac[ETH_ALEN];
	int status;

	eth_zero_addr(mac);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				     GFP_KERNEL);
	if (!cmd.va)
		return -ENOMEM;

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
						PCICFG_PM_CONTROL_OFFSET,
						PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
						 adapter->netdev->dev_addr,
						 &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}
static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
{
	u32 addr;

	addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);

	mac[5] = (u8)(addr & 0xFF);
	mac[4] = (u8)((addr >> 8) & 0xFF);
	mac[3] = (u8)((addr >> 16) & 0xFF);
	/* Use the OUI from the current MAC address */
	memcpy(mac, adapter->netdev->dev_addr, 3);
}

/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n",
				vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}
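/* Worked example: if the PF MAC is 00:90:fa:12:34:56 and jhash() of it
 * yields 0x00a1b2c3, the seed VF MAC becomes 00:90:fa:a1:b2:c3 (OUI kept,
 * low three bytes from the hash); each subsequent VF increments mac[5].
 */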
static int be_vfs_mac_query(struct be_adapter *adapter)
{
	int status, vf;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
					       mac, vf_cfg->if_handle,
					       false, vf + 1);
		if (status)
			return status;
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
	}
	return 0;
}

static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
	adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
}
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}

static void be_cancel_worker(struct be_adapter *adapter)
{
	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}
}

static void be_cancel_err_detection(struct be_adapter *adapter)
{
	if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->be_err_detection_work);
		adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
	}
}

#ifdef CONFIG_BE2NET_VXLAN
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;

	netdev->hw_enc_features = 0;
	netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
	netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
}
#endif
static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs)
{
	struct be_resources res = adapter->pool_res;
	u16 num_vf_qs = 1;

	/* Distribute the queue resources equally among the PF and its VFs
	 * Do not distribute queue resources in multi-channel configuration.
	 */
	if (num_vfs && !be_is_mc(adapter)) {
		/* If number of VFs requested is 8 less than max supported,
		 * assign 8 queue pairs to the PF and divide the remaining
		 * resources evenly among the VFs
		 */
		if (num_vfs < (be_max_vfs(adapter) - 8))
			num_vf_qs = (res.max_rss_qs - 8) / num_vfs;
		else
			num_vf_qs = res.max_rss_qs / num_vfs;

		/* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable
		 * interfaces per port. Provide RSS on VFs, only if number
		 * of VFs requested is less than MAX_RSS_IFACES limit.
		 */
		if (num_vfs >= MAX_RSS_IFACES)
			num_vf_qs = 1;
	}
	return num_vf_qs;
}
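/* Worked example: with res.max_rss_qs == 32 and 4 requested VFs (well under
 * be_max_vfs - 8) the PF keeps 8 queue pairs and each VF gets
 * (32 - 8) / 4 = 6; once num_vfs reaches MAX_RSS_IFACES each VF falls back
 * to a single queue pair.
 */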
static int be_clear(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u16 num_vf_qs;

	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (skyhawk_chip(adapter) && be_physfn(adapter) &&
	    !pci_vfs_assigned(pdev)) {
		num_vf_qs = be_calculate_vf_qs(adapter,
					       pci_sriov_get_totalvfs(pdev));
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(pdev),
					num_vf_qs);
	}

#ifdef CONFIG_BE2NET_VXLAN
	be_disable_vxlan_offloads(adapter);
#endif
	kfree(adapter->pmac_id);
	adapter->pmac_id = NULL;

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	u32 cap_flags, en_flags, vf;
	struct be_vf_cfg *vf_cfg;
	int status;

	/* If a FW profile exists, then cap_flags are updated */
	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res,
							   RESOURCE_LIMITS,
							   vf + 1);
			if (!status) {
				cap_flags = res.if_cap_flags;
				/* Prevent VFs from enabling VLAN promiscuous
				 * mode
				 */
				cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
			}
		}

		en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
					BE_IF_FLAGS_BROADCAST |
					BE_IF_FLAGS_MULTICAST |
					BE_IF_FLAGS_PASS_L3L4_ERRORS);
		status = be_cmd_if_create(adapter, cap_flags, en_flags,
					  &vf_cfg->if_handle, vf + 1);
		if (status)
			return status;
	}

	return 0;
}

static int be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
				  GFP_KERNEL);
	if (!adapter->vf_cfg)
		return -ENOMEM;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
	return 0;
}
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	bool spoofchk;

	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to program MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
						  vf + 1);
		if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  vf_cfg->privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status) {
				vf_cfg->privileges |= BE_PRIV_FILTMGMT;
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
			}
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
					       vf_cfg->if_handle, NULL,
					       &spoofchk);
		if (!status)
			vf_cfg->spoofchk = spoofchk;

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf + 1);
		}
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}

	adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
/* Converting function_mode bits on BE3 to SH mc_type enums */
static u8 be_convert_mc_type(u32 function_mode)
{
	if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
		return vNIC1;
	else if (function_mode & QNQ_MODE)
		return FLEX10;
	else if (function_mode & VNIC_MODE)
		return vNIC2;
	else if (function_mode & UMC_ENABLED)
		return UMC;
	else
		return MC_NONE;
}
/* On BE2/BE3 FW does not suggest the supported limits */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	bool use_sriov = adapter->num_vfs ? 1 : 0;

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED / 8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 *    *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    be_virtfn(adapter) ||
	    (be_is_mc(adapter) &&
	     !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
		res->max_tx_qs = 1;
	} else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
		struct be_resources super_nic_res = {0};

		/* On a SuperNIC profile, the driver needs to use the
		 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
		 */
		be_cmd_get_profile_config(adapter, &super_nic_res,
					  RESOURCE_LIMITS, 0);
		/* Some old versions of BE3 FW don't report max_tx_qs value */
		res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
	} else {
		res->max_tx_qs = BE3_MAX_TX_QS;
	}

	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
					   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
					   BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
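/* Example of the VLAN filter split above: in QnQ mode the
 * BE_NUM_VLANS_SUPPORTED pool is divided by 8, presumably to cover both
 * inner and outer tags on each of the assumed 4 channels; in plain
 * multi-channel mode it is split 4 ways with one entry reserved for the
 * pvid, hence the "- 1".
 */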
static void be_setup_init(struct be_adapter *adapter)
{
	adapter->vlan_prio_bmap = 0xff;
	adapter->phy.link_speed = -1;
	adapter->if_handle = -1;
	adapter->be3_native = false;
	adapter->if_flags = 0;
	if (be_physfn(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;
	else
		adapter->cmd_privileges = MIN_PRIVILEGES;
}

static int be_get_sriov_config(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	int max_vfs, old_vfs;

	be_cmd_get_profile_config(adapter, &res, RESOURCE_LIMITS, 0);

	/* Some old versions of BE3 FW don't report max_vfs value */
	if (BE3_chip(adapter) && !res.max_vfs) {
		max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
		res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
	}

	adapter->pool_res = res;

	/* If during previous unload of the driver, the VFs were not disabled,
	 * then we cannot rely on the PF POOL limits for the TotalVFs value.
	 * Instead use the TotalVFs value stored in the pci-dev struct.
	 */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
			 old_vfs);

		adapter->pool_res.max_vfs =
			pci_sriov_get_totalvfs(adapter->pdev);
		adapter->num_vfs = old_vfs;
	}

	return 0;
}
static void be_alloc_sriov_res(struct be_adapter *adapter)
{
	int old_vfs = pci_num_vf(adapter->pdev);
	u16 num_vf_qs;
	int status;

	be_get_sriov_config(adapter);

	if (!old_vfs)
		pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));

	/* When the HW is in SRIOV capable configuration, the PF-pool
	 * resources are given to PF during driver load, if there are no
	 * old VFs. This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
		num_vf_qs = be_calculate_vf_qs(adapter, 0);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
						 num_vf_qs);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Failed to optimize SRIOV resources\n");
	}
}
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If a default RXQ must be created, we'll use up one RSSQ */
		if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
		    !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
			res.max_rss_qs -= 1;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;
	}

	/* If FW supports RSS default queue, then skip creating non-RSS
	 * queue for non-IP traffic.
	 */
	adapter->need_def_rxq = (be_if_cap_flags(adapter) &
				 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;

	dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
		 be_max_txqs(adapter), be_max_rxqs(adapter),
		 be_max_rss(adapter), be_max_eqs(adapter),
		 be_max_vfs(adapter));
	dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
		 be_max_uc(adapter), be_max_mc(adapter),
		 be_max_vlans(adapter));

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min_t(u16, netif_get_num_default_rss_queues(),
				    be_max_qs(adapter));
	return 0;
}
static int be_get_config(struct be_adapter *adapter)
{
	int status, level;
	u16 profile_id;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	if (BEx_chip(adapter)) {
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	be_cmd_get_acpi_wol_cap(adapter);

	be_cmd_query_port_name(adapter);

	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	status = be_get_resources(adapter);
	if (status)
		return status;

	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	return 0;
}
static int be_mac_setup(struct be_adapter *adapter)
{
	u8 mac[ETH_ALEN];
	int status;

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		status = be_cmd_get_perm_mac(adapter, mac);
		if (status)
			return status;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	return 0;
}
static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}

static void be_schedule_err_detection(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->be_err_detection_work,
			      msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
}
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
static inline int fw_major_num(const char *fw_ver)
{
	int fw_major = 0, i;

	i = sscanf(fw_ver, "%d.", &fw_major);
	if (i != 1)
		return 0;

	return fw_major;
}
4357 static bool be_reset_required(struct be_adapter
*adapter
)
4359 return pci_num_vf(adapter
->pdev
) ? false : true;
4362 /* Wait for the FW to be ready and perform the required initialization */
4363 static int be_func_init(struct be_adapter
*adapter
)
4367 status
= be_fw_wait_ready(adapter
);
4371 if (be_reset_required(adapter
)) {
4372 status
= be_cmd_reset_function(adapter
);
4376 /* Wait for interrupts to quiesce after an FLR */
4379 /* We can clear all errors when function reset succeeds */
4380 be_clear_error(adapter
, BE_CLEAR_ALL
);
4383 /* Tell FW we're ready to fire cmds */
4384 status
= be_cmd_fw_init(adapter
);
4388 /* Allow interrupts for other ULPs running on NIC function */
4389 be_intr_set(adapter
, true);
4394 static int be_setup(struct be_adapter
*adapter
)
4396 struct device
*dev
= &adapter
->pdev
->dev
;
4400 status
= be_func_init(adapter
);
4404 be_setup_init(adapter
);
4406 if (!lancer_chip(adapter
))
4407 be_cmd_req_native_mode(adapter
);
4409 if (!BE2_chip(adapter
) && be_physfn(adapter
))
4410 be_alloc_sriov_res(adapter
);
4412 status
= be_get_config(adapter
);
4416 status
= be_msix_enable(adapter
);
4420 /* will enable all the needed filter flags in be_open() */
4421 en_flags
= BE_IF_FLAGS_RSS
| BE_IF_FLAGS_DEFQ_RSS
;
4422 en_flags
= en_flags
& be_if_cap_flags(adapter
);
4423 status
= be_cmd_if_create(adapter
, be_if_cap_flags(adapter
), en_flags
,
4424 &adapter
->if_handle
, 0);
4428 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
4430 status
= be_setup_queues(adapter
);
4435 be_cmd_get_fn_privileges(adapter
, &adapter
->cmd_privileges
, 0);
4437 status
= be_mac_setup(adapter
);
4441 be_cmd_get_fw_ver(adapter
);
4442 dev_info(dev
, "FW version is %s\n", adapter
->fw_ver
);
4444 if (BE2_chip(adapter
) && fw_major_num(adapter
->fw_ver
) < 4) {
4445 dev_err(dev
, "Firmware on card is old(%s), IRQs may not work",
4447 dev_err(dev
, "Please upgrade firmware to version >= 4.0\n");
4450 status
= be_cmd_set_flow_control(adapter
, adapter
->tx_fc
,
4453 be_cmd_get_flow_control(adapter
, &adapter
->tx_fc
,
4456 dev_info(&adapter
->pdev
->dev
, "HW Flow control - TX:%d RX:%d\n",
4457 adapter
->tx_fc
, adapter
->rx_fc
);
4459 if (be_physfn(adapter
))
4460 be_cmd_set_logical_link_config(adapter
,
4461 IFLA_VF_LINK_STATE_AUTO
, 0);
4463 if (adapter
->num_vfs
)
4464 be_vf_setup(adapter
);
4466 status
= be_cmd_get_phy_info(adapter
);
4467 if (!status
&& be_pause_supported(adapter
))
4468 adapter
->phy
.fc_autoneg
= 1;
4470 be_schedule_worker(adapter
);
4471 adapter
->flags
|= BE_FLAGS_SETUP_DONE
;
#ifdef CONFIG_NET_POLL_CONTROLLER
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif
static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
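/* The two 16-byte halves above form the "*** SE FLASH DIRECTORY *** " cookie
 * that marks a flash_section_info header inside a UFI image; get_fsec_info()
 * below scans the image in 32-byte steps looking for it.
 */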
static bool phy_flashing_required(struct be_adapter *adapter)
{
	return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
		adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
}
static bool is_comp_in_ufi(struct be_adapter *adapter,
			   struct flash_section_info *fsec, int type)
{
	int i = 0, img_type = 0;
	struct flash_section_info_g2 *fsec_g2 = NULL;

	if (BE2_chip(adapter))
		fsec_g2 = (struct flash_section_info_g2 *)fsec;

	for (i = 0; i < MAX_FLASH_COMP; i++) {
		if (fsec_g2)
			img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
		else
			img_type = le32_to_cpu(fsec->fsec_entry[i].type);

		if (img_type == type)
			return true;
	}
	return false;
}
static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
						int header_size,
						const struct firmware *fw)
{
	struct flash_section_info *fsec = NULL;
	const u8 *p = fw->data;

	p += header_size;
	while (p < (fw->data + fw->size)) {
		fsec = (struct flash_section_info *)p;
		if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
			return fsec;
		p += 32;
	}
	return NULL;
}
static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
			      u32 img_offset, u32 img_size, int hdr_size,
			      u16 img_optype, bool *crc_match)
{
	u32 crc_offset;
	int status;
	u8 crc[4];

	status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
				      img_size - 4);
	if (status)
		return status;

	crc_offset = hdr_size + img_offset + img_size - 4;

	/* Skip flashing, if crc of flashed region matches */
	if (!memcmp(crc, p + crc_offset, 4))
		*crc_match = true;
	else
		*crc_match = false;

	return status;
}
static int be_flash(struct be_adapter *adapter, const u8 *img,
		    struct be_dma_mem *flash_cmd, int optype, int img_size,
		    u32 img_offset)
{
	u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	int status;

	while (total_bytes) {
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
					       flash_op, img_offset +
					       bytes_sent, num_bytes);
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
		    optype == OPTYPE_PHY_FW)
			break;
		else if (status)
			return status;

		bytes_sent += num_bytes;
	}
	return 0;
}
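/* Note: every intermediate 32KB chunk is written with a *_SAVE opcode (the
 * FW only buffers it); the final chunk uses a *_FLASH opcode, which commits
 * the accumulated image to the flash region.
 */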
/* For BE2, BE3 and BE3-R */
static int be_flash_BEx(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	int status, i, filehdr_size, num_comp;
	const struct flash_comp *pflashcomp;
	bool crc_match;
	const u8 *p;

	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
		img_hdrs_size = 0;
	}

	/* Get flash section info*/
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			status = be_check_flash_crc(adapter, fw->data,
						    pflashcomp[i].offset,
						    pflashcomp[i].size,
						    filehdr_size +
						    img_hdrs_size,
						    OPTYPE_REDBOOT, &crc_match);
			if (status) {
				dev_err(dev,
					"Could not get CRC for 0x%x region\n",
					pflashcomp[i].optype);
				continue;
			}

			if (crc_match)
				continue;
		}

		p = fw->data + filehdr_size + pflashcomp[i].offset +
			img_hdrs_size;
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
				  pflashcomp[i].size, 0);
		if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}
static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
{
	u32 img_type = le32_to_cpu(fsec_entry.type);
	u16 img_optype = le16_to_cpu(fsec_entry.optype);

	if (img_optype != 0xFFFF)
		return img_optype;

	switch (img_type) {
	case IMAGE_FIRMWARE_iSCSI:
		img_optype = OPTYPE_ISCSI_ACTIVE;
		break;
	case IMAGE_BOOT_CODE:
		img_optype = OPTYPE_REDBOOT;
		break;
	case IMAGE_OPTION_ROM_ISCSI:
		img_optype = OPTYPE_BIOS;
		break;
	case IMAGE_OPTION_ROM_PXE:
		img_optype = OPTYPE_PXE_BIOS;
		break;
	case IMAGE_OPTION_ROM_FCoE:
		img_optype = OPTYPE_FCOE_BIOS;
		break;
	case IMAGE_FIRMWARE_BACKUP_iSCSI:
		img_optype = OPTYPE_ISCSI_BACKUP;
		break;
	case IMAGE_NCSI:
		img_optype = OPTYPE_NCSI_FW;
		break;
	case IMAGE_FLASHISM_JUMPVECTOR:
		img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
		break;
	case IMAGE_FIRMWARE_PHY:
		img_optype = OPTYPE_SH_PHY_FW;
		break;
	case IMAGE_REDBOOT_DIR:
		img_optype = OPTYPE_REDBOOT_DIR;
		break;
	case IMAGE_REDBOOT_CONFIG:
		img_optype = OPTYPE_REDBOOT_CONFIG;
		break;
	case IMAGE_UFI_DIR:
		img_optype = OPTYPE_UFI_DIR;
		break;
	default:
		break;
	}

	return img_optype;
}
static int be_flash_skyhawk(struct be_adapter *adapter,
			    const struct firmware *fw,
			    struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
	bool crc_match, old_fw_img, flash_offset_support = true;
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	u32 img_offset, img_size, img_type;
	u16 img_optype, flash_optype;
	int status, i, filehdr_size;
	const u8 *p;

	filehdr_size = sizeof(struct flash_file_hdr_g3);
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -EINVAL;
	}

retry_flash:
	for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
		img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
		img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
		img_type = le32_to_cpu(fsec->fsec_entry[i].type);
		img_optype = be_get_img_optype(fsec->fsec_entry[i]);
		old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;

		if (img_optype == 0xFFFF)
			continue;

		if (flash_offset_support)
			flash_optype = OPTYPE_OFFSET_SPECIFIED;
		else
			flash_optype = img_optype;

		/* Don't bother verifying CRC if an old FW image is being
		 * flashed
		 */
		if (old_fw_img)
			goto flash;

		status = be_check_flash_crc(adapter, fw->data, img_offset,
					    img_size, filehdr_size +
					    img_hdrs_size, flash_optype,
					    &crc_match);
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
		    base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
			/* The current FW image on the card does not support
			 * OFFSET based flashing. Retry using older mechanism
			 * of OPTYPE based flashing
			 */
			if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
				flash_offset_support = false;
				goto retry_flash;
			}

			/* The current FW image on the card does not recognize
			 * the new FLASH op_type. The FW download is partially
			 * complete. Reboot the server now to enable FW image
			 * to recognize the new FLASH op_type. To complete the
			 * remaining process, download the same FW again after
			 * the reboot.
			 */
			dev_err(dev, "Flash incomplete. Reset the server\n");
			dev_err(dev, "Download FW image again after reset\n");
			return -EAGAIN;
		} else if (status) {
			dev_err(dev, "Could not get CRC for 0x%x region\n",
				img_optype);
			return -EFAULT;
		}

		if (crc_match)
			continue;

flash:
		p = fw->data + filehdr_size + img_offset + img_hdrs_size;
		if (p + img_size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, flash_optype,
				  img_size, img_offset);

		/* The current FW image on the card does not support OFFSET
		 * based flashing. Retry using older mechanism of OPTYPE based
		 * flashing
		 */
		if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
		    flash_optype == OPTYPE_OFFSET_SPECIFIED) {
			flash_offset_support = false;
			goto retry_flash;
		}

		/* For old FW images ignore ILLEGAL_FIELD error or errors on
		 * UFI_DIR region
		 */
		if (old_fw_img &&
		    (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
		     (img_optype == OPTYPE_UFI_DIR &&
		      base_status(status) == MCC_STATUS_FAILED))) {
			continue;
		} else if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				img_type);
			return -EFAULT;
		}
	}
	return 0;
}
static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK	(32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION	"/prg"
	struct device *dev = &adapter->pdev->dev;
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(dev, "FW image size should be multiple of 4\n");
		return -EINVAL;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size,
					   &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va)
		return -ENOMEM;

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
	if (status) {
		dev_err(dev, "Firmware load error\n");
		return be_cmd_status(status);
	}

	dev_info(dev, "Firmware flashed successfully\n");

	if (change_status == LANCER_FW_RESET_NEEDED) {
		dev_info(dev, "Resetting adapter to activate new FW\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(dev, "Adapter busy, could not reset FW\n");
			dev_err(dev, "Reboot server to activate new FW\n");
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_info(dev, "Reboot server to activate new FW\n");
	}

	return 0;
}
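/* The Lancer download is two-phase: the loop above streams 32KB chunks into
 * the "/prg" object, then a final zero-length write commits the image; the
 * FW reports through change_status whether a reset or reboot is required.
 */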
/* Check if the flash image file is compatible with the adapter that
 * is being flashed.
 */
static bool be_check_ufi_compatibility(struct be_adapter *adapter,
				       struct flash_file_hdr_g3 *fhdr)
{
	if (!fhdr) {
		dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
		return false;
	}

	/* First letter of the build version is used to identify
	 * which chip this image file is meant for.
	 */
	switch (fhdr->build[0]) {
	case BLD_STR_UFI_TYPE_SH:
		if (!skyhawk_chip(adapter))
			return false;
		break;
	case BLD_STR_UFI_TYPE_BE3:
		if (!BE3_chip(adapter))
			return false;
		break;
	case BLD_STR_UFI_TYPE_BE2:
		if (!BE2_chip(adapter))
			return false;
		break;
	default:
		return false;
	}

	return (fhdr->asic_type_rev >= adapter->asic_rev);
}
static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
	struct device *dev = &adapter->pdev->dev;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr;
	int status = 0, i, num_imgs;
	struct be_dma_mem flash_cmd;

	fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
	if (!be_check_ufi_compatibility(adapter, fhdr3)) {
		dev_err(dev, "Flash image is not compatible with adapter\n");
		return -EINVAL;
	}

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
					   GFP_KERNEL);
	if (!flash_cmd.va)
		return -ENOMEM;

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (!BE2_chip(adapter) &&
		    le32_to_cpu(img_hdr_ptr->imageid) != 1)
			continue;

		if (skyhawk_chip(adapter))
			status = be_flash_skyhawk(adapter, fw, &flash_cmd,
						  num_imgs);
		else
			status = be_flash_BEx(adapter, fw, &flash_cmd,
					      num_imgs);
	}

	dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
	if (!status)
		dev_info(dev, "Firmware flashed successfully\n");

	return status;
}
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -ENETDOWN;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

	if (!status)
		be_cmd_get_fw_ver(adapter);

fw_exit:
	release_firmware(fw);
	return status;
}
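/* be_load_fw() is typically reached through ethtool's flash-device hook,
 * e.g. "ethtool -f <iface> <fw-file>"; as checked above, the interface must
 * be up for the flash to be allowed.
 */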
static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
				 u16 flags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB, 0);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		return status;
	}
err:
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}
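/* Typically driven by iproute2, e.g. "bridge link set dev <pf-iface> hwmode
 * vepa" to make the eswitch reflect traffic to an external switch (VEPA)
 * instead of switching it locally (VEB).
 */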
static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				 struct net_device *dev, u32 filter_mask,
				 int nlflags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	int status = 0;
	u8 hsw_mode;

	if (!sriov_enabled(adapter))
		return 0;

	/* BE and Lancer chips support VEB mode only */
	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		hsw_mode = PORT_FWD_TYPE_VEB;
	} else {
		status = be_cmd_get_hsw_config(adapter, NULL, 0,
					       adapter->if_handle, &hsw_mode,
					       NULL);
		if (status)
			return 0;
	}

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       hsw_mode == PORT_FWD_TYPE_VEPA ?
				       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
				       0, 0, nlflags, filter_mask, NULL);
}
#ifdef CONFIG_BE2NET_VXLAN
/* VxLAN offload Notes:
 *
 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
 * is expected to work across all types of IP tunnels once exported. Skyhawk
 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
 * those other tunnels are unexported on the fly through ndo_features_check().
 *
 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
 * adds more than one port, disable offloads and don't re-enable them again
 * until after all the tunnels are removed.
 */
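/* Example: adding the first VxLAN dport (say 4789) converts the interface to
 * tunnel mode and exports NETIF_F_GSO_UDP_TUNNEL; adding a second, different
 * dport disables the offloads, and they stay off until all ports are removed.
 */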
static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (lancer_chip(adapter) || BEx_chip(adapter))
		return;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		dev_info(dev, "Disabling VxLAN offloads\n");
		adapter->vxlan_port_count++;
		goto err;
	}

	if (adapter->vxlan_port_count++ >= 1)
		return;

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		goto err;
	}

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		goto err;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = port;

	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				   NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_GSO_UDP_TUNNEL;
	netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	netdev->features |= NETIF_F_GSO_UDP_TUNNEL;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	return;
err:
	be_disable_vxlan_offloads(adapter);
}
static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (lancer_chip(adapter) || BEx_chip(adapter))
		goto done;

	if (adapter->vxlan_port != port)
		goto done;

	be_disable_vxlan_offloads(adapter);

	dev_info(&adapter->pdev->dev,
		 "Disabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
done:
	adapter->vxlan_port_count--;
}
static netdev_features_t be_features_check(struct sk_buff *skb,
					   struct net_device *dev,
					   netdev_features_t features)
{
	struct be_adapter *adapter = netdev_priv(dev);
	u8 l4_hdr = 0;

	/* The code below restricts offload features for some tunneled packets.
	 * Offload features for normal (non tunnel) packets are unchanged.
	 */
	if (!skb->encapsulation ||
	    !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
		return features;

	/* It's an encapsulated packet and VxLAN offloads are enabled. We
	 * should disable tunnel offload features if it's not a VxLAN packet,
	 * as tunnel offloads have been enabled only for VxLAN. This is done to
	 * allow other tunneled traffic like GRE work fine while VxLAN
	 * offloads are configured in Skyhawk-R.
	 */
	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return features;
	}

	if (l4_hdr != IPPROTO_UDP ||
	    skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB) ||
	    skb_inner_mac_header(skb) - skb_transport_header(skb) !=
	    sizeof(struct udphdr) + sizeof(struct vxlanhdr))
		return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);

	return features;
}
#endif
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
	.ndo_set_vf_link_state	= be_set_vf_link_state,
	.ndo_set_vf_spoofchk	= be_set_vf_spoofchk,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	.ndo_add_vxlan_port	= be_add_vxlan_port,
	.ndo_del_vxlan_port	= be_del_vxlan_port,
	.ndo_features_check	= be_features_check,
#endif
};
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}
static void be_cleanup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	rtnl_lock();
	netif_device_detach(netdev);
	if (netif_running(netdev))
		be_close(netdev);
	rtnl_unlock();

	be_clear(adapter);
}
static int be_resume(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_setup(adapter);
	if (status)
		return status;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			return status;
	}

	netif_device_attach(netdev);

	return 0;
}
static int be_err_recover(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_resume(adapter);
	if (status)
		goto err;

	dev_info(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (be_physfn(adapter))
		dev_err(dev, "Adapter recovery failed\n");
	else
		dev_err(dev, "Re-trying adapter recovery\n");

	return status;
}
static void be_err_detection_task(struct work_struct *work)
{
	struct be_adapter *adapter =
				container_of(work, struct be_adapter,
					     be_err_detection_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (be_check_error(adapter, BE_ERROR_HW)) {
		be_cleanup(adapter);

		/* As of now error recovery support is in Lancer only */
		if (lancer_chip(adapter))
			status = be_err_recover(adapter);
	}

	/* Always attempt recovery on VFs */
	if (!status || be_virtfn(adapter))
		be_schedule_err_detection(adapter);
}
static void be_log_sfp_info(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_sfp_info(adapter);
	if (!status) {
		dev_err(&adapter->pdev->dev,
			"Unqualified SFP+ detected on %c from %s part no: %s",
			adapter->port_name, adapter->phy.vendor_name,
			adapter->phy.vendor_pn);
	}
	adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
}
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	/* EQ-delay update for Skyhawk is done while notifying EQ */
	if (!skyhawk_chip(adapter))
		be_eqd_update(adapter, false);

	if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
		be_log_sfp_info(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		pci_iounmap(adapter->pdev, adapter->csr);
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
}
static int db_bar(struct be_adapter *adapter)
{
	if (lancer_chip(adapter) || be_virtfn(adapter))
		return 0;
	else
		return 4;
}
static int be_roce_map_pci_bars(struct be_adapter *adapter)
{
	if (skyhawk_chip(adapter)) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
							      db_bar(adapter));
		adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
							       db_bar(adapter));
	}
	return 0;
}
static int be_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;
	u32 sli_intf;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
		if (be_physfn(adapter)) {
			/* PCICFG is the 2nd BAR in BE2 */
			addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
			if (!addr)
				goto pci_map_err;
			adapter->pcicfg = addr;
		} else {
			adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
		}
	}

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
static void be_drv_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
	struct device *dev = &adapter->pdev->dev;

	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);

	mem = &adapter->stats_cmd;
	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);
}
/* Allocate and initialize various fields in be_adapter struct */
static int be_drv_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
	struct device *dev = &adapter->pdev->dev;
	int status = 0;

	/* The mailbox must be 16-byte aligned; over-allocate by 16 bytes and
	 * align the usable VA/DMA addresses below.
	 */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
						 &mbox_mem_alloc->dma,
						 GFP_KERNEL);
	if (!mbox_mem_alloc->va)
		return -ENOMEM;

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
					    &rx_filter->dma, GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	if (lancer_chip(adapter))
		stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
	stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
					    &stats_cmd->dma, GFP_KERNEL);
	if (!stats_cmd->va) {
		status = -ENOMEM;
		goto free_rx_filter;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);
	init_completion(&adapter->et_cmd_compl);

	pci_save_state(adapter->pdev);

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->be_err_detection_work,
			  be_err_detection_task);

	adapter->rx_fc = true;
	adapter->tx_fc = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	return 0;

free_rx_filter:
	dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
free_mbox:
	dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
			  mbox_mem_alloc->dma);
	return status;
}
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	be_cancel_err_detection(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_unmap_pci_bars(adapter);
	be_drv_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
static ssize_t be_hwmon_show_temp(struct device *dev,
				  struct device_attribute *dev_attr,
				  char *buf)
{
	struct be_adapter *adapter = dev_get_drvdata(dev);

	/* Unit: millidegree Celsius */
	if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
		return -EIO;
	else
		return sprintf(buf, "%u\n",
			       adapter->hwmon_info.be_on_die_temp * 1000);
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
			  be_hwmon_show_temp, NULL, 1);

static struct attribute *be_hwmon_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL
};

ATTRIBUTE_GROUPS(be_hwmon);
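/* The sensor is exposed through the hwmon sysfs tree, e.g.:
 *   cat /sys/class/hwmon/hwmonX/temp1_input    (millidegrees Celsius)
 * Reads fail with -EIO until the first die-temperature query completes.
 */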
static char *mc_name(struct be_adapter *adapter)
{
	char *str = "";	/* default */

	switch (adapter->mc_type) {
	case UMC:
		str = "UMC";
		break;
	case FLEX10:
		str = "FLEX10";
		break;
	case vNIC1:
		str = "vNIC-1";
		break;
	case nPAR:
		str = "nPAR";
		break;
	case UFP:
		str = "UFP";
		break;
	case vNIC2:
		str = "vNIC-2";
		break;
	default:
		str = "";
	}

	return str;
}
static inline char *func_name(struct be_adapter *adapter)
{
	return be_physfn(adapter) ? "PF" : "VF";
}
static inline char *nic_name(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case OC_DEVICE_ID1:
		return OC_NAME;
	case OC_DEVICE_ID2:
		return OC_NAME_BE;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		return OC_NAME_LANCER;
	case BE_DEVICE_ID2:
		return BE3_NAME;
	case OC_DEVICE_ID5:
	case OC_DEVICE_ID6:
		return OC_NAME_SH;
	default:
		return BE_NAME;
	}
}
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	struct be_adapter *adapter;
	struct net_device *netdev;
	int status = 0;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev,
						   DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_map_pci_bars(adapter);
	if (status)
		goto free_netdev;

	status = be_drv_init(adapter);
	if (status)
		goto unmap_bars;

	status = be_setup(adapter);
	if (status)
		goto drv_cleanup;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter);

	/* On Die temperature not supported for VF. */
	if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
		adapter->hwmon_info.hwmon_dev =
			devm_hwmon_device_register_with_groups(&pdev->dev,
							       DRV_NAME,
							       adapter,
							       be_hwmon_groups);
		adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
	}

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), adapter->port_name);

	return 0;

unsetup:
	be_clear(adapter);
drv_cleanup:
	be_drv_cleanup(adapter);
unmap_bars:
	be_unmap_pci_bars(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	be_cancel_err_detection(adapter);

	be_cleanup(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
static int be_pci_resume(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status = 0;

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_resume(adapter);
	if (status)
		return status;

	be_schedule_err_detection(adapter);

	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	cancel_delayed_work_sync(&adapter->work);
	be_cancel_err_detection(adapter);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	if (!be_check_error(adapter, BE_ERROR_EEH)) {
		be_set_error(adapter, BE_ERROR_EEH);

		be_cancel_err_detection(adapter);

		be_cleanup(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_error(adapter, BE_CLEAR_ALL);
	return PCI_ERS_RESULT_RECOVERED;
}
static void be_eeh_resume(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status = 0;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_resume(adapter);
	if (status)
		goto err;

	be_schedule_err_detection(adapter);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	u16 num_vf_qs;
	int status;

	if (!num_vfs)
		be_vf_clear(adapter);

	adapter->num_vfs = num_vfs;

	if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
		dev_warn(&pdev->dev,
			 "Cannot disable VFs while they are assigned\n");
		return -EBUSY;
	}

	/* When the HW is in SRIOV capable configuration, the PF-pool resources
	 * are equally distributed across the max-number of VFs. The user may
	 * request only a subset of the max-vfs to be enabled.
	 * Based on num_vfs, redistribute the resources across num_vfs so that
	 * each VF will have access to more number of resources.
	 * This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
		num_vf_qs = be_calculate_vf_qs(adapter, adapter->num_vfs);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
						 adapter->num_vfs, num_vf_qs);
		if (status)
			dev_err(&pdev->dev,
				"Failed to optimize SR-IOV resources\n");
	}

	status = be_get_resources(adapter);
	if (status)
		return be_cmd_status(status);

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_update_queues(adapter);
	rtnl_unlock();
	if (status)
		return be_cmd_status(status);

	if (adapter->num_vfs)
		status = be_vf_setup(adapter);

	if (!status)
		return adapter->num_vfs;

	return 0;
}
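/* Invoked by the PCI core when VFs are toggled through sysfs, e.g.:
 *   echo 4 > /sys/bus/pci/devices/<BDF>/sriov_numvfs
 * The num_vfs module parameter is obsolete (see be_init_module()).
 */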
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_pci_resume,
	.shutdown = be_shutdown,
	.sriov_configure = be_pci_sriov_configure,
	.err_handler = &be_eeh_handlers
};
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	if (num_vfs > 0) {
		pr_info(DRV_NAME " : Module param num_vfs is obsolete.");
		pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);