/* bnx2x_cmn.h: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 */
#ifndef BNX2X_CMN_H
#define BNX2X_CMN_H

#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

#include "bnx2x.h"
/* This is used as a replacement for an MCP if it's not present */
extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */

extern int num_queues;
/************************ Macros ********************************/

#define BNX2X_PCI_FREE(x, y, size) \
        do { \
                if (x) { \
                        dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
                        x = NULL; \
                        y = 0; \
                } \
        } while (0)

#define BNX2X_FREE(x) \
        do { \
                if (x) { \
                        kfree((void *)x); \
                        x = NULL; \
                } \
        } while (0)

#define BNX2X_PCI_ALLOC(x, y, size) \
        do { \
                x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
                if (x == NULL) \
                        goto alloc_mem_err; \
                memset((void *)x, 0, size); \
        } while (0)

#define BNX2X_ALLOC(x, size) \
        do { \
                x = kzalloc(size, GFP_KERNEL); \
                if (x == NULL) \
                        goto alloc_mem_err; \
        } while (0)
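/*
 * Illustrative usage sketch (not part of the original header): the macros
 * above assume a local "bp" pointer and an "alloc_mem_err" label in the
 * calling function, and the alloc/free pairs mirror each other on the error
 * path, e.g.:
 *
 *      BNX2X_PCI_ALLOC(ring, &mapping, ring_size);     // zeroed DMA-coherent
 *      BNX2X_ALLOC(sw_ring, sw_ring_size);             // zeroed kmalloc
 *      return 0;
 *
 * alloc_mem_err:
 *      BNX2X_FREE(sw_ring);
 *      BNX2X_PCI_FREE(ring, mapping, ring_size);
 *      return -ENOMEM;
 *
 * "ring", "mapping", "sw_ring" and the sizes are hypothetical names used
 * only for illustration.
 */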
/*********************** Interfaces ****************************
 *  Functions that need to be implemented by each driver version
 */

/**
 * bnx2x_send_unload_req - request unload mode from the MCP.
 *
 * @unload_mode:	requested function's unload mode
 *
 * Return unload mode returned by the MCP: COMMON, PORT or FUNC.
 */
u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode);

/**
 * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
 */
void bnx2x_send_unload_done(struct bnx2x *bp);
/**
 * bnx2x_config_rss_pf - configure RSS parameters in a PF.
 *
 * @rss_obj:		RSS object to use
 * @ind_table:		indirection table to configure
 * @config_hash:	re-configure RSS hash keys configuration
 */
int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
			bool config_hash);

/**
 * bnx2x__init_func_obj - init function object
 *
 * Initializes the Function Object with the appropriate
 * parameters which include a function slow path driver
 * interface.
 */
void bnx2x__init_func_obj(struct bnx2x *bp);

/**
 * bnx2x_setup_queue - setup eth queue.
 *
 * @fp:		pointer to the fastpath structure
 */
int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
		      bool leading);
/**
 * bnx2x_setup_leading - bring up a leading eth queue.
 */
int bnx2x_setup_leading(struct bnx2x *bp);

/**
 * bnx2x_fw_command - send the MCP a request
 *
 * @param:	request's parameter
 *
 * block until there is a reply
 */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);

/**
 * bnx2x_initial_phy_init - initialize link parameters structure variables.
 *
 * @load_mode:	current mode
 */
u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode);

/**
 * bnx2x_link_set - configure hw according to link parameters structure.
 */
void bnx2x_link_set(struct bnx2x *bp);

/**
 * bnx2x_link_test - query link status.
 *
 * Returns 0 if link is UP.
 */
u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes);

/**
 * bnx2x_drv_pulse - write driver pulse to shmem
 *
 * writes the value in bp->fw_drv_pulse_wr_seq to drv_pulse mbox
 */
void bnx2x_drv_pulse(struct bnx2x *bp);

/**
 * bnx2x_igu_ack_sb - update IGU with current SB value
 *
 * @segment:	SB segment
 * @update:	is HW update required
 */
void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
		      u16 index, u8 op, u8 update);

/* Disable transactions from chip to host */
void bnx2x_pf_disable(struct bnx2x *bp);

/**
 * bnx2x__link_status_update - handles link status change.
 */
void bnx2x__link_status_update(struct bnx2x *bp);

/**
 * bnx2x_link_report - report link status to upper layer.
 */
void bnx2x_link_report(struct bnx2x *bp);
/* Non-atomic version of bnx2x_link_report() */
void __bnx2x_link_report(struct bnx2x *bp);
/**
 * bnx2x_get_mf_speed - calculate MF speed.
 *
 * Takes into account current linespeed and MF configuration.
 */
u16 bnx2x_get_mf_speed(struct bnx2x *bp);

/**
 * bnx2x_msix_sp_int - MSI-X slowpath interrupt handler
 *
 * @dev_instance:	private instance
 */
irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance);

/**
 * bnx2x_interrupt - non MSI-X interrupt handler
 *
 * @dev_instance:	private instance
 */
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance);

/**
 * bnx2x_cnic_notify - send command to cnic driver
 */
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);

/**
 * bnx2x_setup_cnic_irq_info - provides cnic with IRQ information
 */
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);

/**
 * bnx2x_setup_cnic_info - provides cnic with updated info
 */
void bnx2x_setup_cnic_info(struct bnx2x *bp);

/**
 * bnx2x_int_enable - enable HW interrupts.
 */
void bnx2x_int_enable(struct bnx2x *bp);
/**
 * bnx2x_int_disable_sync - disable interrupts.
 *
 * @disable_hw:	true, disable HW interrupts.
 *
 * This function ensures that there are no
 * ISRs or SP DPCs (sp_task) running after it returns.
 */
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);
/**
 * bnx2x_nic_init - init driver internals.
 *
 * @load_code:	COMMON, PORT or FUNCTION
 */
void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);

/**
 * bnx2x_alloc_mem - allocate driver's memory.
 */
int bnx2x_alloc_mem(struct bnx2x *bp);

/**
 * bnx2x_free_mem - release driver's memory.
 */
void bnx2x_free_mem(struct bnx2x *bp);

/**
 * bnx2x_set_num_queues - set number of queues according to mode.
 */
void bnx2x_set_num_queues(struct bnx2x *bp);

/**
 * bnx2x_chip_cleanup - cleanup chip internals.
 *
 * @unload_mode:	COMMON, PORT, FUNCTION
 *
 * - Cleanup MAC configuration.
 */
void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode);

/**
 * bnx2x_acquire_hw_lock - acquire HW lock.
 *
 * @resource:	resource bit which was locked
 */
int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource);

/**
 * bnx2x_release_hw_lock - release HW lock.
 *
 * @resource:	resource bit which was locked
 */
int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);

/**
 * bnx2x_release_leader_lock - release recovery leader lock
 */
int bnx2x_release_leader_lock(struct bnx2x *bp);

/**
 * bnx2x_set_eth_mac - configure eth MAC address in the HW
 *
 * Configures according to the value in netdev->dev_addr.
 */
int bnx2x_set_eth_mac(struct bnx2x *bp, bool set);

/**
 * bnx2x_set_rx_mode - set MAC filtering configurations.
 *
 * called with netif_tx_lock from dev_mcast.c
 * If bp->state is OPEN, should be called with
 * netif_addr_lock_bh()
 */
void bnx2x_set_rx_mode(struct net_device *dev);

/**
 * bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW.
 *
 * If bp->state is OPEN, should be called with
 * netif_addr_lock_bh().
 */
void bnx2x_set_storm_rx_mode(struct bnx2x *bp);

/**
 * bnx2x_set_q_rx_mode - configures rx_mode for a single queue.
 *
 * @rx_mode_flags:	rx mode configuration
 * @rx_accept_flags:	rx accept configuration
 * @tx_accept_flags:	tx accept configuration (tx switch)
 * @ramrod_flags:	ramrod configuration
 */
void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
			 unsigned long rx_mode_flags,
			 unsigned long rx_accept_flags,
			 unsigned long tx_accept_flags,
			 unsigned long ramrod_flags);

/* Parity errors related */
void bnx2x_set_pf_load(struct bnx2x *bp);
bool bnx2x_clear_pf_load(struct bnx2x *bp);
bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print);
bool bnx2x_reset_is_done(struct bnx2x *bp, int engine);
void bnx2x_set_reset_in_progress(struct bnx2x *bp);
void bnx2x_set_reset_global(struct bnx2x *bp);
void bnx2x_disable_close_the_gate(struct bnx2x *bp);

/**
 * bnx2x_sp_event - handle ramrods completion.
 *
 * @fp:		fastpath handle for the event
 * @rr_cqe:	eth_rx_cqe
 */
void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);

/**
 * bnx2x_ilt_set_info - prepare ILT configurations.
 */
void bnx2x_ilt_set_info(struct bnx2x *bp);

/**
 * bnx2x_dcbx_init - initialize dcbx protocol.
 */
void bnx2x_dcbx_init(struct bnx2x *bp, bool update_shmem);

/**
 * bnx2x_set_power_state - set power state to the requested value.
 *
 * @state:	required state D0 or D3hot
 *
 * Currently only D0 and D3hot are supported.
 */
int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);

/**
 * bnx2x_update_max_mf_config - update MAX part of MF configuration in HW.
 */
void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value);

void bnx2x_panic_dump(struct bnx2x *bp);

void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl);
/* validate correct fw is loaded */
bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err);
/* dev_close main block */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);

/* dev_open main block */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode);

/* hard_xmit callback */
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);

/* setup_tc callback */
int bnx2x_setup_tc(struct net_device *dev, u8 num_tc);

/* select_queue callback */
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);

int bnx2x_reload_if_running(struct net_device *dev);

int bnx2x_change_mac_addr(struct net_device *dev, void *p);

/* NAPI poll Rx part */
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);

void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			  u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod);

/* NAPI poll Tx part */
int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata);

/* suspend/resume callbacks */
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state);
int bnx2x_resume(struct pci_dev *pdev);

/* Release IRQ vectors */
void bnx2x_free_irq(struct bnx2x *bp);

void bnx2x_free_fp_mem(struct bnx2x *bp);
int bnx2x_alloc_fp_mem(struct bnx2x *bp);
void bnx2x_init_rx_rings(struct bnx2x *bp);
void bnx2x_free_skbs(struct bnx2x *bp);
void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
void bnx2x_netif_start(struct bnx2x *bp);

/**
 * bnx2x_enable_msix - set msix configuration.
 *
 * fills msix_table, requests vectors, updates num_queues
 * according to number of available vectors.
 */
int bnx2x_enable_msix(struct bnx2x *bp);
/**
 * bnx2x_enable_msi - request msi mode from OS, update internals accordingly
 */
int bnx2x_enable_msi(struct bnx2x *bp);
/**
 * bnx2x_poll - NAPI callback
 *
 * @napi:	napi structure
 */
int bnx2x_poll(struct napi_struct *napi, int budget);
/**
 * bnx2x_alloc_mem_bp - allocate memories outside main driver structure
 */
int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp);

/**
 * bnx2x_free_mem_bp - release memories outside main driver structure
 */
void bnx2x_free_mem_bp(struct bnx2x *bp);
/**
 * bnx2x_change_mtu - change mtu netdev callback
 *
 * @new_mtu:	requested mtu
 */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu);
#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
/**
 * bnx2x_fcoe_get_wwn - return the requested WWN value for this port
 *
 * @wwn:	output buffer
 * @type:	WWN type: NETDEV_FCOE_WWNN (node) or NETDEV_FCOE_WWPN (port)
 */
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type);
#endif
netdev_features_t bnx2x_fix_features(struct net_device *dev,
				     netdev_features_t features);
int bnx2x_set_features(struct net_device *dev, netdev_features_t features);

/**
 * bnx2x_tx_timeout - tx timeout netdev callback
 */
void bnx2x_tx_timeout(struct net_device *dev);
/*********************** Inlines **********************************/
/*********************** Fast path ********************************/
static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        barrier(); /* status block is written to by the chip */
        fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID];
}
static inline void bnx2x_update_rx_prod_gen(struct bnx2x *bp,
                        struct bnx2x_fastpath *fp, u16 bd_prod,
                        u16 rx_comp_prod, u16 rx_sge_prod, u32 start)
{
        struct ustorm_eth_rx_producers rx_prods = {0};
        u32 i;

        /* Update producers */
        rx_prods.bd_prod = bd_prod;
        rx_prods.cqe_prod = rx_comp_prod;
        rx_prods.sge_prod = rx_sge_prod;

        /*
         * Make sure that the BD and SGE data is updated before updating the
         * producers since FW might read the BD/SGE right after the producer
         * is updated.
         * This is only applicable for weak-ordered memory model archs such
         * as IA-64. The following barrier is also mandatory since FW will
         * assume BDs must have buffers.
         */
        wmb();

        for (i = 0; i < sizeof(rx_prods)/4; i++)
                REG_WR(bp, start + i*4, ((u32 *)&rx_prods)[i]);

        mmiowb(); /* keep prod updates ordered */

        DP(NETIF_MSG_RX_STATUS,
           "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
           fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
                                        u8 segment, u16 index, u8 op,
                                        u8 update, u32 igu_addr)
{
        struct igu_regular cmd_data = {0};

        cmd_data.sb_id_and_flags =
                        ((index << IGU_REGULAR_SB_INDEX_SHIFT) |
                         (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
                         (update << IGU_REGULAR_BUPDATE_SHIFT) |
                         (op << IGU_REGULAR_ENABLE_INT_SHIFT));

        DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
           cmd_data.sb_id_and_flags, igu_addr);
        REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags);

        /* Make sure that ACK is written */
        mmiowb();
        barrier();
}
static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id,
                                   u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

        /* Make sure that ACK is written */
        mmiowb();
        barrier();
}
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 storm,
                                u16 index, u8 op, u8 update)
{
        if (bp->common.int_block == INT_BLOCK_HC)
                bnx2x_hc_ack_sb(bp, igu_sb_id, storm, index, op, update);
        else {
                u8 segment;

                if (CHIP_INT_MODE_IS_BC(bp))
                        segment = storm;
                else if (igu_sb_id != bp->igu_dsb_id)
                        segment = IGU_SEG_ACCESS_DEF;
                else if (storm == ATTENTION_ID)
                        segment = IGU_SEG_ACCESS_ATTN;
                else
                        segment = IGU_SEG_ACCESS_DEF;
                bnx2x_igu_ack_sb(bp, igu_sb_id, segment, index, op, update);
        }
}
static inline u16 bnx2x_hc_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        barrier();
        return result;
}
static inline u16 bnx2x_igu_ack_int(struct bnx2x *bp)
{
        u32 igu_addr = (BAR_IGU_INTMEM + IGU_REG_SISR_MDPC_WMASK_LSB_UPPER*8);
        u32 result = REG_RD(bp, igu_addr);

        DP(NETIF_MSG_INTR, "read 0x%08x from IGU addr 0x%x\n",
           result, igu_addr);

        barrier();
        return result;
}
static inline u16 bnx2x_ack_int(struct bnx2x *bp)
{
        barrier();
        if (bp->common.int_block == INT_BLOCK_HC)
                return bnx2x_hc_ack_int(bp);
        else
                return bnx2x_igu_ack_int(bp);
}
static inline int bnx2x_has_tx_work_unload(struct bnx2x_fp_txdata *txdata)
{
        /* Tell compiler that consumer and producer can change */
        barrier();
        return txdata->tx_pkt_prod != txdata->tx_pkt_cons;
}
static inline u16 bnx2x_tx_avail(struct bnx2x *bp,
                                 struct bnx2x_fp_txdata *txdata)
{
        s16 used;
        u16 prod;
        u16 cons;

        prod = txdata->tx_bd_prod;
        cons = txdata->tx_bd_cons;

        used = SUB_S16(prod, cons);

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > txdata->tx_ring_size);
        WARN_ON((txdata->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(txdata->tx_ring_size) - used;
}
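/*
 * Illustrative use only (hedged; the real checks live in the .c files): the
 * xmit path typically stops a queue when the value returned by
 * bnx2x_tx_avail() can no longer cover the worst-case BD count of one
 * packet, along the lines of:
 *
 *      if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 4))
 *              netif_tx_stop_queue(txq);
 *
 * The exact threshold and the "txq" variable are hypothetical here.
 */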
static inline int bnx2x_tx_queue_has_work(struct bnx2x_fp_txdata *txdata)
{
        u16 hw_cons;

        /* Tell compiler that status block fields can change */
        barrier();
        hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
        return hw_cons != txdata->tx_pkt_cons;
}
static inline bool bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
        u8 cos;

        for_each_cos_in_tx_queue(fp, cos)
                if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
                        return true;
        return false;
}
static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
        u16 rx_cons_sb;

        /* Tell compiler that status block fields can change */
        barrier();
        rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
        if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
                rx_cons_sb++;
        return (fp->rx_comp_cons != rx_cons_sb);
}
/**
 * bnx2x_tx_disable - disables tx from stack point of view
 */
static inline void bnx2x_tx_disable(struct bnx2x *bp)
{
        netif_tx_disable(bp->dev);
        netif_carrier_off(bp->dev);
}
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
                       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}
static inline void bnx2x_add_all_napi(struct bnx2x *bp)
{
        int i;

        bp->num_napi_queues = bp->num_queues;

        /* Add NAPI objects */
        for_each_rx_queue(bp, i)
                netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
                               bnx2x_poll, BNX2X_NAPI_WEIGHT);
}
static inline void bnx2x_del_all_napi(struct bnx2x *bp)
{
        int i;

        for_each_rx_queue(bp, i)
                netif_napi_del(&bnx2x_fp(bp, i, napi));
}
void bnx2x_set_int_mode(struct bnx2x *bp);
static inline void bnx2x_disable_msi(struct bnx2x *bp)
{
        if (bp->flags & USING_MSIX_FLAG) {
                pci_disable_msix(bp->pdev);
                bp->flags &= ~(USING_MSIX_FLAG | USING_SINGLE_MSIX_FLAG);
        } else if (bp->flags & USING_MSI_FLAG) {
                pci_disable_msi(bp->pdev);
                bp->flags &= ~USING_MSI_FLAG;
        }
}
static inline int bnx2x_calc_num_queues(struct bnx2x *bp)
{
        return  num_queues ?
                 min_t(int, num_queues, BNX2X_MAX_QUEUES(bp)) :
                 min_t(int, netif_get_num_default_rss_queues(),
                       BNX2X_MAX_QUEUES(bp));
}
static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
                        idx--;
                }
        }
}
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask));

        /* Clear the two last indices in the page to 1:
           these are the indices that correspond to the "next" element,
           hence will never be indicated and should be removed from
           the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}
/* note that we are not allocating a new buffer,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static inline void bnx2x_reuse_rx_data(struct bnx2x_fastpath *fp,
                                       u16 cons, u16 prod)
{
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        dma_unmap_addr_set(prod_rx_buf, mapping,
                           dma_unmap_addr(cons_rx_buf, mapping));
        prod_rx_buf->data = cons_rx_buf->data;
        *prod_bd = *cons_bd;
}
/************************* Init ******************************************/

/* returns func by VN for current port */
static inline int func_by_vn(struct bnx2x *bp, int vn)
{
        return 2 * vn + BP_PORT(bp);
}
static inline int bnx2x_config_rss_eth(struct bnx2x *bp, bool config_hash)
{
        return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, config_hash);
}
/**
 * bnx2x_func_start - init function
 *
 * Must be called before sending CLIENT_SETUP for the first client.
 */
static inline int bnx2x_func_start(struct bnx2x *bp)
{
        struct bnx2x_func_state_params func_params = {NULL};
        struct bnx2x_func_start_params *start_params =
                &func_params.params.start;

        /* Prepare parameters for function state transitions */
        __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

        func_params.f_obj = &bp->func_obj;
        func_params.cmd = BNX2X_F_CMD_START;

        /* Function parameters */
        start_params->mf_mode = bp->mf_mode;
        start_params->sd_vlan_tag = bp->mf_ov;

        if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp))
                start_params->network_cos_mode = STATIC_COS;
        else /* CHIP_IS_E1X */
                start_params->network_cos_mode = FW_WRR;

        return bnx2x_func_state_change(bp, &func_params);
}
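/*
 * Illustrative call site (not part of this header): the load path issues the
 * START ramrod through this helper once the function object is ready, e.g.:
 *
 *      rc = bnx2x_func_start(bp);
 *      if (rc) {
 *              BNX2X_ERR("Function start failed!\n");
 *              return rc;
 *      }
 *
 * The error handling shown is a sketch, not the exact driver code.
 */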
/**
 * bnx2x_set_fw_mac_addr - fill in a MAC address in FW format
 *
 * @fw_hi:	pointer to upper part
 * @fw_mid:	pointer to middle part
 * @fw_lo:	pointer to lower part
 * @mac:	pointer to MAC address
 */
static inline void bnx2x_set_fw_mac_addr(u16 *fw_hi, u16 *fw_mid, u16 *fw_lo,
                                         u8 *mac)
{
        ((u8 *)fw_hi)[0]  = mac[1];
        ((u8 *)fw_hi)[1]  = mac[0];
        ((u8 *)fw_mid)[0] = mac[3];
        ((u8 *)fw_mid)[1] = mac[2];
        ((u8 *)fw_lo)[0]  = mac[5];
        ((u8 *)fw_lo)[1]  = mac[4];
}
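/*
 * Worked example (illustration only): for the MAC 00:11:22:33:44:55 the
 * helper above stores mac[1]/mac[0] in the two bytes of *fw_hi, mac[3]/mac[2]
 * in *fw_mid and mac[5]/mac[4] in *fw_lo, so on a little-endian host
 * *fw_hi == 0x0011, *fw_mid == 0x2233 and *fw_lo == 0x4455.
 */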
static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}
static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
{
        int i;

        for (i = 1; i <= NUM_RX_RINGS; i++) {
                struct eth_rx_bd *rx_bd;

                rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
                rx_bd->addr_hi =
                        cpu_to_le32(U64_HI(fp->rx_desc_mapping +
                                    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
                rx_bd->addr_lo =
                        cpu_to_le32(U64_LO(fp->rx_desc_mapping +
                                    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
        }
}
/* Statistics ID are global per chip/path, while Client IDs for E1x are per
 * port.
 */
static inline u8 bnx2x_stats_id(struct bnx2x_fastpath *fp)
{
        struct bnx2x *bp = fp->bp;

        if (!CHIP_IS_E1x(bp)) {
                /* there are special statistics counters for FCoE 136..140 */
                if (IS_FCOE_FP(fp))
                        return bp->cnic_base_cl_id + (bp->pf_num >> 1);
                return fp->cl_id;
        }
        return fp->cl_id + BP_PORT(bp) * FP_SB_MAX_E1x;
}
static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp,
                                               bnx2x_obj_type obj_type)
{
        struct bnx2x *bp = fp->bp;

        /* Configure classification DBs */
        bnx2x_init_mac_obj(bp, &bnx2x_sp_obj(bp, fp).mac_obj, fp->cl_id,
                           fp->cid, BP_FUNC(bp), bnx2x_sp(bp, mac_rdata),
                           bnx2x_sp_mapping(bp, mac_rdata),
                           BNX2X_FILTER_MAC_PENDING,
                           &bp->sp_state, obj_type,
                           &bp->macs_pool);
}
/**
 * bnx2x_get_path_func_num - get number of active functions
 *
 * Calculates the number of active (not hidden) functions on the
 * current path.
 */
static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
{
        u8 func_num = 0, i;

        /* 57710 has only one function per-port */
        if (CHIP_IS_E1(bp))
                return 1;

        /* Calculate a number of functions enabled on the current
         * PATH/PORT.
         */
        if (CHIP_REV_IS_SLOW(bp)) {
                if (IS_MF(bp))
                        func_num = 4;
                else
                        func_num = 2;
        } else {
                for (i = 0; i < E1H_FUNC_MAX / 2; i++) {
                        u32 func_config =
                                MF_CFG_RD(bp,
                                          func_mf_config[BP_PORT(bp) + 2 * i].
                                          config);
                        func_num +=
                                ((func_config & FUNC_MF_CFG_FUNC_HIDE) ? 0 : 1);
                }
        }

        WARN_ON(!func_num);

        return func_num;
}
static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
{
        /* RX_MODE controlling object */
        bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);

        /* multicast configuration controlling object */
        bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
                             BP_FUNC(bp), BP_FUNC(bp),
                             bnx2x_sp(bp, mcast_rdata),
                             bnx2x_sp_mapping(bp, mcast_rdata),
                             BNX2X_FILTER_MCAST_PENDING, &bp->sp_state,
                             BNX2X_OBJ_TYPE_RX);

        /* Setup CAM credit pools */
        bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp),
                                   bnx2x_get_path_func_num(bp));

        /* RSS configuration object */
        bnx2x_init_rss_config_obj(bp, &bp->rss_conf_obj, bp->fp->cl_id,
                                  bp->fp->cid, BP_FUNC(bp), BP_FUNC(bp),
                                  bnx2x_sp(bp, rss_rdata),
                                  bnx2x_sp_mapping(bp, rss_rdata),
                                  BNX2X_FILTER_RSS_CONF_PENDING, &bp->sp_state,
                                  BNX2X_OBJ_TYPE_RX);
}
static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
{
        if (CHIP_IS_E1x(fp->bp))
                return fp->cl_id + BP_PORT(fp->bp) * ETH_MAX_RX_CLIENTS_E1H;
        else
                return fp->cl_id;
}
static inline u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
{
        struct bnx2x *bp = fp->bp;

        if (!CHIP_IS_E1x(bp))
                return USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
        else
                return USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
}
static inline void bnx2x_init_txdata(struct bnx2x *bp,
                                     struct bnx2x_fp_txdata *txdata, u32 cid,
                                     int txq_index, __le16 *tx_cons_sb,
                                     struct bnx2x_fastpath *fp)
{
        txdata->cid = cid;
        txdata->txq_index = txq_index;
        txdata->tx_cons_sb = tx_cons_sb;
        txdata->parent_fp = fp;
        txdata->tx_ring_size = IS_FCOE_FP(fp) ? MAX_TX_AVAIL : bp->tx_ring_size;

        DP(NETIF_MSG_IFUP, "created tx data cid %d, txq %d\n",
           txdata->cid, txdata->txq_index);
}
static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx)
{
        return bp->cnic_base_cl_id + cl_idx +
                (bp->pf_num >> 1) * BNX2X_MAX_CNIC_ETH_CL_ID_IDX;
}
static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp)
{
        /* the 'first' id is allocated for the cnic */
        return bp->base_fw_ndsb;
}
static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp)
{
        return bp->igu_base_sb;
}
static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
{
        struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
        unsigned long q_type = 0;

        bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
        bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
                                                     BNX2X_FCOE_ETH_CL_ID_IDX);
        bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp);
        bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
        bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
        bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
        bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]),
                          fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX,
                          fp);

        DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index);

        /* qZone id equals to FW (per path) client id */
        bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
        bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
                bnx2x_rx_ustorm_prods_offset(fp);

        /* Configure Queue State object */
        __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
        __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);

        /* No multi-CoS for FCoE L2 client */
        BUG_ON(fp->max_cos != 1);

        bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id,
                             &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
                             bnx2x_sp_mapping(bp, q_rdata), q_type);

        DP(NETIF_MSG_IFUP,
           "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
           fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
           fp->igu_sb_id);
}
static inline int bnx2x_clean_tx_queue(struct bnx2x *bp,
                                       struct bnx2x_fp_txdata *txdata)
{
        int cnt = 1000;

        while (bnx2x_has_tx_work_unload(txdata)) {
                if (!cnt) {
                        BNX2X_ERR("timeout waiting for queue[%d]: txdata->tx_pkt_prod(%d) != txdata->tx_pkt_cons(%d)\n",
                                  txdata->txq_index, txdata->tx_pkt_prod,
                                  txdata->tx_pkt_cons);
#ifdef BNX2X_STOP_ON_ERROR
                        bnx2x_panic();
                        return -EBUSY;
#else
                        break;
#endif
                }
                cnt--;
                usleep_range(1000, 1000);
        }

        return 0;
}
int bnx2x_get_link_cfg_idx(struct bnx2x *bp);
static inline void __storm_memset_struct(struct bnx2x *bp,
                                         u32 addr, size_t size, u32 *data)
{
        int i;

        for (i = 0; i < size/4; i++)
                REG_WR(bp, addr + (i * 4), data[i]);
}
/**
 * bnx2x_wait_sp_comp - wait for the outstanding SP commands.
 *
 * @bp:		driver handle
 * @mask:	bits that need to be cleared
 */
static inline bool bnx2x_wait_sp_comp(struct bnx2x *bp, unsigned long mask)
{
        int tout = 5000; /* Wait for 5 secs tops */

        while (tout--) {
                smp_mb();
                netif_addr_lock_bh(bp->dev);
                if (!(bp->sp_state & mask)) {
                        netif_addr_unlock_bh(bp->dev);
                        return true;
                }
                netif_addr_unlock_bh(bp->dev);

                usleep_range(1000, 1000);
        }

        smp_mb();

        netif_addr_lock_bh(bp->dev);
        if (bp->sp_state & mask) {
                BNX2X_ERR("Filtering completion timed out. sp_state 0x%lx, mask 0x%lx\n",
                          bp->sp_state, mask);
                netif_addr_unlock_bh(bp->dev);
                return false;
        }
        netif_addr_unlock_bh(bp->dev);

        return true;
}
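/*
 * Illustrative use only (hedged): a caller that issued a filtering ramrod
 * without RAMROD_COMP_WAIT can poll for the corresponding completion bit
 * with something like:
 *
 *      if (!bnx2x_wait_sp_comp(bp, BNX2X_FILTER_RX_MODE_PENDING))
 *              BNX2X_ERR("rx_mode completion timed out\n");
 *
 * The pending bit shown is an example; real callers pick the bit that
 * matches the command they issued.
 */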
/**
 * bnx2x_set_ctx_validation - set CDU context validation values
 *
 * @bp:		driver handle
 * @cxt:	context of the connection on the host memory
 * @cid:	SW CID of the connection to be configured
 */
void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
			      u32 cid);
void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
				    u8 sb_index, u8 disable, u16 usec);
void bnx2x_acquire_phy_lock(struct bnx2x *bp);
void bnx2x_release_phy_lock(struct bnx2x *bp);
/**
 * bnx2x_extract_max_cfg - extract MAX BW part from MF configuration.
 *
 * @bp:		driver handle
 * @mf_cfg:	MF configuration
 */
static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
{
        u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
                              FUNC_MF_CFG_MAX_BW_SHIFT;
        if (!max_cfg) {
                DP(NETIF_MSG_IFUP | BNX2X_MSG_ETHTOOL,
                   "Max BW configured to 0 - using 100 instead\n");
                max_cfg = 100;
        }

        return max_cfg;
}
/* checks if HW supports GRO for given MTU */
static inline bool bnx2x_mtu_allows_gro(int mtu)
{
        /* gro frags per page */
        int fpp = SGE_PAGE_SIZE / (mtu - ETH_MAX_TPA_HEADER_SIZE);

        /*
         * 1. number of frags should not grow above MAX_SKB_FRAGS
         * 2. frag must fit the page
         */
        return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS;
}
/**
 * bnx2x_get_iscsi_info - update iSCSI params according to licensing info.
 *
 * @bp:		driver handle
 */
void bnx2x_get_iscsi_info(struct bnx2x *bp);
/**
 * bnx2x_link_sync_notify - send notification to other functions.
 *
 * @bp:		driver handle
 */
static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
{
        int func;
        int vn;

        /* Set the attention towards other drivers on the same port */
        for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
                if (vn == BP_VN(bp))
                        continue;

                func = func_by_vn(bp, vn);
                REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
                       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
        }
}
/**
 * bnx2x_update_drv_flags - update flags in shmem
 *
 * @bp:		driver handle
 * @flags:	flags to update
 * @set:	set or clear
 */
static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
{
        if (SHMEM2_HAS(bp, drv_flags)) {
                u32 drv_flags;

                bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_DRV_FLAGS);
                drv_flags = SHMEM2_RD(bp, drv_flags);

                if (set)
                        SET_FLAGS(drv_flags, flags);
                else
                        RESET_FLAGS(drv_flags, flags);

                SHMEM2_WR(bp, drv_flags, drv_flags);
                DP(NETIF_MSG_IFUP, "drv_flags 0x%08x\n", drv_flags);
                bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_DRV_FLAGS);
        }
}
static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr)
{
        if (is_valid_ether_addr(addr))
                return true;

        if (is_zero_ether_addr(addr) &&
            (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)))
                return true;

        return false;
}

#endif /* BNX2X_CMN_H */