/* bnx2x_cmn.h: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2011 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 */

#include <linux/types.h>
#include <linux/netdevice.h>

extern int num_queues;

/************************ Macros ********************************/

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) \
			dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		kfree((void *)x); \
		x = NULL; \
	} while (0)

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset((void *)x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = kzalloc(size, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
	} while (0)

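/*
 * Illustrative sketch (not part of the driver): the allocation helpers above
 * are written to be used inside a function that provides an "alloc_mem_err"
 * label and a local "bp" pointer, along these lines (field names here are
 * examples only):
 *
 *	static int example_alloc(struct bnx2x *bp)
 *	{
 *		BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
 *		BNX2X_ALLOC(bp->ilt, sizeof(struct bnx2x_ilt));
 *		return 0;
 *
 *	alloc_mem_err:
 *		BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
 *		return -ENOMEM;
 *	}
 *
 * The real callers are bnx2x_alloc_mem()/bnx2x_free_mem() and friends.
 */
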
/*********************** Interfaces ****************************
 *  Functions that need to be implemented by each driver version
 */

/**
 * Initialize link parameters structure variables.
 */
u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode);

/**
 * Configure hw according to link parameters structure.
 */
void bnx2x_link_set(struct bnx2x *bp);

/**
 * @return 0 - link is UP
 */
u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes);

/**
 * Handles link status change.
 */
void bnx2x__link_status_update(struct bnx2x *bp);

/**
 * Report link status to upper layer.
 */
void bnx2x_link_report(struct bnx2x *bp);

/* Non-atomic version of bnx2x_link_report() */
void __bnx2x_link_report(struct bnx2x *bp);

/**
 * Calculates MF speed according to current linespeed and MF
 * configuration.
 */
u16 bnx2x_get_mf_speed(struct bnx2x *bp);

/**
 * MSI-X slowpath interrupt handler
 *
 * @param dev_instance
 *
 * @return irqreturn_t
 */
irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance);

/**
 * Non-MSI-X interrupt handler
 *
 * @param dev_instance
 *
 * @return irqreturn_t
 */
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance);

/**
 * Send command to cnic driver
 */
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);

/**
 * Provides cnic information for proper interrupt handling
 */
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);

/**
 * Enable HW interrupts.
 */
void bnx2x_int_enable(struct bnx2x *bp);

/**
 * Disable interrupts. This function ensures that no ISRs or
 * SP DPCs (sp_task) are running after it returns.
 *
 * @param disable_hw if true, disable HW interrupts.
 */
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);

/**
 * Loads device firmware
 */
int bnx2x_init_firmware(struct bnx2x *bp);

/**
 * Init HW blocks according to current initialization stage:
 * COMMON, PORT or FUNCTION.
 *
 * @param load_code: COMMON, PORT or FUNCTION
 */
int bnx2x_init_hw(struct bnx2x *bp, u32 load_code);

/**
 * Init driver internals.
 *
 * @param load_code COMMON, PORT or FUNCTION
 */
void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);

/**
 * Allocate driver's memory.
 */
int bnx2x_alloc_mem(struct bnx2x *bp);

/**
 * Release driver's memory.
 */
void bnx2x_free_mem(struct bnx2x *bp);

int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
		       int is_leading);

/**
 * Set number of queues according to mode
 */
void bnx2x_set_num_queues(struct bnx2x *bp);

/**
 * Cleanup chip internals:
 * - Cleanup MAC configuration.
 */
void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode);

/**
 * @param resource Resource bit which was locked
 */
int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource);

/**
 * @param bp driver handle
 * @param resource Resource bit which was locked
 */
int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);

/**
 * Configure eth MAC address in the HW according to the value in
 * netdev->dev_addr.
 *
 * @param bp driver handle
 */
void bnx2x_set_eth_mac(struct bnx2x *bp, int set);

/**
 * Set/Clear FIP MAC(s) at the next entries in the CAM after the ETH
 * MAC(s). This function will wait until the ramrod completion returns.
 *
 * @param bp driver handle
 * @param set set or clear the CAM entry
 *
 * @return 0 if success, -ENODEV if ramrod doesn't return.
 */
int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set);

/**
 * Set/Clear ALL_ENODE mcast MAC.
 */
int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set);

/**
 * Set MAC filtering configurations.
 *
 * @remarks called with netif_tx_lock from dev_mcast.c
 *
 * @param dev net_device
 */
void bnx2x_set_rx_mode(struct net_device *dev);

/**
 * Configure MAC filtering rules in the FW.
 *
 * @param bp driver handle
 */
void bnx2x_set_storm_rx_mode(struct bnx2x *bp);

/* Parity errors related */
void bnx2x_inc_load_cnt(struct bnx2x *bp);
u32 bnx2x_dec_load_cnt(struct bnx2x *bp);
bool bnx2x_chk_parity_attn(struct bnx2x *bp);
bool bnx2x_reset_is_done(struct bnx2x *bp);
void bnx2x_disable_close_the_gate(struct bnx2x *bp);

/**
 * Perform statistics handling according to event
 *
 * @param bp driver handle
 * @param event bnx2x_stats_event
 */
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/**
 * Handle ramrods completion
 *
 * @param fp fastpath handle for the event
 * @param rr_cqe eth_rx_cqe
 */
void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);

/**
 * Init/halt function before/after sending
 * CLIENT_SETUP/CFC_DEL for the first/last client.
 */
int bnx2x_func_start(struct bnx2x *bp);

/**
 * Prepare ILT configurations according to current driver
 * parameters.
 */
void bnx2x_ilt_set_info(struct bnx2x *bp);

/**
 * Initialize dcbx protocol
 */
void bnx2x_dcbx_init(struct bnx2x *bp);

/**
 * Set power state to the requested value. Currently only D0 and
 * D3hot are supported.
 *
 * @param state D0 or D3hot
 */
int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);

/**
 * Updates MAX part of MF configuration in HW
 */
void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value);

/* dev_close main block */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);

/* dev_open main block */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode);

/* hard_xmit callback */
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);

/* select_queue callback */
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);

int bnx2x_change_mac_addr(struct net_device *dev, void *p);

/* NAPI poll Rx part */
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);

/* NAPI poll Tx part */
int bnx2x_tx_int(struct bnx2x_fastpath *fp);

/* suspend/resume callbacks */
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state);
int bnx2x_resume(struct pci_dev *pdev);

/* Release IRQ vectors */
void bnx2x_free_irq(struct bnx2x *bp);

void bnx2x_free_fp_mem(struct bnx2x *bp);
int bnx2x_alloc_fp_mem(struct bnx2x *bp);

void bnx2x_init_rx_rings(struct bnx2x *bp);
void bnx2x_free_skbs(struct bnx2x *bp);
void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
void bnx2x_netif_start(struct bnx2x *bp);

/**
 * Fill msix_table, request vectors, update num_queues according
 * to number of available vectors
 */
int bnx2x_enable_msix(struct bnx2x *bp);

/**
 * Request MSI mode from OS, update internals accordingly
 */
int bnx2x_enable_msi(struct bnx2x *bp);

int bnx2x_poll(struct napi_struct *napi, int budget);

/**
 * Allocate/release memories outside main driver structure
 */
int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp);
void bnx2x_free_mem_bp(struct bnx2x *bp);

/**
 * Change mtu netdev callback
 */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu);

u32 bnx2x_fix_features(struct net_device *dev, u32 features);
int bnx2x_set_features(struct net_device *dev, u32 features);

/**
 * tx timeout netdev callback
 */
void bnx2x_tx_timeout(struct net_device *dev);

/**
 * vlan rx register netdev callback
 */
void bnx2x_vlan_rx_register(struct net_device *dev,
			    struct vlan_group *vlgrp);

static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	barrier(); /* status block is written to by the chip */
	fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID];
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW assumes
	 * BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp,
		       BAR_USTRORM_INTMEM + fp->ustorm_rx_prods_offset + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

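/*
 * Illustrative sketch (assumption, not lifted from the driver): the Rx fast
 * path refills BDs/SGEs, advances its local producer copies and then pushes
 * all three producers to the chip in one call, e.g.:
 *
 *	fp->rx_bd_prod = bd_prod;
 *	fp->rx_comp_prod = sw_comp_prod;
 *	bnx2x_update_rx_prod(bp, fp, bd_prod, fp->rx_comp_prod,
 *			     fp->rx_sge_prod);
 *
 * The main call site is bnx2x_rx_int() in bnx2x_cmn.c.
 */
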
static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
					u8 segment, u16 index, u8 op,
					u8 update, u32 igu_addr)
{
	struct igu_regular cmd_data = {0};

	cmd_data.sb_id_and_flags =
			((index << IGU_REGULAR_SB_INDEX_SHIFT) |
			 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
			 (update << IGU_REGULAR_BUPDATE_SHIFT) |
			 (op << IGU_REGULAR_ENABLE_INT_SHIFT));

	DP(NETIF_MSG_HW, "write 0x%08x to IGU addr 0x%x\n",
	   cmd_data.sb_id_and_flags, igu_addr);
	REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags);

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}

static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp,
					  u8 idu_sb_id, bool is_Pf)
{
	u32 data, ctl, cnt = 100;
	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
	u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
	u32 sb_bit = 1 << (idu_sb_id%32);
	u32 func_encode = BP_FUNC(bp) |
			((is_Pf == true ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT);
	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;

	/* Not supported in BC mode */
	if (CHIP_INT_MODE_IS_BC(bp))
		return;

	data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
			<< IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
		IGU_REGULAR_CLEANUP_SET |
		IGU_REGULAR_BCLEANUP;

	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
	      func_encode << IGU_CTRL_REG_FID_SHIFT |
	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   data, igu_addr_data);
	REG_WR(bp, igu_addr_data, data);
	mmiowb();
	barrier();
	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   ctl, igu_addr_ctl);
	REG_WR(bp, igu_addr_ctl, ctl);
	mmiowb();
	barrier();

	/* wait for clean up to finish */
	while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
		msleep(20);

	if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
		DP(NETIF_MSG_HW, "Unable to finish IGU cleanup: "
		   "idu_sb_id %d offset %d bit %d (cnt %d)\n",
		   idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
	}
}

static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id,
				   u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}

static inline void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
				    u16 index, u8 op, u8 update)
{
	u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;

	bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
			     igu_addr);
}

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 storm,
				u16 index, u8 op, u8 update)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_ack_sb(bp, igu_sb_id, storm, index, op, update);
	else {
		u8 segment;

		if (CHIP_INT_MODE_IS_BC(bp))
			segment = storm;
		else if (igu_sb_id != bp->igu_dsb_id)
			segment = IGU_SEG_ACCESS_DEF;
		else if (storm == ATTENTION_ID)
			segment = IGU_SEG_ACCESS_ATTN;
		else
			segment = IGU_SEG_ACCESS_DEF;
		bnx2x_igu_ack_sb(bp, igu_sb_id, segment, index, op, update);
	}
}

static inline u16 bnx2x_hc_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}

static inline u16 bnx2x_igu_ack_int(struct bnx2x *bp)
{
	u32 igu_addr = (BAR_IGU_INTMEM + IGU_REG_SISR_MDPC_WMASK_LSB_UPPER*8);
	u32 result = REG_RD(bp, igu_addr);

	DP(NETIF_MSG_HW, "read 0x%08x from IGU addr 0x%x\n",
	   result, igu_addr);

	return result;
}

static inline u16 bnx2x_ack_int(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		return bnx2x_hc_ack_int(bp);
	else
		return bnx2x_igu_ack_int(bp);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return fp->tx_pkt_prod != fp->tx_pkt_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

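/*
 * Illustrative sketch (assumption): bnx2x_tx_avail() is the check used for
 * stopping/waking the netdev Tx queue; the thresholds below are placeholders,
 * the real ones live in bnx2x_start_xmit()/bnx2x_tx_int():
 *
 *	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3))
 *		netif_tx_stop_queue(txq);
 *	...
 *	if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
 *		netif_tx_wake_queue(txq);
 */
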
static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 hw_cons;

	/* Tell compiler that status block fields can change */
	barrier();
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	return hw_cons != fp->tx_pkt_cons;
}

static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}

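/*
 * Illustrative sketch (assumption): the two helpers above are the cheap
 * checks polled by the NAPI handler before doing real work, roughly:
 *
 *	if (bnx2x_has_tx_work(fp))
 *		bnx2x_tx_int(fp);
 *
 *	if (bnx2x_has_rx_work(fp))
 *		work_done += bnx2x_rx_int(fp, budget - work_done);
 *
 * See bnx2x_poll() in bnx2x_cmn.c for the actual loop and re-arm logic.
 */
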
/**
 * Disables tx from stack point of view
 */
static inline void bnx2x_tx_disable(struct bnx2x *bp)
{
	netif_tx_disable(bp->dev);
	netif_carrier_off(bp->dev);
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_add_all_napi(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_napi_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, BNX2X_NAPI_WEIGHT);
}

static inline void bnx2x_del_all_napi(struct bnx2x *bp)
{
	int i;

	for_each_napi_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
}

static inline void bnx2x_disable_msi(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG) {
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;
	} else if (bp->flags & USING_MSI_FLAG) {
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;
	}
}

static inline int bnx2x_calc_num_queues(struct bnx2x *bp)
{
	return num_queues ?
		min_t(int, num_queues, BNX2X_MAX_QUEUES(bp)) :
		min_t(int, num_online_cpus(), BNX2X_MAX_QUEUES(bp));
}

static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, skb->data, fp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod,
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
				      u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	dma_sync_single_for_device(&bp->pdev->dev,
				   dma_unmap_addr(cons_rx_buf, mapping),
				   RX_COPY_THRESH, DMA_FROM_DEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	dma_unmap_addr_set(prod_rx_buf, mapping,
			   dma_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 fp->rx_buf_size, DMA_FROM_DEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}

static inline void bnx2x_init_tx_ring_one(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_TX_RINGS; i++) {
		struct eth_tx_next_bd *tx_next_bd =
			&fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;

		tx_next_bd->addr_hi =
			cpu_to_le32(U64_HI(fp->tx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		tx_next_bd->addr_lo =
			cpu_to_le32(U64_LO(fp->tx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
	}

	SET_FLAG(fp->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
	fp->tx_db.data.zero_fill1 = 0;
	fp->tx_db.data.prod = 0;
}

static inline void bnx2x_init_tx_rings(struct bnx2x *bp)
{
	int i;

	for_each_tx_queue(bp, i)
		bnx2x_init_tx_ring_one(&bp->fp[i]);
}

static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RX_RINGS; i++) {
		struct eth_rx_bd *rx_bd;

		rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
		rx_bd->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		rx_bd->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
	}
}

static inline void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		struct eth_rx_sge *sge;

		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
		sge->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		sge->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
	}
}

static inline void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RCQ_RINGS; i++) {
		struct eth_rx_cqe_next_page *nextpg;

		nextpg = (struct eth_rx_cqe_next_page *)
			&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
		nextpg->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_comp_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		nextpg->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_comp_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
	}
}

/* Returns the number of actually allocated BDs */
static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
				     int rx_ring_size)
{
	struct bnx2x *bp = fp->bp;
	u16 ring_prod, cqe_ring_prod;
	int i;

	fp->rx_comp_cons = 0;
	cqe_ring_prod = ring_prod = 0;

	/* This routine is called only during fp init so
	 * fp->eth_q_stats.rx_skb_alloc_failed = 0
	 */
	for (i = 0; i < rx_ring_size; i++) {
		if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			continue;
		}
		ring_prod = NEXT_RX_IDX(ring_prod);
		cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
		WARN_ON(ring_prod <= (i - fp->eth_q_stats.rx_skb_alloc_failed));
	}

	if (fp->eth_q_stats.rx_skb_alloc_failed)
		BNX2X_ERR("was only able to allocate "
			  "%d rx skbs on queue[%d]\n",
			  (i - fp->eth_q_stats.rx_skb_alloc_failed), fp->index);

	fp->rx_bd_prod = ring_prod;
	/* Limit the CQE producer by the CQE ring size */
	fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
				 cqe_ring_prod);
	fp->rx_pkt = fp->rx_calls = 0;

	return i - fp->eth_q_stats.rx_skb_alloc_failed;
}

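/*
 * Illustrative sketch (assumption): during Rx queue init the "next page"
 * helpers above and bnx2x_alloc_rx_bds() are typically combined as:
 *
 *	bnx2x_set_next_page_rx_bd(fp);
 *	bnx2x_set_next_page_rx_cq(fp);
 *	bnx2x_set_next_page_sgl(fp);
 *	bnx2x_init_sge_ring_bit_mask(fp);
 *	ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
 *
 * The authoritative sequence lives in bnx2x_init_rx_rings() and
 * bnx2x_alloc_fp_mem().
 */
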
static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
{
	bnx2x_fcoe(bp, cl_id) = BNX2X_FCOE_ETH_CL_ID +
		BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
	bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID;
	bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
	bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
	bnx2x_fcoe(bp, bp) = bp;
	bnx2x_fcoe(bp, state) = BNX2X_FP_STATE_CLOSED;
	bnx2x_fcoe(bp, index) = FCOE_IDX;
	bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
	bnx2x_fcoe(bp, tx_cons_sb) = BNX2X_FCOE_L2_TX_INDEX;
	/* qZone id equals to FW (per path) client id */
	bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fcoe(bp, cl_id) +
		BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
			     ETH_MAX_RX_CLIENTS_E1H);
	bnx2x_fcoe(bp, ustorm_rx_prods_offset) = CHIP_IS_E2(bp) ?
		USTORM_RX_PRODS_E2_OFFSET(bnx2x_fcoe(bp, cl_qzone_id)) :
		USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp),
					   bnx2x_fcoe_fp(bp)->cl_id);
}

static inline void __storm_memset_struct(struct bnx2x *bp,
					 u32 addr, size_t size, u32 *data)
{
	int i;

	for (i = 0; i < size/4; i++)
		REG_WR(bp, addr + (i * 4), data[i]);
}

static inline void storm_memset_mac_filters(struct bnx2x *bp,
			struct tstorm_eth_mac_filter_config *mac_filters,
			u16 abs_fid)
{
	size_t size = sizeof(struct tstorm_eth_mac_filter_config);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_MAC_FILTER_CONFIG_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
}

static inline void storm_memset_cmng(struct bnx2x *bp,
				     struct cmng_struct_per_port *cmng,
				     u8 port)
{
	size_t size =
		sizeof(struct rate_shaping_vars_per_port) +
		sizeof(struct fairness_vars_per_port) +
		sizeof(struct safc_struct_per_port) +
		sizeof(struct pfc_struct_per_port);

	u32 addr = BAR_XSTRORM_INTMEM +
			XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);

	__storm_memset_struct(bp, addr, size, (u32 *)cmng);

	addr += size + 4 /* SKIP DCB+LLFC */;
	size = sizeof(struct cmng_struct_per_port) -
		size /* written */ - 4 /*skipped*/;

	__storm_memset_struct(bp, addr, size,
			      (u32 *)(cmng->traffic_type_to_priority_cos));
}

/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp);
void bnx2x_release_phy_lock(struct bnx2x *bp);

/**
 * Extracts MAX BW part from MF configuration.
 */
static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
{
	u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
		      FUNC_MF_CFG_MAX_BW_SHIFT;
	if (!max_cfg) {
		BNX2X_ERR("Illegal configuration detected for Max BW - "
			  "using 100 instead\n");
		max_cfg = 100;
	}
	return max_cfg;
}

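/*
 * Illustrative sketch (assumption; variable names are examples only): callers
 * typically scale the current link speed by the extracted MAX BW value, e.g.:
 *
 *	u16 max_cfg = bnx2x_extract_max_cfg(bp, mf_cfg);
 *	line_speed = (line_speed * max_cfg) / 100;
 */
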
#endif /* BNX2X_CMN_H */