/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _MLX4_EN_H_
#define _MLX4_EN_H_
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/net_tstamp.h>
#ifdef CONFIG_MLX4_EN_DCB
#include <linux/dcbnl.h>
#endif
#include <linux/cpu_rmap.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/qp.h>
#include <linux/mlx4/cq.h>
#include <linux/mlx4/srq.h>
#include <linux/mlx4/doorbell.h>
#include <linux/mlx4/cmd.h>
#define DRV_NAME "mlx4_en"
#define DRV_VERSION "2.2-1"
#define DRV_RELDATE "Feb 2014"

#define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
#define MLX4_EN_PAGE_SHIFT 12
#define MLX4_EN_PAGE_SIZE (1 << MLX4_EN_PAGE_SHIFT)
#define DEF_RX_RINGS 16
#define MAX_RX_RINGS 128
#define MIN_RX_RINGS 4
#define TXBB_SIZE 64
#define HEADROOM (2048 / TXBB_SIZE + 1)
#define STAMP_STRIDE 64
#define STAMP_DWORDS (STAMP_STRIDE / 4)
#define STAMP_SHIFT 31
#define STAMP_VAL 0x7fffffff
#define STATS_DELAY (HZ / 4)
#define SERVICE_TASK_DELAY (HZ / 4)
#define MAX_NUM_OF_FS_RULES 256

#define MLX4_EN_FILTER_HASH_SHIFT 4
#define MLX4_EN_FILTER_EXPIRY_QUOTA 60
/* Typical TSO descriptor with 16 gather entries is 352 bytes... */
#define MAX_DESC_SIZE 512
#define MAX_DESC_TXBBS (MAX_DESC_SIZE / TXBB_SIZE)
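/*
 * Worked example (illustrative): with TXBB_SIZE 64, the typical
 * 352-byte TSO descriptor above spans DIV_ROUND_UP(352, 64) = 6
 * TXBBs, while the 512-byte cap allows at most
 * MAX_DESC_TXBBS = 512 / 64 = 8 TXBBs per descriptor.
 */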
/*
 * OS related constants and tunables
 */

#define MLX4_EN_WATCHDOG_TIMEOUT (15 * HZ)
/* Use the maximum between 16384 and a single page */
#define MLX4_EN_ALLOC_SIZE PAGE_ALIGN(16384)

#define MLX4_EN_ALLOC_PREFER_ORDER PAGE_ALLOC_COSTLY_ORDER
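/*
 * Worked example (illustrative): PAGE_ALIGN(16384) rounds 16384 up to
 * a multiple of PAGE_SIZE, so MLX4_EN_ALLOC_SIZE is 16384 on 4K-page
 * systems but becomes 65536 on 64K-page systems - i.e. "the maximum
 * between 16384 and a single page", as the comment above says.
 */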
/* Receive fragment sizes; we use at most 3 fragments (for 9600 byte MTU
 * and 4K allocations) */
enum {
	FRAG_SZ0 = 1536 - NET_IP_ALIGN,
	FRAG_SZ1 = 4096,
	FRAG_SZ2 = 4096,
	FRAG_SZ3 = MLX4_EN_ALLOC_SIZE
};
#define MLX4_EN_MAX_RX_FRAGS 4

/* Maximum ring sizes */
#define MLX4_EN_MAX_TX_SIZE 8192
#define MLX4_EN_MAX_RX_SIZE 8192

/* Minimum ring size for our page-allocation scheme to work */
#define MLX4_EN_MIN_RX_SIZE (MLX4_EN_ALLOC_SIZE / SMP_CACHE_BYTES)
#define MLX4_EN_MIN_TX_SIZE (4096 / TXBB_SIZE)
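/*
 * Worked example (illustrative): on a typical system with 4K pages and
 * 64-byte cache lines, the minimums above evaluate to
 * 16384 / 64 = 256 Rx descriptors and 4096 / 64 = 64 TXBBs.
 */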
#define MLX4_EN_SMALL_PKT_SIZE 64
#define MLX4_EN_MAX_TX_RING_P_UP 32
#define MLX4_EN_NUM_UP 8
#define MLX4_EN_DEF_TX_RING_SIZE 512
#define MLX4_EN_DEF_RX_RING_SIZE 1024
#define MAX_TX_RINGS (MLX4_EN_MAX_TX_RING_P_UP * \
		      MLX4_EN_NUM_UP)
/* Target number of packets to coalesce with interrupt moderation */
#define MLX4_EN_RX_COAL_TARGET 44
#define MLX4_EN_RX_COAL_TIME 0x10

#define MLX4_EN_TX_COAL_PKTS 16
#define MLX4_EN_TX_COAL_TIME 0x10

#define MLX4_EN_RX_RATE_LOW 400000
#define MLX4_EN_RX_COAL_TIME_LOW 0
#define MLX4_EN_RX_RATE_HIGH 450000
#define MLX4_EN_RX_COAL_TIME_HIGH 128
#define MLX4_EN_RX_SIZE_THRESH 1024
#define MLX4_EN_RX_RATE_THRESH (1000000 / MLX4_EN_RX_COAL_TIME_HIGH)
#define MLX4_EN_SAMPLE_INTERVAL 0
#define MLX4_EN_AVG_PKT_SMALL 256
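/*
 * Illustrative sketch (not part of this header) of how the adaptive
 * moderation knobs above are typically combined: derive a packet rate
 * over the sampling window, then interpolate the coalescing time
 * linearly between the LOW and HIGH operating points.  "rate",
 * "period" and "moder_time" are assumed locals:
 *
 *	rate = packets * HZ / period;
 *	if (rate < MLX4_EN_RX_RATE_LOW)
 *		moder_time = MLX4_EN_RX_COAL_TIME_LOW;
 *	else if (rate > MLX4_EN_RX_RATE_HIGH)
 *		moder_time = MLX4_EN_RX_COAL_TIME_HIGH;
 *	else
 *		moder_time = (rate - MLX4_EN_RX_RATE_LOW) *
 *			(MLX4_EN_RX_COAL_TIME_HIGH - MLX4_EN_RX_COAL_TIME_LOW) /
 *			(MLX4_EN_RX_RATE_HIGH - MLX4_EN_RX_RATE_LOW) +
 *			MLX4_EN_RX_COAL_TIME_LOW;
 */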
#define MLX4_EN_AUTO_CONF 0xffff

#define MLX4_EN_DEF_RX_PAUSE 1
#define MLX4_EN_DEF_TX_PAUSE 1
/* Interval between successive polls in the Tx routine when polling is used
   instead of interrupts (in per-core Tx rings) - should be power of 2 */
#define MLX4_EN_TX_POLL_MODER 16
#define MLX4_EN_TX_POLL_TIMEOUT (HZ / 4)
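/*
 * Illustrative note: MLX4_EN_TX_POLL_MODER is a power of 2 so that an
 * "every Nth poll" check reduces to a cheap mask instead of a
 * division, e.g. with an assumed local counter "poll_cnt":
 *
 *	if (!(++poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)))
 *		do_periodic_work();
 *
 * where do_periodic_work() is a hypothetical stand-in for whatever
 * runs once per MLX4_EN_TX_POLL_MODER polls.
 */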
#define ETH_LLC_SNAP_SIZE 8

#define SMALL_PACKET_SIZE (256 - NET_IP_ALIGN)
#define HEADER_COPY_SIZE (128 - NET_IP_ALIGN)
#define MLX4_LOOPBACK_TEST_PAYLOAD (HEADER_COPY_SIZE - ETH_HLEN)

#define MLX4_EN_MIN_MTU 46
#define ETH_BCAST 0xffffffffffffULL

#define MLX4_EN_LOOPBACK_RETRIES 5
#define MLX4_EN_LOOPBACK_TIMEOUT 100
#ifdef MLX4_EN_PERF_STAT
/* Number of samples to 'average' */
#define AVG_SIZE 128
#define AVG_FACTOR 1024
#define NUM_PERF_STATS NUM_PERF_COUNTERS

#define INC_PERF_COUNTER(cnt) (++(cnt))
#define ADD_PERF_COUNTER(cnt, add) ((cnt) += (add))
#define AVG_PERF_COUNTER(cnt, sample) \
	((cnt) = ((cnt) * (AVG_SIZE - 1) + (sample) * AVG_FACTOR) / AVG_SIZE)
#define GET_PERF_COUNTER(cnt) (cnt)
#define GET_AVG_PERF_COUNTER(cnt) ((cnt) / AVG_FACTOR)

#else

#define NUM_PERF_STATS 0

#define INC_PERF_COUNTER(cnt) do {} while (0)
#define ADD_PERF_COUNTER(cnt, add) do {} while (0)
#define AVG_PERF_COUNTER(cnt, sample) do {} while (0)
#define GET_PERF_COUNTER(cnt) (0)
#define GET_AVG_PERF_COUNTER(cnt) (0)
#endif /* MLX4_EN_PERF_STAT */
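/*
 * Usage sketch (illustrative): AVG_PERF_COUNTER() maintains an
 * exponentially weighted moving average in fixed point - each sample
 * is scaled up by AVG_FACTOR so the integer division by AVG_SIZE
 * keeps precision - and GET_AVG_PERF_COUNTER() scales back down:
 *
 *	AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);
 *	avg = GET_AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg);
 *
 * "tx_pktsz_avg" and "avg" are assumed names for the example; when
 * MLX4_EN_PERF_STAT is off, all of these compile away to no-ops.
 */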
/* Constants for TX flow */
enum {
	MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
	MAX_BF = 256,
	MIN_PKT_LEN = 17,
};
#define ROUNDUP_LOG2(x) ilog2(roundup_pow_of_two(x))
#define XNOR(x, y) (!(x) == !(y))
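/*
 * Example (illustrative): XNOR() is logical equivalence on truth
 * values - XNOR(0, 0) == 1, XNOR(3, 7) == 1, XNOR(0, 5) == 0 - which
 * is handy for asserting that two conditions are either both set or
 * both clear.  ROUNDUP_LOG2(x) gives log2 of x rounded up, e.g.
 * ROUNDUP_LOG2(1000) == 10 since roundup_pow_of_two(1000) == 1024.
 */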
struct mlx4_en_tx_info {
	/* ... */
};
#define MLX4_EN_BIT_DESC_OWN 0x80000000
#define CTRL_SIZE sizeof(struct mlx4_wqe_ctrl_seg)
#define MLX4_EN_MEMTYPE_PAD 0x100
#define DS_SIZE sizeof(struct mlx4_wqe_data_seg)
struct mlx4_en_tx_desc {
	struct mlx4_wqe_ctrl_seg ctrl;
	union {
		struct mlx4_wqe_data_seg data; /* at least one data segment */
		struct mlx4_wqe_lso_seg lso;
		struct mlx4_wqe_inline_seg inl;
	};
};
#define MLX4_EN_USE_SRQ 0x01000000

#define MLX4_EN_CX3_LOW_ID 0x1000
#define MLX4_EN_CX3_HIGH_ID 0x1005
struct mlx4_en_rx_alloc {
	/* ... */
};
struct mlx4_en_tx_ring {
	struct mlx4_hwq_resources wqres;
	u32 size; /* number of TXBBs */
	/* ... */
	u16 cqn; /* index of port CQ associated with this ring */
	/* ... */
	struct mlx4_en_tx_info *tx_info;
	/* ... */
	cpumask_t affinity_mask;
	/* ... */
	struct mlx4_qp_context context;
	enum mlx4_qp_state qp_state;
	struct mlx4_srq dummy;
	unsigned long packets;
	unsigned long tx_csum;
	unsigned long queue_stopped;
	unsigned long wake_queue;
	/* ... */
	struct netdev_queue *tx_queue;
	int hwtstamp_tx_type;
};
struct mlx4_en_rx_desc {
	/* actual number of entries depends on rx ring stride */
	struct mlx4_wqe_data_seg data[0];
};
struct mlx4_en_rx_ring {
	struct mlx4_hwq_resources wqres;
	struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
	u32 size; /* number of Rx descs */
	/* ... */
	u16 cqn; /* index of port CQ associated with this ring */
	/* ... */
	unsigned long packets;
#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned long yields;
	unsigned long misses;
	unsigned long cleaned;
#endif
	unsigned long csum_ok;
	unsigned long csum_none;
	int hwtstamp_rx_filter;
	cpumask_var_t affinity_mask;
};
struct mlx4_en_cq {
	/* ... */
	struct mlx4_hwq_resources wqres;
	int ring;
	struct net_device *dev;
	struct napi_struct napi;
	/* ... */
	struct mlx4_cqe *buf;
#define MLX4_EN_OPCODE_ERROR 0x1e

#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned int state;
#define MLX4_EN_CQ_STATE_IDLE        0
#define MLX4_EN_CQ_STATE_NAPI        1 /* NAPI owns this CQ */
#define MLX4_EN_CQ_STATE_POLL        2 /* poll owns this CQ */
#define MLX4_CQ_LOCKED (MLX4_EN_CQ_STATE_NAPI | MLX4_EN_CQ_STATE_POLL)
#define MLX4_EN_CQ_STATE_NAPI_YIELD  4 /* NAPI yielded this CQ */
#define MLX4_EN_CQ_STATE_POLL_YIELD  8 /* poll yielded this CQ */
#define CQ_YIELD (MLX4_EN_CQ_STATE_NAPI_YIELD | MLX4_EN_CQ_STATE_POLL_YIELD)
#define CQ_USER_PEND (MLX4_EN_CQ_STATE_POLL | MLX4_EN_CQ_STATE_POLL_YIELD)
	spinlock_t poll_lock; /* protects from LLS/napi conflicts */
#endif /* CONFIG_NET_RX_BUSY_POLL */
};
struct mlx4_en_port_profile {
	/* ... */
};
struct mlx4_en_profile {
	/* ... */
	u8 num_tx_rings_p_up;
	struct mlx4_en_port_profile prof[MLX4_MAX_PORTS + 1];
};
struct mlx4_en_dev {
	struct mlx4_dev *dev;
	struct pci_dev *pdev;
	struct mutex state_lock;
	struct net_device *pndev[MLX4_MAX_PORTS + 1];
	/* ... */
	struct mlx4_en_profile profile;
	/* ... */
	struct workqueue_struct *workqueue;
	struct device *dma_device;
	void __iomem *uar_map;
	struct mlx4_uar priv_uar;
	/* ... */
	u8 mac_removed[MLX4_MAX_PORTS + 1];
	/* ... */
	struct cyclecounter cycles;
	struct timecounter clock;
	unsigned long last_overflow_check;
	unsigned long overflow_period;
	struct ptp_clock *ptp_clock;
	struct ptp_clock_info ptp_clock_info;
};
struct mlx4_en_rss_map {
	/* ... */
	struct mlx4_qp qps[MAX_RX_RINGS];
	enum mlx4_qp_state state[MAX_RX_RINGS];
	struct mlx4_qp indir_qp;
	enum mlx4_qp_state indir_state;
};
struct mlx4_en_port_state {
	/* ... */
};
struct mlx4_en_pkt_stats {
	unsigned long broadcast;
	unsigned long rx_prio[8];
	unsigned long tx_prio[8];
#define NUM_PKT_STATS 17
};
struct mlx4_en_port_stats {
	unsigned long tso_packets;
	unsigned long queue_stopped;
	unsigned long wake_queue;
	unsigned long tx_timeout;
	unsigned long rx_alloc_failed;
	unsigned long rx_chksum_good;
	unsigned long rx_chksum_none;
	unsigned long tx_chksum_offload;
#define NUM_PORT_STATS 8
};
struct mlx4_en_perf_stats {
	/* ... */
#define NUM_PERF_COUNTERS 6
};
enum mlx4_en_mclist_act {
	MCLIST_NONE,
	MCLIST_REM,
	MCLIST_ADD,
};
struct mlx4_en_mc_list {
	struct list_head list;
	enum mlx4_en_mclist_act action;
	/* ... */
};
struct mlx4_en_frag_info {
	u16 frag_prefix_size;
	/* ... */
};
#ifdef CONFIG_MLX4_EN_DCB
/* Minimal TC BW - setting to 0 will block traffic */
#define MLX4_EN_BW_MIN 1
#define MLX4_EN_BW_MAX 100 /* Utilize 100% of the line */

#define MLX4_EN_TC_ETS 7
#endif
struct ethtool_flow_id {
	struct list_head list;
	struct ethtool_rx_flow_spec flow_spec;
	u64 id;
};
enum {
	MLX4_EN_FLAG_PROMISC		= (1 << 0),
	MLX4_EN_FLAG_MC_PROMISC		= (1 << 1),
	/* whether we need to enable hardware loopback by putting dmac
	 * in Tx vlan tag */
	MLX4_EN_FLAG_ENABLE_HW_LOOPBACK	= (1 << 2),
	/* whether we need to drop packets that hardware loopback-ed */
	MLX4_EN_FLAG_RX_FILTER_NEEDED	= (1 << 3),
	MLX4_EN_FLAG_FORCE_PROMISC	= (1 << 4)
};
#define MLX4_EN_MAC_HASH_SIZE (1 << BITS_PER_BYTE)
#define MLX4_EN_MAC_HASH_IDX 5
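/*
 * Illustrative sketch: the unicast filter table has one bucket per
 * possible value of a single MAC byte (1 << BITS_PER_BYTE == 256
 * buckets), and MLX4_EN_MAC_HASH_IDX selects which byte of the
 * address indexes it.  A lookup, under those assumptions, would be:
 *
 *	struct mlx4_mac_entry *entry;
 *	unsigned int idx = mac[MLX4_EN_MAC_HASH_IDX];
 *
 *	hlist_for_each_entry_rcu(entry, &priv->mac_hash[idx], hlist)
 *		if (ether_addr_equal_64bits(entry->mac, mac))
 *			return entry;
 */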
struct mlx4_en_priv {
	struct mlx4_en_dev *mdev;
	struct mlx4_en_port_profile *prof;
	struct net_device *dev;
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
	struct net_device_stats stats;
	struct net_device_stats ret_stats;
	struct mlx4_en_port_state port_state;
	spinlock_t stats_lock;
	struct ethtool_flow_id ethtool_rules[MAX_NUM_OF_FS_RULES];
	/* To allow rules removal while port is going down */
	struct list_head ethtool_list;

	unsigned long last_moder_packets[MAX_RX_RINGS];
	unsigned long last_moder_tx_packets;
	unsigned long last_moder_bytes[MAX_RX_RINGS];
	unsigned long last_moder_jiffies;
	int last_moder_time[MAX_RX_RINGS];
	/* ... */
	u16 adaptive_rx_coal;
	/* ... */
	u32 validate_loopback;
	struct mlx4_hwq_resources res;
	/* ... */
	unsigned char prev_mac[ETH_ALEN + 2];
	/* ... */
	struct mlx4_en_rss_map rss_map;
	/* ... */
	u8 num_tx_rings_p_up;
	/* ... */
	struct mlx4_en_frag_info frag_info[MLX4_EN_MAX_RX_FRAGS];
	/* ... */
	struct mlx4_en_tx_ring **tx_ring;
	struct mlx4_en_rx_ring *rx_ring[MAX_RX_RINGS];
	struct mlx4_en_cq **tx_cq;
	struct mlx4_en_cq *rx_cq[MAX_RX_RINGS];
	struct mlx4_qp drop_qp;
	struct work_struct rx_mode_task;
	struct work_struct watchdog_task;
	struct work_struct linkstate_task;
	struct delayed_work stats_task;
	struct delayed_work service_task;
#ifdef CONFIG_MLX4_EN_VXLAN
	struct work_struct vxlan_add_task;
	struct work_struct vxlan_del_task;
#endif
	struct mlx4_en_perf_stats pstats;
	struct mlx4_en_pkt_stats pkstats;
	struct mlx4_en_port_stats port_stats;
	/* ... */
	struct list_head mc_list;
	struct list_head curr_list;
	/* ... */
	struct mlx4_en_stat_out_mbox hw_stats;
	/* ... */
	struct hlist_head mac_hash[MLX4_EN_MAC_HASH_SIZE];
	struct hwtstamp_config hwtstamp_config;

#ifdef CONFIG_MLX4_EN_DCB
	struct ieee_ets ets;
	u16 maxrate[IEEE_8021QAZ_MAX_TCS];
#endif
#ifdef CONFIG_RFS_ACCEL
	spinlock_t filters_lock;
	/* ... */
	struct list_head filters;
	struct hlist_head filter_hash[1 << MLX4_EN_FILTER_HASH_SHIFT];
#endif
	/* ... */
};
enum mlx4_en_wol {
	MLX4_EN_WOL_MAGIC = (1ULL << 61),
	MLX4_EN_WOL_ENABLED = (1ULL << 62),
};
struct mlx4_mac_entry {
	struct hlist_node hlist;
	unsigned char mac[ETH_ALEN + 2];
	/* ... */
};
#ifdef CONFIG_NET_RX_BUSY_POLL
static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq)
{
	spin_lock_init(&cq->poll_lock);
	cq->state = MLX4_EN_CQ_STATE_IDLE;
}
/* called from the device poll routine to get ownership of a cq */
static inline bool mlx4_en_cq_lock_napi(struct mlx4_en_cq *cq)
{
	int rc = true;

	spin_lock(&cq->poll_lock);
	if (cq->state & MLX4_CQ_LOCKED) {
		WARN_ON(cq->state & MLX4_EN_CQ_STATE_NAPI);
		cq->state |= MLX4_EN_CQ_STATE_NAPI_YIELD;
		rc = false;
	} else
		/* we don't care if someone yielded */
		cq->state = MLX4_EN_CQ_STATE_NAPI;
	spin_unlock(&cq->poll_lock);
	return rc;
}
/* returns true if someone tried to get the cq while napi had it */
static inline bool mlx4_en_cq_unlock_napi(struct mlx4_en_cq *cq)
{
	int rc = false;

	spin_lock(&cq->poll_lock);
	WARN_ON(cq->state & (MLX4_EN_CQ_STATE_POLL |
			     MLX4_EN_CQ_STATE_NAPI_YIELD));

	if (cq->state & MLX4_EN_CQ_STATE_POLL_YIELD)
		rc = true;
	cq->state = MLX4_EN_CQ_STATE_IDLE;
	spin_unlock(&cq->poll_lock);
	return rc;
}
/* called from mlx4_en_low_latency_poll() */
static inline bool mlx4_en_cq_lock_poll(struct mlx4_en_cq *cq)
{
	int rc = true;

	spin_lock_bh(&cq->poll_lock);
	if ((cq->state & MLX4_CQ_LOCKED)) {
		struct net_device *dev = cq->dev;
		struct mlx4_en_priv *priv = netdev_priv(dev);
		struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];

		cq->state |= MLX4_EN_CQ_STATE_POLL_YIELD;
		rc = false;
		rx_ring->yields++;
	} else
		/* preserve yield marks */
		cq->state |= MLX4_EN_CQ_STATE_POLL;
	spin_unlock_bh(&cq->poll_lock);
	return rc;
}
/* returns true if someone tried to get the cq while it was locked */
static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq)
{
	int rc = false;

	spin_lock_bh(&cq->poll_lock);
	WARN_ON(cq->state & (MLX4_EN_CQ_STATE_NAPI));

	if (cq->state & MLX4_EN_CQ_STATE_POLL_YIELD)
		rc = true;
	cq->state = MLX4_EN_CQ_STATE_IDLE;
	spin_unlock_bh(&cq->poll_lock);
	return rc;
}
/* true if a socket is polling, even if it did not get the lock */
static inline bool mlx4_en_cq_busy_polling(struct mlx4_en_cq *cq)
{
	WARN_ON(!(cq->state & MLX4_CQ_LOCKED));
	return cq->state & CQ_USER_PEND;
}
#else
static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq)
{
}

static inline bool mlx4_en_cq_lock_napi(struct mlx4_en_cq *cq)
{
	return true;
}

static inline bool mlx4_en_cq_unlock_napi(struct mlx4_en_cq *cq)
{
	return false;
}

static inline bool mlx4_en_cq_lock_poll(struct mlx4_en_cq *cq)
{
	return false;
}

static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq)
{
	return false;
}

static inline bool mlx4_en_cq_busy_polling(struct mlx4_en_cq *cq)
{
	return false;
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
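/*
 * Usage sketch (illustrative, not from this file): a NAPI poll handler
 * built on the ownership helpers above takes NAPI ownership of the CQ,
 * does its work, and only completes and re-arms when the budget was
 * not exhausted.  "budget", "done" and "napi" are assumed
 * locals/arguments:
 *
 *	if (!mlx4_en_cq_lock_napi(cq))
 *		return budget;
 *	done = mlx4_en_process_rx_cq(cq->dev, cq, budget);
 *	mlx4_en_cq_unlock_napi(cq);
 *	if (done < budget) {
 *		napi_complete(napi);
 *		mlx4_en_arm_cq(netdev_priv(cq->dev), cq);
 *	}
 *	return done;
 */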
#define MLX4_EN_WOL_DO_MODIFY (1ULL << 63)
void mlx4_en_update_loopback_state(struct net_device *dev,
				   netdev_features_t features);

void mlx4_en_destroy_netdev(struct net_device *dev);
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
			struct mlx4_en_port_profile *prof);
int mlx4_en_start_port(struct net_device *dev);
void mlx4_en_stop_port(struct net_device *dev, int detach);

void mlx4_en_free_resources(struct mlx4_en_priv *priv);
int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq,
		      int entries, int ring, enum cq_type mode, int node);
void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq);
int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
			int cq_idx);
void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
void mlx4_en_tx_irq(struct mlx4_cq *mcq);
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
			 void *accel_priv, select_queue_fallback_t fallback);
netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_tx_ring **pring,
			   int qpn, u32 size, u16 stride,
			   int node, int queue_index);
void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_tx_ring **pring);
int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_tx_ring *ring,
			     int cq, int user_prio);
void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_tx_ring *ring);
void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev);
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_rx_ring **pring,
			   u32 size, u16 stride, int node);
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_rx_ring **pring,
			     u32 size, u16 stride);
int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv);
void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring);
int mlx4_en_process_rx_cq(struct net_device *dev,
			  struct mlx4_en_cq *cq,
			  int budget);
int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget);
void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
			     int is_tx, int rss, int qpn, int cqn, int user_prio,
			     struct mlx4_qp_context *context);
void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event);
int mlx4_en_map_buffer(struct mlx4_buf *buf);
void mlx4_en_unmap_buffer(struct mlx4_buf *buf);
void mlx4_en_calc_rx_buf(struct net_device *dev);
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv);
void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv);
int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring);
void mlx4_en_rx_irq(struct mlx4_cq *mcq);
int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);
int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv);

int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset);
int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port);
#ifdef CONFIG_MLX4_EN_DCB
extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops;
extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops;
#endif

int mlx4_en_setup_tc(struct net_device *dev, u8 up);
#ifdef CONFIG_RFS_ACCEL
void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv);
#endif
#define MLX4_EN_NUM_SELF_TEST 5
void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf);
void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev);
/*
 * Functions for time stamping
 */
u64 mlx4_en_get_cqe_ts(struct mlx4_cqe *cqe);
void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
			    struct skb_shared_hwtstamps *hwts,
			    u64 timestamp);
void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev);
void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev);
int mlx4_en_timestamp_config(struct net_device *dev,
			     int tx_type, int rx_filter);
extern const struct ethtool_ops mlx4_en_ethtool_ops;
/*
 * printk / logging functions
 */

__printf(3, 4)
int en_print(const char *level, const struct mlx4_en_priv *priv,
	     const char *format, ...);

#define en_dbg(mlevel, priv, format, ...) \
do { \
	if (NETIF_MSG_##mlevel & (priv)->msg_enable) \
		en_print(KERN_DEBUG, priv, format, ##__VA_ARGS__); \
} while (0)
#define en_warn(priv, format, ...) \
	en_print(KERN_WARNING, priv, format, ##__VA_ARGS__)
#define en_err(priv, format, ...) \
	en_print(KERN_ERR, priv, format, ##__VA_ARGS__)
#define en_info(priv, format, ...) \
	en_print(KERN_INFO, priv, format, ##__VA_ARGS__)
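/*
 * Usage sketch (illustrative): en_dbg() emits only when the given
 * NETIF_MSG_* category is enabled in priv->msg_enable (see
 * MLX4_EN_MSG_LEVEL above for the defaults); the other wrappers print
 * unconditionally at their level:
 *
 *	en_dbg(LINK, priv, "link state changed\n");
 *	en_err(priv, "Failed to allocate RX ring %d\n", ring_idx);
 *
 * "ring_idx" is an assumed local; "msg_enable" is a field of
 * struct mlx4_en_priv not shown in this excerpt.
 */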
#define mlx4_err(mdev, format, ...) \
	pr_err(DRV_NAME " %s: " format, \
	       dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)
#define mlx4_info(mdev, format, ...) \
	pr_info(DRV_NAME " %s: " format, \
		dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)
#define mlx4_warn(mdev, format, ...) \
	pr_warn(DRV_NAME " %s: " format, \
		dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)

#endif /* _MLX4_EN_H_ */