/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/net_tstamp.h>
#ifdef CONFIG_MLX4_EN_DCB
#include <linux/dcbnl.h>
#endif
#include <linux/cpu_rmap.h>
#include <linux/ptp_clock_kernel.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/qp.h>
#include <linux/mlx4/cq.h>
#include <linux/mlx4/srq.h>
#include <linux/mlx4/doorbell.h>
#include <linux/mlx4/cmd.h>
#define DRV_NAME	"mlx4_en"
#define DRV_VERSION	"2.2-1"
#define DRV_RELDATE	"Feb 2014"

#define MLX4_EN_MSG_LEVEL	(NETIF_MSG_LINK | NETIF_MSG_IFDOWN)

#define MLX4_EN_PAGE_SHIFT	12
#define MLX4_EN_PAGE_SIZE	(1 << MLX4_EN_PAGE_SHIFT)
#define DEF_RX_RINGS		16
#define MAX_RX_RINGS		128
#define MIN_RX_RINGS		4
#define TXBB_SIZE		64
#define HEADROOM		(2048 / TXBB_SIZE + 1)
#define STAMP_STRIDE		64
#define STAMP_DWORDS		(STAMP_STRIDE / 4)
#define STAMP_SHIFT		31
#define STAMP_VAL		0x7fffffff
#define STATS_DELAY		(HZ / 4)
#define SERVICE_TASK_DELAY	(HZ / 4)
#define MAX_NUM_OF_FS_RULES	256

#define MLX4_EN_FILTER_HASH_SHIFT 4
#define MLX4_EN_FILTER_EXPIRY_QUOTA 60
/* Typical TSO descriptor with 16 gather entries is 352 bytes... */
#define MAX_DESC_SIZE		512
#define MAX_DESC_TXBBS		(MAX_DESC_SIZE / TXBB_SIZE)
/*
 * OS related constants and tunables
 */

#define MLX4_EN_PRIV_FLAGS_BLUEFLAME 1

#define MLX4_EN_WATCHDOG_TIMEOUT	(15 * HZ)
/* Use the maximum between 16384 and a single page */
#define MLX4_EN_ALLOC_SIZE	PAGE_ALIGN(16384)

#define MLX4_EN_ALLOC_PREFER_ORDER	PAGE_ALLOC_COSTLY_ORDER
/* Receive fragment sizes; we use at most 3 fragments (for 9600 byte MTU
 * and 4K allocations) */
enum {
	FRAG_SZ0 = 1536 - NET_IP_ALIGN,
	FRAG_SZ1 = 4096,
	FRAG_SZ2 = 4096,
	FRAG_SZ3 = MLX4_EN_ALLOC_SIZE
};
#define MLX4_EN_MAX_RX_FRAGS	4
/* Maximum ring sizes */
#define MLX4_EN_MAX_TX_SIZE	8192
#define MLX4_EN_MAX_RX_SIZE	8192

/* Minimum ring size for our page-allocation scheme to work */
#define MLX4_EN_MIN_RX_SIZE	(MLX4_EN_ALLOC_SIZE / SMP_CACHE_BYTES)
#define MLX4_EN_MIN_TX_SIZE	(4096 / TXBB_SIZE)
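
/*
 * Worked example (illustrative only; assumes 4 KiB pages so that
 * MLX4_EN_ALLOC_SIZE is 16384, a SMP_CACHE_BYTES of 64 and the 64-byte
 * TXBB defined above):
 *
 *	MLX4_EN_MIN_RX_SIZE = 16384 / 64 = 256 descriptors
 *	MLX4_EN_MIN_TX_SIZE = 4096 / 64  = 64 TXBBs
 */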
#define MLX4_EN_SMALL_PKT_SIZE		64
#define MLX4_EN_MIN_TX_RING_P_UP	1
#define MLX4_EN_MAX_TX_RING_P_UP	32
#define MLX4_EN_NUM_UP			8
#define MLX4_EN_DEF_TX_RING_SIZE	512
#define MLX4_EN_DEF_RX_RING_SIZE	1024
#define MAX_TX_RINGS			(MLX4_EN_MAX_TX_RING_P_UP * \
					 MLX4_EN_NUM_UP)

#define MLX4_EN_DEFAULT_TX_WORK		256
/* Target number of packets to coalesce with interrupt moderation */
#define MLX4_EN_RX_COAL_TARGET	44
#define MLX4_EN_RX_COAL_TIME	0x10

#define MLX4_EN_TX_COAL_PKTS	16
#define MLX4_EN_TX_COAL_TIME	0x10

#define MLX4_EN_RX_RATE_LOW		400000
#define MLX4_EN_RX_COAL_TIME_LOW	0
#define MLX4_EN_RX_RATE_HIGH		450000
#define MLX4_EN_RX_COAL_TIME_HIGH	128
#define MLX4_EN_RX_SIZE_THRESH		1024
#define MLX4_EN_RX_RATE_THRESH		(1000000 / MLX4_EN_RX_COAL_TIME_HIGH)
#define MLX4_EN_SAMPLE_INTERVAL		0
#define MLX4_EN_AVG_PKT_SMALL		256
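
/*
 * Illustrative sketch (not the driver's exact mlx4_en_auto_moderation()):
 * the adaptive scheme maps the observed Rx packet rate onto a coalescing
 * time between the LOW and HIGH bounds above, roughly:
 *
 *	rate = rx_packets * HZ / period;	(packets per second)
 *	if (rate < MLX4_EN_RX_RATE_LOW)
 *		moder_time = MLX4_EN_RX_COAL_TIME_LOW;
 *	else if (rate > MLX4_EN_RX_RATE_HIGH)
 *		moder_time = MLX4_EN_RX_COAL_TIME_HIGH;
 *	else
 *		moder_time = (rate - MLX4_EN_RX_RATE_LOW) *
 *			(MLX4_EN_RX_COAL_TIME_HIGH - MLX4_EN_RX_COAL_TIME_LOW) /
 *			(MLX4_EN_RX_RATE_HIGH - MLX4_EN_RX_RATE_LOW) +
 *			MLX4_EN_RX_COAL_TIME_LOW;
 */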
#define MLX4_EN_AUTO_CONF	0xffff

#define MLX4_EN_DEF_RX_PAUSE	1
#define MLX4_EN_DEF_TX_PAUSE	1
/* Interval between successive polls in the Tx routine when polling is used
   instead of interrupts (in per-core Tx rings) - should be power of 2 */
#define MLX4_EN_TX_POLL_MODER	16
#define MLX4_EN_TX_POLL_TIMEOUT	(HZ / 4)

#define SMALL_PACKET_SIZE	(256 - NET_IP_ALIGN)
#define HEADER_COPY_SIZE	(128 - NET_IP_ALIGN)
#define MLX4_LOOPBACK_TEST_PAYLOAD (HEADER_COPY_SIZE - ETH_HLEN)

#define MLX4_EN_MIN_MTU		46
#define ETH_BCAST		0xffffffffffffULL

#define MLX4_EN_LOOPBACK_RETRIES	5
#define MLX4_EN_LOOPBACK_TIMEOUT	100
#ifdef MLX4_EN_PERF_STAT
/* Number of samples to 'average' */
#define AVG_SIZE			128
#define AVG_FACTOR			1024
#define NUM_PERF_STATS			NUM_PERF_COUNTERS

#define INC_PERF_COUNTER(cnt)		(++(cnt))
#define ADD_PERF_COUNTER(cnt, add)	((cnt) += (add))
#define AVG_PERF_COUNTER(cnt, sample) \
	((cnt) = ((cnt) * (AVG_SIZE - 1) + (sample) * AVG_FACTOR) / AVG_SIZE)
#define GET_PERF_COUNTER(cnt)		(cnt)
#define GET_AVG_PERF_COUNTER(cnt)	((cnt) / AVG_FACTOR)

#else

#define NUM_PERF_STATS			0
#define INC_PERF_COUNTER(cnt)		do {} while (0)
#define ADD_PERF_COUNTER(cnt, add)	do {} while (0)
#define AVG_PERF_COUNTER(cnt, sample)	do {} while (0)
#define GET_PERF_COUNTER(cnt)		(0)
#define GET_AVG_PERF_COUNTER(cnt)	(0)
#endif /* MLX4_EN_PERF_STAT */
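
/*
 * Note: AVG_PERF_COUNTER() keeps an exponential moving average stored in
 * AVG_FACTOR fixed point; GET_AVG_PERF_COUNTER() divides the scale back out.
 * Illustrative use (the tx_pktsz_avg field name is assumed here, as the perf
 * stats structure body is elided in this excerpt):
 *
 *	AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);
 *	avg_len = GET_AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg);
 */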
/* Constants for TX flow */
enum {
	MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
};
#define ROUNDUP_LOG2(x)		ilog2(roundup_pow_of_two(x))
#define XNOR(x, y)		(!(x) == !(y))
struct mlx4_en_tx_info {
	/* ... */
} ____cacheline_aligned_in_smp;
#define MLX4_EN_BIT_DESC_OWN	0x80000000
#define CTRL_SIZE	sizeof(struct mlx4_wqe_ctrl_seg)
#define MLX4_EN_MEMTYPE_PAD	0x100
#define DS_SIZE		sizeof(struct mlx4_wqe_data_seg)
struct mlx4_en_tx_desc {
	struct mlx4_wqe_ctrl_seg ctrl;
	union {
		struct mlx4_wqe_data_seg data; /* at least one data segment */
		struct mlx4_wqe_lso_seg lso;
		struct mlx4_wqe_inline_seg inl;
	};
};
#define MLX4_EN_USE_SRQ		0x01000000

#define MLX4_EN_CX3_LOW_ID	0x1000
#define MLX4_EN_CX3_HIGH_ID	0x1005
struct mlx4_en_rx_alloc {
	/* ... */
};
struct mlx4_en_tx_ring {
	/* cache line used and dirtied in tx completion
	 * (mlx4_en_free_tx_buf())
	 */
	unsigned long		wake_queue;

	/* cache line used and dirtied in mlx4_en_xmit() */
	u32			prod ____cacheline_aligned_in_smp;
	unsigned long		packets;
	unsigned long		tx_csum;
	unsigned long		tso_packets;
	unsigned long		xmit_more;
	unsigned long		queue_stopped;

	/* Following part should be mostly read */
	cpumask_t		affinity_mask;
	struct mlx4_hwq_resources wqres;
	u32			size;	/* number of TXBBs */
	u16			cqn;	/* index of port CQ associated with this ring */
	struct mlx4_en_tx_info	*tx_info;
	struct mlx4_qp_context	context;
	enum mlx4_qp_state	qp_state;
	struct netdev_queue	*tx_queue;
	int			hwtstamp_tx_type;
} ____cacheline_aligned_in_smp;
struct mlx4_en_rx_desc {
	/* actual number of entries depends on rx ring stride */
	struct mlx4_wqe_data_seg data[0];
};
struct mlx4_en_rx_ring {
	struct mlx4_hwq_resources wqres;
	struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
	u32 size;	/* number of Rx descs */
	u16 cqn;	/* index of port CQ associated with this ring */
	unsigned long packets;
#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned long yields;
	unsigned long misses;
	unsigned long cleaned;
#endif
	unsigned long csum_ok;
	unsigned long csum_none;
	int hwtstamp_rx_filter;
	cpumask_var_t affinity_mask;
};
struct mlx4_en_cq {
	struct mlx4_hwq_resources wqres;
	int ring;
	struct net_device *dev;
	struct napi_struct napi;
	/* ... */
	struct mlx4_cqe *buf;
#define MLX4_EN_OPCODE_ERROR	0x1e

#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned int state;
#define MLX4_EN_CQ_STATE_IDLE		0
#define MLX4_EN_CQ_STATE_NAPI		1	/* NAPI owns this CQ */
#define MLX4_EN_CQ_STATE_POLL		2	/* poll owns this CQ */
#define MLX4_CQ_LOCKED (MLX4_EN_CQ_STATE_NAPI | MLX4_EN_CQ_STATE_POLL)
#define MLX4_EN_CQ_STATE_NAPI_YIELD	4	/* NAPI yielded this CQ */
#define MLX4_EN_CQ_STATE_POLL_YIELD	8	/* poll yielded this CQ */
#define CQ_YIELD (MLX4_EN_CQ_STATE_NAPI_YIELD | MLX4_EN_CQ_STATE_POLL_YIELD)
#define CQ_USER_PEND (MLX4_EN_CQ_STATE_POLL | MLX4_EN_CQ_STATE_POLL_YIELD)
	spinlock_t poll_lock; /* protects from LLS/napi conflicts */
#endif  /* CONFIG_NET_RX_BUSY_POLL */
	struct irq_desc *irq_desc;
};
struct mlx4_en_port_profile {
	/* ... */
};
struct mlx4_en_profile {
	u8 num_tx_rings_p_up;
	struct mlx4_en_port_profile prof[MLX4_MAX_PORTS + 1];
};

struct mlx4_en_dev {
	struct mlx4_dev		*dev;
	struct pci_dev		*pdev;
	struct mutex		state_lock;
	struct net_device	*pndev[MLX4_MAX_PORTS + 1];
	struct mlx4_en_profile	profile;
	struct workqueue_struct	*workqueue;
	struct device		*dma_device;
	void __iomem		*uar_map;
	struct mlx4_uar		priv_uar;
	u8			mac_removed[MLX4_MAX_PORTS + 1];
	struct cyclecounter	cycles;
	struct timecounter	clock;
	unsigned long		last_overflow_check;
	unsigned long		overflow_period;
	struct ptp_clock	*ptp_clock;
	struct ptp_clock_info	ptp_clock_info;
};
struct mlx4_en_rss_map {
	struct mlx4_qp qps[MAX_RX_RINGS];
	enum mlx4_qp_state state[MAX_RX_RINGS];
	struct mlx4_qp indir_qp;
	enum mlx4_qp_state indir_state;
};
struct mlx4_en_port_state {
	/* ... */
};
struct mlx4_en_pkt_stats {
	unsigned long broadcast;
	unsigned long rx_prio[8];
	unsigned long tx_prio[8];
#define NUM_PKT_STATS		17
};
struct mlx4_en_port_stats {
	unsigned long tso_packets;
	unsigned long xmit_more;
	unsigned long queue_stopped;
	unsigned long wake_queue;
	unsigned long tx_timeout;
	unsigned long rx_alloc_failed;
	unsigned long rx_chksum_good;
	unsigned long rx_chksum_none;
	unsigned long tx_chksum_offload;
#define NUM_PORT_STATS		9
};
struct mlx4_en_perf_stats {
	/* ... */
#define NUM_PERF_COUNTERS	6
};
enum mlx4_en_mclist_act {
	/* ... */
};
struct mlx4_en_mc_list {
	struct list_head	list;
	enum mlx4_en_mclist_act	action;
	/* ... */
};
struct mlx4_en_frag_info {
	u16 frag_prefix_size;
	/* ... */
};
#ifdef CONFIG_MLX4_EN_DCB
/* Minimal TC BW - setting to 0 will block traffic */
#define MLX4_EN_BW_MIN 1
#define MLX4_EN_BW_MAX 100 /* Utilize 100% of the line */

#define MLX4_EN_TC_ETS 7
#endif
struct ethtool_flow_id {
	struct list_head list;
	struct ethtool_rx_flow_spec flow_spec;
	/* ... */
};
enum {
	MLX4_EN_FLAG_PROMISC		= (1 << 0),
	MLX4_EN_FLAG_MC_PROMISC		= (1 << 1),
	/* whether we need to enable hardware loopback by putting dmac
	 * in Tx WQE
	 */
	MLX4_EN_FLAG_ENABLE_HW_LOOPBACK	= (1 << 2),
	/* whether we need to drop packets that hardware loopback-ed */
	MLX4_EN_FLAG_RX_FILTER_NEEDED	= (1 << 3),
	MLX4_EN_FLAG_FORCE_PROMISC	= (1 << 4)
};
#define MLX4_EN_MAC_HASH_SIZE	(1 << BITS_PER_BYTE)
#define MLX4_EN_MAC_HASH_IDX	5
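
/*
 * The unicast MAC table (mac_hash[] in struct mlx4_en_priv below) has
 * MLX4_EN_MAC_HASH_SIZE (256) buckets; an entry is hashed on the single
 * address byte selected by MLX4_EN_MAC_HASH_IDX, e.g. (illustrative):
 *
 *	bucket = &priv->mac_hash[mac[MLX4_EN_MAC_HASH_IDX]];
 */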
struct mlx4_en_priv {
	struct mlx4_en_dev *mdev;
	struct mlx4_en_port_profile *prof;
	struct net_device *dev;
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
	struct net_device_stats stats;
	struct net_device_stats ret_stats;
	struct mlx4_en_port_state port_state;
	spinlock_t stats_lock;
	struct ethtool_flow_id ethtool_rules[MAX_NUM_OF_FS_RULES];
	/* To allow rules removal while port is going down */
	struct list_head ethtool_list;

	unsigned long last_moder_packets[MAX_RX_RINGS];
	unsigned long last_moder_tx_packets;
	unsigned long last_moder_bytes[MAX_RX_RINGS];
	unsigned long last_moder_jiffies;
	int last_moder_time[MAX_RX_RINGS];
	u16 adaptive_rx_coal;
	u32 validate_loopback;
	struct mlx4_hwq_resources res;
	unsigned char current_mac[ETH_ALEN + 2];
	struct mlx4_en_rss_map rss_map;
	u8 num_tx_rings_p_up;
	struct mlx4_en_frag_info frag_info[MLX4_EN_MAX_RX_FRAGS];
	struct mlx4_en_tx_ring **tx_ring;
	struct mlx4_en_rx_ring *rx_ring[MAX_RX_RINGS];
	struct mlx4_en_cq **tx_cq;
	struct mlx4_en_cq *rx_cq[MAX_RX_RINGS];
	struct mlx4_qp drop_qp;
	struct work_struct rx_mode_task;
	struct work_struct watchdog_task;
	struct work_struct linkstate_task;
	struct delayed_work stats_task;
	struct delayed_work service_task;
#ifdef CONFIG_MLX4_EN_VXLAN
	struct work_struct vxlan_add_task;
	struct work_struct vxlan_del_task;
#endif
	struct mlx4_en_perf_stats pstats;
	struct mlx4_en_pkt_stats pkstats;
	struct mlx4_en_port_stats port_stats;
	struct list_head mc_list;
	struct list_head curr_list;
	struct mlx4_en_stat_out_mbox hw_stats;
	struct hlist_head mac_hash[MLX4_EN_MAC_HASH_SIZE];
	struct hwtstamp_config hwtstamp_config;

#ifdef CONFIG_MLX4_EN_DCB
	u16 maxrate[IEEE_8021QAZ_MAX_TCS];
#endif
#ifdef CONFIG_RFS_ACCEL
	spinlock_t filters_lock;
	struct list_head filters;
	struct hlist_head filter_hash[1 << MLX4_EN_FILTER_HASH_SHIFT];
#endif
};
enum mlx4_en_wol {
	MLX4_EN_WOL_MAGIC = (1ULL << 61),
	MLX4_EN_WOL_ENABLED = (1ULL << 62),
};
struct mlx4_mac_entry {
	struct hlist_node hlist;
	unsigned char mac[ETH_ALEN + 2];
	/* ... */
};
static inline struct mlx4_cqe *mlx4_en_get_cqe(void *buf, int idx, int cqe_sz)
{
	return buf + idx * cqe_sz;
}
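
/*
 * Usage sketch (illustrative): the CQE size depends on device capabilities
 * (32 or 64 bytes), and the index is assumed here to be already masked to
 * the CQ size:
 *
 *	struct mlx4_cqe *cqe = mlx4_en_get_cqe(cq->buf, index & (size - 1), cqe_size);
 */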
#ifdef CONFIG_NET_RX_BUSY_POLL
static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq)
{
	spin_lock_init(&cq->poll_lock);
	cq->state = MLX4_EN_CQ_STATE_IDLE;
}
/* called from the device poll routine to get ownership of a cq */
static inline bool mlx4_en_cq_lock_napi(struct mlx4_en_cq *cq)
{
	bool rc = true;

	spin_lock(&cq->poll_lock);
	if (cq->state & MLX4_CQ_LOCKED) {
		WARN_ON(cq->state & MLX4_EN_CQ_STATE_NAPI);
		cq->state |= MLX4_EN_CQ_STATE_NAPI_YIELD;
		rc = false;
	} else
		/* we don't care if someone yielded */
		cq->state = MLX4_EN_CQ_STATE_NAPI;
	spin_unlock(&cq->poll_lock);
	return rc;
}
/* returns true if someone tried to get the cq while napi had it */
static inline bool mlx4_en_cq_unlock_napi(struct mlx4_en_cq *cq)
{
	bool rc = false;

	spin_lock(&cq->poll_lock);
	WARN_ON(cq->state & (MLX4_EN_CQ_STATE_POLL |
			     MLX4_EN_CQ_STATE_NAPI_YIELD));

	if (cq->state & MLX4_EN_CQ_STATE_POLL_YIELD)
		rc = true;
	cq->state = MLX4_EN_CQ_STATE_IDLE;
	spin_unlock(&cq->poll_lock);
	return rc;
}
/* called from mlx4_en_low_latency_poll() */
static inline bool mlx4_en_cq_lock_poll(struct mlx4_en_cq *cq)
{
	bool rc = true;

	spin_lock_bh(&cq->poll_lock);
	if ((cq->state & MLX4_CQ_LOCKED)) {
		struct net_device *dev = cq->dev;
		struct mlx4_en_priv *priv = netdev_priv(dev);
		struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];

		cq->state |= MLX4_EN_CQ_STATE_POLL_YIELD;
		rc = false;
		rx_ring->yields++;
	} else
		/* preserve yield marks */
		cq->state |= MLX4_EN_CQ_STATE_POLL;
	spin_unlock_bh(&cq->poll_lock);
	return rc;
}
/* returns true if someone tried to get the cq while it was locked */
static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq)
{
	bool rc = false;

	spin_lock_bh(&cq->poll_lock);
	WARN_ON(cq->state & (MLX4_EN_CQ_STATE_NAPI));

	if (cq->state & MLX4_EN_CQ_STATE_POLL_YIELD)
		rc = true;
	cq->state = MLX4_EN_CQ_STATE_IDLE;
	spin_unlock_bh(&cq->poll_lock);
	return rc;
}
/* true if a socket is polling, even if it did not get the lock */
static inline bool mlx4_en_cq_busy_polling(struct mlx4_en_cq *cq)
{
	WARN_ON(!(cq->state & MLX4_CQ_LOCKED));
	return cq->state & CQ_USER_PEND;
}
#else
static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq)
{
}

static inline bool mlx4_en_cq_lock_napi(struct mlx4_en_cq *cq)
{
	return true;
}

static inline bool mlx4_en_cq_unlock_napi(struct mlx4_en_cq *cq)
{
	return false;
}

static inline bool mlx4_en_cq_lock_poll(struct mlx4_en_cq *cq)
{
	return false;
}

static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq)
{
	return false;
}

static inline bool mlx4_en_cq_busy_polling(struct mlx4_en_cq *cq)
{
	return false;
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
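
/*
 * Illustrative sketch (not the driver's actual poll routine) of how the
 * helpers above serialize NAPI and socket busy-polling on one CQ:
 *
 *	if (!mlx4_en_cq_lock_napi(cq))
 *		return budget;			(busy-poll owns the CQ, back off)
 *	done = mlx4_en_process_rx_cq(dev, cq, budget);
 *	mlx4_en_cq_unlock_napi(cq);		(true if a poller contended meanwhile)
 */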
#define MLX4_EN_WOL_DO_MODIFY (1ULL << 63)

void mlx4_en_update_loopback_state(struct net_device *dev,
				   netdev_features_t features);

void mlx4_en_destroy_netdev(struct net_device *dev);
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
			struct mlx4_en_port_profile *prof);

int mlx4_en_start_port(struct net_device *dev);
void mlx4_en_stop_port(struct net_device *dev, int detach);
void mlx4_en_free_resources(struct mlx4_en_priv *priv);
int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);

int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq,
		      int entries, int ring, enum cq_type mode, int node);
void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq);
int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
			int cq_idx);
void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
void mlx4_en_tx_irq(struct mlx4_cq *mcq);
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
			 void *accel_priv, select_queue_fallback_t fallback);
netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_tx_ring **pring,
			   int qpn, u32 size, u16 stride,
			   int node, int queue_index);
void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_tx_ring **pring);
int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_tx_ring *ring,
			     int cq, int user_prio);
void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_tx_ring *ring);
void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev);
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_rx_ring **pring,
			   u32 size, u16 stride, int node);
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_rx_ring **pring,
			     u32 size, u16 stride);
int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv);
void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring);
int mlx4_en_process_rx_cq(struct net_device *dev,
			  struct mlx4_en_cq *cq,
			  int budget);
int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget);
void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
			     int is_tx, int rss, int qpn, int cqn, int user_prio,
			     struct mlx4_qp_context *context);
void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event);
int mlx4_en_map_buffer(struct mlx4_buf *buf);
void mlx4_en_unmap_buffer(struct mlx4_buf *buf);

void mlx4_en_calc_rx_buf(struct net_device *dev);
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv);
void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv);
int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring);
void mlx4_en_rx_irq(struct mlx4_cq *mcq);
int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);
int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv);

int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset);
int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port);
#ifdef CONFIG_MLX4_EN_DCB
extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops;
extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops;
#endif

int mlx4_en_setup_tc(struct net_device *dev, u8 up);

#ifdef CONFIG_RFS_ACCEL
void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv);
#endif
#define MLX4_EN_NUM_SELF_TEST	5
void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf);
void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev);
/*
 * Functions for time stamping
 */
u64 mlx4_en_get_cqe_ts(struct mlx4_cqe *cqe);
void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
			    struct skb_shared_hwtstamps *hwts,
			    u64 timestamp);
void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev);
void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev);
int mlx4_en_timestamp_config(struct net_device *dev,
			     int tx_type, int rx_filter);
extern const struct ethtool_ops mlx4_en_ethtool_ops;
/*
 * printk / logging functions
 */

void en_print(const char *level, const struct mlx4_en_priv *priv,
	      const char *format, ...);

#define en_dbg(mlevel, priv, format, ...)				\
do {									\
	if (NETIF_MSG_##mlevel & (priv)->msg_enable)			\
		en_print(KERN_DEBUG, priv, format, ##__VA_ARGS__);	\
} while (0)
#define en_warn(priv, format, ...)					\
	en_print(KERN_WARNING, priv, format, ##__VA_ARGS__)
#define en_err(priv, format, ...)					\
	en_print(KERN_ERR, priv, format, ##__VA_ARGS__)
#define en_info(priv, format, ...)					\
	en_print(KERN_INFO, priv, format, ##__VA_ARGS__)

#define mlx4_err(mdev, format, ...)					\
	pr_err(DRV_NAME " %s: " format,					\
	       dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)
#define mlx4_info(mdev, format, ...)					\
	pr_info(DRV_NAME " %s: " format,				\
		dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)
#define mlx4_warn(mdev, format, ...)					\
	pr_warn(DRV_NAME " %s: " format,				\
		dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)
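
/*
 * Example usage of the logging helpers above (illustrative only; en_dbg()
 * takes a NETIF_MSG_* suffix such as DRV as its first argument, and the
 * variable names here are hypothetical):
 *
 *	en_dbg(DRV, priv, "using %d RX rings\n", num_rx_rings);
 *	en_err(priv, "Failed to allocate TX ring\n");
 *	mlx4_warn(mdev, "port %d: link down\n", port);
 */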