/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
35 #include <linux/if_vlan.h>
37 #include <linux/mlx4/device.h>
38 #include <linux/mlx4/cmd.h>
44 int mlx4_SET_VLAN_FLTR(struct mlx4_dev
*dev
, struct mlx4_en_priv
*priv
)
46 struct mlx4_cmd_mailbox
*mailbox
;
47 struct mlx4_set_vlan_fltr_mbox
*filter
;
54 mailbox
= mlx4_alloc_cmd_mailbox(dev
);
56 return PTR_ERR(mailbox
);
58 filter
= mailbox
->buf
;
59 for (i
= VLAN_FLTR_SIZE
- 1; i
>= 0; i
--) {
61 for (j
= 0; j
< 32; j
++)
62 if (test_bit(index
++, priv
->active_vlans
))
64 filter
->entry
[i
] = cpu_to_be32(entry
);
66 err
= mlx4_cmd(dev
, mailbox
->dma
, priv
->port
, 0, MLX4_CMD_SET_VLAN_FLTR
,
67 MLX4_CMD_TIME_CLASS_B
, MLX4_CMD_WRAPPED
);
68 mlx4_free_cmd_mailbox(dev
, mailbox
);
72 int mlx4_en_QUERY_PORT(struct mlx4_en_dev
*mdev
, u8 port
)
74 struct mlx4_en_query_port_context
*qport_context
;
75 struct mlx4_en_priv
*priv
= netdev_priv(mdev
->pndev
[port
]);
76 struct mlx4_en_port_state
*state
= &priv
->port_state
;
77 struct mlx4_cmd_mailbox
*mailbox
;
80 mailbox
= mlx4_alloc_cmd_mailbox(mdev
->dev
);
82 return PTR_ERR(mailbox
);
83 err
= mlx4_cmd_box(mdev
->dev
, 0, mailbox
->dma
, port
, 0,
84 MLX4_CMD_QUERY_PORT
, MLX4_CMD_TIME_CLASS_B
,
88 qport_context
= mailbox
->buf
;
90 /* This command is always accessed from Ethtool context
91 * already synchronized, no need in locking */
92 state
->link_state
= !!(qport_context
->link_up
& MLX4_EN_LINK_UP_MASK
);
93 switch (qport_context
->link_speed
& MLX4_EN_SPEED_MASK
) {
94 case MLX4_EN_1G_SPEED
:
95 state
->link_speed
= 1000;
97 case MLX4_EN_10G_SPEED_XAUI
:
98 case MLX4_EN_10G_SPEED_XFI
:
99 state
->link_speed
= 10000;
101 case MLX4_EN_40G_SPEED
:
102 state
->link_speed
= 40000;
105 state
->link_speed
= -1;
108 state
->transciver
= qport_context
->transceiver
;
111 mlx4_free_cmd_mailbox(mdev
->dev
, mailbox
);
115 int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev
*mdev
, u8 port
, u8 reset
)
117 struct mlx4_en_stat_out_mbox
*mlx4_en_stats
;
118 struct mlx4_en_priv
*priv
= netdev_priv(mdev
->pndev
[port
]);
119 struct net_device_stats
*stats
= &priv
->stats
;
120 struct mlx4_cmd_mailbox
*mailbox
;
121 u64 in_mod
= reset
<< 8 | port
;
125 mailbox
= mlx4_alloc_cmd_mailbox(mdev
->dev
);
127 return PTR_ERR(mailbox
);
128 err
= mlx4_cmd_box(mdev
->dev
, 0, mailbox
->dma
, in_mod
, 0,
129 MLX4_CMD_DUMP_ETH_STATS
, MLX4_CMD_TIME_CLASS_B
,
134 mlx4_en_stats
= mailbox
->buf
;
136 spin_lock_bh(&priv
->stats_lock
);
138 stats
->rx_packets
= 0;
140 priv
->port_stats
.rx_chksum_good
= 0;
141 priv
->port_stats
.rx_chksum_none
= 0;
142 for (i
= 0; i
< priv
->rx_ring_num
; i
++) {
143 stats
->rx_packets
+= priv
->rx_ring
[i
]->packets
;
144 stats
->rx_bytes
+= priv
->rx_ring
[i
]->bytes
;
145 priv
->port_stats
.rx_chksum_good
+= priv
->rx_ring
[i
]->csum_ok
;
146 priv
->port_stats
.rx_chksum_none
+= priv
->rx_ring
[i
]->csum_none
;
148 stats
->tx_packets
= 0;
150 priv
->port_stats
.tx_chksum_offload
= 0;
151 priv
->port_stats
.queue_stopped
= 0;
152 priv
->port_stats
.wake_queue
= 0;
153 priv
->port_stats
.tso_packets
= 0;
154 priv
->port_stats
.xmit_more
= 0;
156 for (i
= 0; i
< priv
->tx_ring_num
; i
++) {
157 const struct mlx4_en_tx_ring
*ring
= priv
->tx_ring
[i
];
159 stats
->tx_packets
+= ring
->packets
;
160 stats
->tx_bytes
+= ring
->bytes
;
161 priv
->port_stats
.tx_chksum_offload
+= ring
->tx_csum
;
162 priv
->port_stats
.queue_stopped
+= ring
->queue_stopped
;
163 priv
->port_stats
.wake_queue
+= ring
->wake_queue
;
164 priv
->port_stats
.tso_packets
+= ring
->tso_packets
;
165 priv
->port_stats
.xmit_more
+= ring
->xmit_more
;
168 stats
->rx_errors
= be64_to_cpu(mlx4_en_stats
->PCS
) +
169 be32_to_cpu(mlx4_en_stats
->RdropLength
) +
170 be32_to_cpu(mlx4_en_stats
->RJBBR
) +
171 be32_to_cpu(mlx4_en_stats
->RCRC
) +
172 be32_to_cpu(mlx4_en_stats
->RRUNT
);
173 stats
->tx_errors
= be32_to_cpu(mlx4_en_stats
->TDROP
);
174 stats
->multicast
= be64_to_cpu(mlx4_en_stats
->MCAST_prio_0
) +
175 be64_to_cpu(mlx4_en_stats
->MCAST_prio_1
) +
176 be64_to_cpu(mlx4_en_stats
->MCAST_prio_2
) +
177 be64_to_cpu(mlx4_en_stats
->MCAST_prio_3
) +
178 be64_to_cpu(mlx4_en_stats
->MCAST_prio_4
) +
179 be64_to_cpu(mlx4_en_stats
->MCAST_prio_5
) +
180 be64_to_cpu(mlx4_en_stats
->MCAST_prio_6
) +
181 be64_to_cpu(mlx4_en_stats
->MCAST_prio_7
) +
182 be64_to_cpu(mlx4_en_stats
->MCAST_novlan
);
183 stats
->collisions
= 0;
184 stats
->rx_length_errors
= be32_to_cpu(mlx4_en_stats
->RdropLength
);
185 stats
->rx_over_errors
= be32_to_cpu(mlx4_en_stats
->RdropOvflw
);
186 stats
->rx_crc_errors
= be32_to_cpu(mlx4_en_stats
->RCRC
);
187 stats
->rx_frame_errors
= 0;
188 stats
->rx_fifo_errors
= be32_to_cpu(mlx4_en_stats
->RdropOvflw
);
189 stats
->rx_missed_errors
= be32_to_cpu(mlx4_en_stats
->RdropOvflw
);
190 stats
->tx_aborted_errors
= 0;
191 stats
->tx_carrier_errors
= 0;
192 stats
->tx_fifo_errors
= 0;
193 stats
->tx_heartbeat_errors
= 0;
194 stats
->tx_window_errors
= 0;
196 priv
->pkstats
.broadcast
=
197 be64_to_cpu(mlx4_en_stats
->RBCAST_prio_0
) +
198 be64_to_cpu(mlx4_en_stats
->RBCAST_prio_1
) +
199 be64_to_cpu(mlx4_en_stats
->RBCAST_prio_2
) +
200 be64_to_cpu(mlx4_en_stats
->RBCAST_prio_3
) +
201 be64_to_cpu(mlx4_en_stats
->RBCAST_prio_4
) +
202 be64_to_cpu(mlx4_en_stats
->RBCAST_prio_5
) +
203 be64_to_cpu(mlx4_en_stats
->RBCAST_prio_6
) +
204 be64_to_cpu(mlx4_en_stats
->RBCAST_prio_7
) +
205 be64_to_cpu(mlx4_en_stats
->RBCAST_novlan
);
206 priv
->pkstats
.rx_prio
[0] = be64_to_cpu(mlx4_en_stats
->RTOT_prio_0
);
207 priv
->pkstats
.rx_prio
[1] = be64_to_cpu(mlx4_en_stats
->RTOT_prio_1
);
208 priv
->pkstats
.rx_prio
[2] = be64_to_cpu(mlx4_en_stats
->RTOT_prio_2
);
209 priv
->pkstats
.rx_prio
[3] = be64_to_cpu(mlx4_en_stats
->RTOT_prio_3
);
210 priv
->pkstats
.rx_prio
[4] = be64_to_cpu(mlx4_en_stats
->RTOT_prio_4
);
211 priv
->pkstats
.rx_prio
[5] = be64_to_cpu(mlx4_en_stats
->RTOT_prio_5
);
212 priv
->pkstats
.rx_prio
[6] = be64_to_cpu(mlx4_en_stats
->RTOT_prio_6
);
213 priv
->pkstats
.rx_prio
[7] = be64_to_cpu(mlx4_en_stats
->RTOT_prio_7
);
214 priv
->pkstats
.tx_prio
[0] = be64_to_cpu(mlx4_en_stats
->TTOT_prio_0
);
215 priv
->pkstats
.tx_prio
[1] = be64_to_cpu(mlx4_en_stats
->TTOT_prio_1
);
216 priv
->pkstats
.tx_prio
[2] = be64_to_cpu(mlx4_en_stats
->TTOT_prio_2
);
217 priv
->pkstats
.tx_prio
[3] = be64_to_cpu(mlx4_en_stats
->TTOT_prio_3
);
218 priv
->pkstats
.tx_prio
[4] = be64_to_cpu(mlx4_en_stats
->TTOT_prio_4
);
219 priv
->pkstats
.tx_prio
[5] = be64_to_cpu(mlx4_en_stats
->TTOT_prio_5
);
220 priv
->pkstats
.tx_prio
[6] = be64_to_cpu(mlx4_en_stats
->TTOT_prio_6
);
221 priv
->pkstats
.tx_prio
[7] = be64_to_cpu(mlx4_en_stats
->TTOT_prio_7
);
222 spin_unlock_bh(&priv
->stats_lock
);
225 mlx4_free_cmd_mailbox(mdev
->dev
, mailbox
);