net/mlx4_en: Add QCN parameters and statistics handling
[deliverable/linux.git] / drivers / net / ethernet / mellanox / mlx4 / en_dcb_nl.c
1 /*
2 * Copyright (c) 2011 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34 #include <linux/dcbnl.h>
35 #include <linux/math64.h>
36
37 #include "mlx4_en.h"
38
39 /* Definitions for QCN
40 */
41
/* Mailbox layout of the 802.1Qau (QCN) Reaction Point parameters as
 * exchanged with firmware via MLX4_CMD_CONGESTION_CTRL_OPCODE.  All
 * fields are big-endian.  The rppp_max_rps..rpg_min_rate members mirror
 * the per-TC arrays of struct ieee_qcn one-for-one (see getqcn/setqcn
 * below, which copy them field by field).
 */
struct mlx4_congestion_control_mb_prio_802_1_qau_params {
	__be32 modify_enable_high;	/* mask: which upper params a SET modifies */
	__be32 modify_enable_low;	/* mask: which lower params a SET modifies */
	__be32 reserved1;
	__be32 extended_enable;		/* bit 31: RPG enable; bit 30: CN tag */
	__be32 rppp_max_rps;
	__be32 rpg_time_reset;
	__be32 rpg_byte_reset;
	__be32 rpg_threshold;
	__be32 rpg_max_rate;
	__be32 rpg_ai_rate;
	__be32 rpg_hai_rate;
	__be32 rpg_gd;
	__be32 rpg_min_dec_fac;
	__be32 rpg_min_rate;
	__be32 max_time_rise;
	__be32 max_byte_rise;
	__be32 max_qdelta;
	__be32 min_qoffset;
	__be32 gd_coefficient;
	__be32 reserved2[5];
	__be32 cp_sample_base;
	__be32 reserved3[39];		/* pad mailbox to its fixed size */
};
66
/* Mailbox layout of the 802.1Qau (QCN) Reaction Point statistics
 * returned by MLX4_CONGESTION_CONTROL_GET_STATISTICS (all fields
 * big-endian).  Only rppp_rp_centiseconds and rppp_created_rps are
 * currently surfaced to dcbnl (see getqcnstats below).
 */
struct mlx4_congestion_control_mb_prio_802_1_qau_statistics {
	__be64 rppp_rp_centiseconds;
	__be32 reserved1;
	__be32 ignored_cnm;
	__be32 rppp_created_rps;
	__be32 estimated_total_rate;
	__be32 max_active_rate_limiter_index;
	__be32 dropped_cnms_busy_fw;
	__be32 reserved2;
	__be32 cnms_handled_successfully;
	__be32 min_total_limiters_rate;
	__be32 max_total_limiters_rate;
	__be32 reserved3[4];
};
81
82 static int mlx4_en_dcbnl_ieee_getets(struct net_device *dev,
83 struct ieee_ets *ets)
84 {
85 struct mlx4_en_priv *priv = netdev_priv(dev);
86 struct ieee_ets *my_ets = &priv->ets;
87
88 /* No IEEE PFC settings available */
89 if (!my_ets)
90 return -EINVAL;
91
92 ets->ets_cap = IEEE_8021QAZ_MAX_TCS;
93 ets->cbs = my_ets->cbs;
94 memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
95 memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
96 memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
97
98 return 0;
99 }
100
101 static int mlx4_en_ets_validate(struct mlx4_en_priv *priv, struct ieee_ets *ets)
102 {
103 int i;
104 int total_ets_bw = 0;
105 int has_ets_tc = 0;
106
107 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
108 if (ets->prio_tc[i] >= MLX4_EN_NUM_UP) {
109 en_err(priv, "Bad priority in UP <=> TC mapping. TC: %d, UP: %d\n",
110 i, ets->prio_tc[i]);
111 return -EINVAL;
112 }
113
114 switch (ets->tc_tsa[i]) {
115 case IEEE_8021QAZ_TSA_STRICT:
116 break;
117 case IEEE_8021QAZ_TSA_ETS:
118 has_ets_tc = 1;
119 total_ets_bw += ets->tc_tx_bw[i];
120 break;
121 default:
122 en_err(priv, "TC[%d]: Not supported TSA: %d\n",
123 i, ets->tc_tsa[i]);
124 return -ENOTSUPP;
125 }
126 }
127
128 if (has_ets_tc && total_ets_bw != MLX4_EN_BW_MAX) {
129 en_err(priv, "Bad ETS BW sum: %d. Should be exactly 100%%\n",
130 total_ets_bw);
131 return -EINVAL;
132 }
133
134 return 0;
135 }
136
137 static int mlx4_en_config_port_scheduler(struct mlx4_en_priv *priv,
138 struct ieee_ets *ets, u16 *ratelimit)
139 {
140 struct mlx4_en_dev *mdev = priv->mdev;
141 int num_strict = 0;
142 int i;
143 __u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS] = { 0 };
144 __u8 pg[IEEE_8021QAZ_MAX_TCS] = { 0 };
145
146 ets = ets ?: &priv->ets;
147 ratelimit = ratelimit ?: priv->maxrate;
148
149 /* higher TC means higher priority => lower pg */
150 for (i = IEEE_8021QAZ_MAX_TCS - 1; i >= 0; i--) {
151 switch (ets->tc_tsa[i]) {
152 case IEEE_8021QAZ_TSA_STRICT:
153 pg[i] = num_strict++;
154 tc_tx_bw[i] = MLX4_EN_BW_MAX;
155 break;
156 case IEEE_8021QAZ_TSA_ETS:
157 pg[i] = MLX4_EN_TC_ETS;
158 tc_tx_bw[i] = ets->tc_tx_bw[i] ?: MLX4_EN_BW_MIN;
159 break;
160 }
161 }
162
163 return mlx4_SET_PORT_SCHEDULER(mdev->dev, priv->port, tc_tx_bw, pg,
164 ratelimit);
165 }
166
167 static int
168 mlx4_en_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
169 {
170 struct mlx4_en_priv *priv = netdev_priv(dev);
171 struct mlx4_en_dev *mdev = priv->mdev;
172 int err;
173
174 err = mlx4_en_ets_validate(priv, ets);
175 if (err)
176 return err;
177
178 err = mlx4_SET_PORT_PRIO2TC(mdev->dev, priv->port, ets->prio_tc);
179 if (err)
180 return err;
181
182 err = mlx4_en_config_port_scheduler(priv, ets, NULL);
183 if (err)
184 return err;
185
186 memcpy(&priv->ets, ets, sizeof(priv->ets));
187
188 return 0;
189 }
190
191 static int mlx4_en_dcbnl_ieee_getpfc(struct net_device *dev,
192 struct ieee_pfc *pfc)
193 {
194 struct mlx4_en_priv *priv = netdev_priv(dev);
195
196 pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;
197 pfc->pfc_en = priv->prof->tx_ppp;
198
199 return 0;
200 }
201
202 static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
203 struct ieee_pfc *pfc)
204 {
205 struct mlx4_en_priv *priv = netdev_priv(dev);
206 struct mlx4_en_port_profile *prof = priv->prof;
207 struct mlx4_en_dev *mdev = priv->mdev;
208 int err;
209
210 en_dbg(DRV, priv, "cap: 0x%x en: 0x%x mbc: 0x%x delay: %d\n",
211 pfc->pfc_cap,
212 pfc->pfc_en,
213 pfc->mbc,
214 pfc->delay);
215
216 prof->rx_pause = !pfc->pfc_en;
217 prof->tx_pause = !pfc->pfc_en;
218 prof->rx_ppp = pfc->pfc_en;
219 prof->tx_ppp = pfc->pfc_en;
220
221 err = mlx4_SET_PORT_general(mdev->dev, priv->port,
222 priv->rx_skb_size + ETH_FCS_LEN,
223 prof->tx_pause,
224 prof->tx_ppp,
225 prof->rx_pause,
226 prof->rx_ppp);
227 if (err)
228 en_err(priv, "Failed setting pause params\n");
229
230 return err;
231 }
232
/* DCBX capability getter: this driver only supports host-managed
 * IEEE 802.1Qaz DCBX (no LLD-managed or CEE modes).
 */
static u8 mlx4_en_dcbnl_getdcbx(struct net_device *dev)
{
	return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
}
237
238 static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode)
239 {
240 if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
241 (mode & DCB_CAP_DCBX_VER_CEE) ||
242 !(mode & DCB_CAP_DCBX_VER_IEEE) ||
243 !(mode & DCB_CAP_DCBX_HOST))
244 return 1;
245
246 return 0;
247 }
248
249 #define MLX4_RATELIMIT_UNITS_IN_KB 100000 /* rate-limit HW unit in Kbps */
250 static int mlx4_en_dcbnl_ieee_getmaxrate(struct net_device *dev,
251 struct ieee_maxrate *maxrate)
252 {
253 struct mlx4_en_priv *priv = netdev_priv(dev);
254 int i;
255
256 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
257 maxrate->tc_maxrate[i] =
258 priv->maxrate[i] * MLX4_RATELIMIT_UNITS_IN_KB;
259
260 return 0;
261 }
262
263 static int mlx4_en_dcbnl_ieee_setmaxrate(struct net_device *dev,
264 struct ieee_maxrate *maxrate)
265 {
266 struct mlx4_en_priv *priv = netdev_priv(dev);
267 u16 tmp[IEEE_8021QAZ_MAX_TCS];
268 int i, err;
269
270 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
271 /* Convert from Kbps into HW units, rounding result up.
272 * Setting to 0, means unlimited BW.
273 */
274 tmp[i] = div_u64(maxrate->tc_maxrate[i] +
275 MLX4_RATELIMIT_UNITS_IN_KB - 1,
276 MLX4_RATELIMIT_UNITS_IN_KB);
277 }
278
279 err = mlx4_en_config_port_scheduler(priv, NULL, tmp);
280 if (err)
281 return err;
282
283 memcpy(priv->maxrate, tmp, sizeof(priv->maxrate));
284
285 return 0;
286 }
287
288 #define RPG_ENABLE_BIT 31
289 #define CN_TAG_BIT 30
290
291 static int mlx4_en_dcbnl_ieee_getqcn(struct net_device *dev,
292 struct ieee_qcn *qcn)
293 {
294 struct mlx4_en_priv *priv = netdev_priv(dev);
295 struct mlx4_congestion_control_mb_prio_802_1_qau_params *hw_qcn;
296 struct mlx4_cmd_mailbox *mailbox_out = NULL;
297 u64 mailbox_in_dma = 0;
298 u32 inmod = 0;
299 int i, err;
300
301 if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN))
302 return -EOPNOTSUPP;
303
304 mailbox_out = mlx4_alloc_cmd_mailbox(priv->mdev->dev);
305 if (IS_ERR(mailbox_out))
306 return -ENOMEM;
307 hw_qcn =
308 (struct mlx4_congestion_control_mb_prio_802_1_qau_params *)
309 mailbox_out->buf;
310
311 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
312 inmod = priv->port | ((1 << i) << 8) |
313 (MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16);
314 err = mlx4_cmd_box(priv->mdev->dev, mailbox_in_dma,
315 mailbox_out->dma,
316 inmod, MLX4_CONGESTION_CONTROL_GET_PARAMS,
317 MLX4_CMD_CONGESTION_CTRL_OPCODE,
318 MLX4_CMD_TIME_CLASS_C,
319 MLX4_CMD_NATIVE);
320 if (err) {
321 mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
322 return err;
323 }
324
325 qcn->rpg_enable[i] =
326 be32_to_cpu(hw_qcn->extended_enable) >> RPG_ENABLE_BIT;
327 qcn->rppp_max_rps[i] =
328 be32_to_cpu(hw_qcn->rppp_max_rps);
329 qcn->rpg_time_reset[i] =
330 be32_to_cpu(hw_qcn->rpg_time_reset);
331 qcn->rpg_byte_reset[i] =
332 be32_to_cpu(hw_qcn->rpg_byte_reset);
333 qcn->rpg_threshold[i] =
334 be32_to_cpu(hw_qcn->rpg_threshold);
335 qcn->rpg_max_rate[i] =
336 be32_to_cpu(hw_qcn->rpg_max_rate);
337 qcn->rpg_ai_rate[i] =
338 be32_to_cpu(hw_qcn->rpg_ai_rate);
339 qcn->rpg_hai_rate[i] =
340 be32_to_cpu(hw_qcn->rpg_hai_rate);
341 qcn->rpg_gd[i] =
342 be32_to_cpu(hw_qcn->rpg_gd);
343 qcn->rpg_min_dec_fac[i] =
344 be32_to_cpu(hw_qcn->rpg_min_dec_fac);
345 qcn->rpg_min_rate[i] =
346 be32_to_cpu(hw_qcn->rpg_min_rate);
347 qcn->cndd_state_machine[i] =
348 priv->cndd_state[i];
349 }
350 mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
351 return 0;
352 }
353
354 static int mlx4_en_dcbnl_ieee_setqcn(struct net_device *dev,
355 struct ieee_qcn *qcn)
356 {
357 struct mlx4_en_priv *priv = netdev_priv(dev);
358 struct mlx4_congestion_control_mb_prio_802_1_qau_params *hw_qcn;
359 struct mlx4_cmd_mailbox *mailbox_in = NULL;
360 u64 mailbox_in_dma = 0;
361 u32 inmod = 0;
362 int i, err;
363 #define MODIFY_ENABLE_HIGH_MASK 0xc0000000
364 #define MODIFY_ENABLE_LOW_MASK 0xffc00000
365
366 if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN))
367 return -EOPNOTSUPP;
368
369 mailbox_in = mlx4_alloc_cmd_mailbox(priv->mdev->dev);
370 if (IS_ERR(mailbox_in))
371 return -ENOMEM;
372
373 mailbox_in_dma = mailbox_in->dma;
374 hw_qcn =
375 (struct mlx4_congestion_control_mb_prio_802_1_qau_params *)mailbox_in->buf;
376 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
377 inmod = priv->port | ((1 << i) << 8) |
378 (MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16);
379
380 /* Before updating QCN parameter,
381 * need to set it's modify enable bit to 1
382 */
383
384 hw_qcn->modify_enable_high = cpu_to_be32(
385 MODIFY_ENABLE_HIGH_MASK);
386 hw_qcn->modify_enable_low = cpu_to_be32(MODIFY_ENABLE_LOW_MASK);
387
388 hw_qcn->extended_enable = cpu_to_be32(qcn->rpg_enable[i] << RPG_ENABLE_BIT);
389 hw_qcn->rppp_max_rps = cpu_to_be32(qcn->rppp_max_rps[i]);
390 hw_qcn->rpg_time_reset = cpu_to_be32(qcn->rpg_time_reset[i]);
391 hw_qcn->rpg_byte_reset = cpu_to_be32(qcn->rpg_byte_reset[i]);
392 hw_qcn->rpg_threshold = cpu_to_be32(qcn->rpg_threshold[i]);
393 hw_qcn->rpg_max_rate = cpu_to_be32(qcn->rpg_max_rate[i]);
394 hw_qcn->rpg_ai_rate = cpu_to_be32(qcn->rpg_ai_rate[i]);
395 hw_qcn->rpg_hai_rate = cpu_to_be32(qcn->rpg_hai_rate[i]);
396 hw_qcn->rpg_gd = cpu_to_be32(qcn->rpg_gd[i]);
397 hw_qcn->rpg_min_dec_fac = cpu_to_be32(qcn->rpg_min_dec_fac[i]);
398 hw_qcn->rpg_min_rate = cpu_to_be32(qcn->rpg_min_rate[i]);
399 priv->cndd_state[i] = qcn->cndd_state_machine[i];
400 if (qcn->cndd_state_machine[i] == DCB_CNDD_INTERIOR_READY)
401 hw_qcn->extended_enable |= cpu_to_be32(1 << CN_TAG_BIT);
402
403 err = mlx4_cmd(priv->mdev->dev, mailbox_in_dma, inmod,
404 MLX4_CONGESTION_CONTROL_SET_PARAMS,
405 MLX4_CMD_CONGESTION_CTRL_OPCODE,
406 MLX4_CMD_TIME_CLASS_C,
407 MLX4_CMD_NATIVE);
408 if (err) {
409 mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_in);
410 return err;
411 }
412 }
413 mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_in);
414 return 0;
415 }
416
417 static int mlx4_en_dcbnl_ieee_getqcnstats(struct net_device *dev,
418 struct ieee_qcn_stats *qcn_stats)
419 {
420 struct mlx4_en_priv *priv = netdev_priv(dev);
421 struct mlx4_congestion_control_mb_prio_802_1_qau_statistics *hw_qcn_stats;
422 struct mlx4_cmd_mailbox *mailbox_out = NULL;
423 u64 mailbox_in_dma = 0;
424 u32 inmod = 0;
425 int i, err;
426
427 if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN))
428 return -EOPNOTSUPP;
429
430 mailbox_out = mlx4_alloc_cmd_mailbox(priv->mdev->dev);
431 if (IS_ERR(mailbox_out))
432 return -ENOMEM;
433
434 hw_qcn_stats =
435 (struct mlx4_congestion_control_mb_prio_802_1_qau_statistics *)
436 mailbox_out->buf;
437
438 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
439 inmod = priv->port | ((1 << i) << 8) |
440 (MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16);
441 err = mlx4_cmd_box(priv->mdev->dev, mailbox_in_dma,
442 mailbox_out->dma, inmod,
443 MLX4_CONGESTION_CONTROL_GET_STATISTICS,
444 MLX4_CMD_CONGESTION_CTRL_OPCODE,
445 MLX4_CMD_TIME_CLASS_C,
446 MLX4_CMD_NATIVE);
447 if (err) {
448 mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
449 return err;
450 }
451 qcn_stats->rppp_rp_centiseconds[i] =
452 be64_to_cpu(hw_qcn_stats->rppp_rp_centiseconds);
453 qcn_stats->rppp_created_rps[i] =
454 be32_to_cpu(hw_qcn_stats->rppp_created_rps);
455 }
456 mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
457 return 0;
458 }
459
/* DCB netlink ops used when the device supports the full IEEE 802.1Qaz
 * feature set (ETS, maxrate, PFC) plus 802.1Qau QCN.
 */
const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = {
	.ieee_getets = mlx4_en_dcbnl_ieee_getets,
	.ieee_setets = mlx4_en_dcbnl_ieee_setets,
	.ieee_getmaxrate = mlx4_en_dcbnl_ieee_getmaxrate,
	.ieee_setmaxrate = mlx4_en_dcbnl_ieee_setmaxrate,
	.ieee_getpfc = mlx4_en_dcbnl_ieee_getpfc,
	.ieee_setpfc = mlx4_en_dcbnl_ieee_setpfc,

	.getdcbx = mlx4_en_dcbnl_getdcbx,
	.setdcbx = mlx4_en_dcbnl_setdcbx,
	.ieee_getqcn = mlx4_en_dcbnl_ieee_getqcn,
	.ieee_setqcn = mlx4_en_dcbnl_ieee_setqcn,
	.ieee_getqcnstats = mlx4_en_dcbnl_ieee_getqcnstats,
};
474
/* Reduced DCB netlink ops for devices that only support PFC. */
const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops = {
	.ieee_getpfc = mlx4_en_dcbnl_ieee_getpfc,
	.ieee_setpfc = mlx4_en_dcbnl_ieee_setpfc,

	.getdcbx = mlx4_en_dcbnl_getdcbx,
	.setdcbx = mlx4_en_dcbnl_setdcbx,
};
This page took 0.084108 seconds and 6 git commands to generate.