]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blob - drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
Merge remote-tracking branches 'asoc/topic/cs35l32', 'asoc/topic/cs35l34', 'asoc...
[mirror_ubuntu-jammy-kernel.git] / drivers / net / ethernet / mellanox / mlx5 / core / en_dcbnl.c
1 /*
2 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32 #include <linux/device.h>
33 #include <linux/netdevice.h>
34 #include "en.h"
35
/* Number of priorities handled by the CEE/IEEE DCB interfaces */
#define MLX5E_MAX_PRIORITY 8

/* Scale factors used to convert ieee_maxrate values to the firmware's
 * (value, unit) rate encoding: steps of 100 Mb and 1 Gb respectively.
 */
#define MLX5E_100MB (100000)
#define MLX5E_1GB   (1000000)

/* CEE operational state values reported via getstate/getpfcstate */
#define MLX5E_CEE_STATE_UP    1
#define MLX5E_CEE_STATE_DOWN  0

enum {
	/* tc group number used for TSA_VENDOR traffic classes */
	MLX5E_VENDOR_TC_GROUP_NUM = 7,
	/* tc group holding ETS tcs (zero-BW ETS tcs when both kinds exist) */
	MLX5E_LOWEST_PRIO_GROUP   = 0,
};

/* DSCP trust support requires the QPTS and QPDPM registers (via QCAM) */
#define MLX5_DSCP_SUPPORTED(mdev) (MLX5_CAP_GEN(mdev, qcam_reg) && \
				   MLX5_CAP_QCAM_REG(mdev, qpts) && \
				   MLX5_CAP_QCAM_REG(mdev, qpdpm))

/* Forward declarations: the dcbnl APP handlers use these helpers, which are
 * defined near the bottom of the file.
 */
static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state);
static int mlx5e_set_dscp2prio(struct mlx5e_priv *priv, u8 dscp, u8 prio);
55
56 /* If dcbx mode is non-host set the dcbx mode to host.
57 */
58 static int mlx5e_dcbnl_set_dcbx_mode(struct mlx5e_priv *priv,
59 enum mlx5_dcbx_oper_mode mode)
60 {
61 struct mlx5_core_dev *mdev = priv->mdev;
62 u32 param[MLX5_ST_SZ_DW(dcbx_param)];
63 int err;
64
65 err = mlx5_query_port_dcbx_param(mdev, param);
66 if (err)
67 return err;
68
69 MLX5_SET(dcbx_param, param, version_admin, mode);
70 if (mode != MLX5E_DCBX_PARAM_VER_OPER_HOST)
71 MLX5_SET(dcbx_param, param, willing_admin, 1);
72
73 return mlx5_set_port_dcbx_param(mdev, param);
74 }
75
76 static int mlx5e_dcbnl_switch_to_host_mode(struct mlx5e_priv *priv)
77 {
78 struct mlx5e_dcbx *dcbx = &priv->dcbx;
79 int err;
80
81 if (!MLX5_CAP_GEN(priv->mdev, dcbx))
82 return 0;
83
84 if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
85 return 0;
86
87 err = mlx5e_dcbnl_set_dcbx_mode(priv, MLX5E_DCBX_PARAM_VER_OPER_HOST);
88 if (err)
89 return err;
90
91 dcbx->mode = MLX5E_DCBX_PARAM_VER_OPER_HOST;
92 return 0;
93 }
94
/* dcbnl .ieee_getets callback: read the current ETS configuration
 * (prio->tc mapping, tc group and per-tc bandwidth share) from firmware.
 *
 * The firmware has no explicit "0% ETS tc" notion; the driver encodes it
 * with dedicated tc groups (see mlx5e_dcbnl_ieee_setets_core), so this
 * function reverses that encoding when reporting to the stack.
 * Returns 0 on success or a negative errno from the port queries.
 */
static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
				   struct ieee_ets *ets)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 tc_group[IEEE_8021QAZ_MAX_TCS];
	bool is_tc_group_6_exist = false;
	bool is_zero_bw_ets_tc = false;
	int err = 0;
	int i;

	if (!MLX5_CAP_GEN(priv->mdev, ets))
		return -EOPNOTSUPP;

	ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
	for (i = 0; i < ets->ets_cap; i++) {
		err = mlx5_query_port_prio_tc(mdev, i, &ets->prio_tc[i]);
		if (err)
			return err;

		err = mlx5_query_port_tc_group(mdev, i, &tc_group[i]);
		if (err)
			return err;

		err = mlx5_query_port_tc_bw_alloc(mdev, i, &ets->tc_tx_bw[i]);
		if (err)
			return err;

		/* A tc with partial BW sitting in group #1 implies group #0
		 * holds the zero-BW ETS tcs (setets_core's encoding).
		 */
		if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC &&
		    tc_group[i] == (MLX5E_LOWEST_PRIO_GROUP + 1))
			is_zero_bw_ets_tc = true;

		/* Group 6 in use means group 7 is not purely vendor tcs */
		if (tc_group[i] == (MLX5E_VENDOR_TC_GROUP_NUM - 1))
			is_tc_group_6_exist = true;
	}

	/* Report 0% ets tc if exits*/
	if (is_zero_bw_ets_tc) {
		for (i = 0; i < ets->ets_cap; i++)
			if (tc_group[i] == MLX5E_LOWEST_PRIO_GROUP)
				ets->tc_tx_bw[i] = 0;
	}

	/* Update tc_tsa based on fw setting*/
	for (i = 0; i < ets->ets_cap; i++) {
		if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC)
			priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
		else if (tc_group[i] == MLX5E_VENDOR_TC_GROUP_NUM &&
			 !is_tc_group_6_exist)
			priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
	}
	memcpy(ets->tc_tsa, priv->dcbx.tc_tsa, sizeof(ets->tc_tsa));

	return err;
}
150
151 static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc)
152 {
153 bool any_tc_mapped_to_ets = false;
154 bool ets_zero_bw = false;
155 int strict_group;
156 int i;
157
158 for (i = 0; i <= max_tc; i++) {
159 if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
160 any_tc_mapped_to_ets = true;
161 if (!ets->tc_tx_bw[i])
162 ets_zero_bw = true;
163 }
164 }
165
166 /* strict group has higher priority than ets group */
167 strict_group = MLX5E_LOWEST_PRIO_GROUP;
168 if (any_tc_mapped_to_ets)
169 strict_group++;
170 if (ets_zero_bw)
171 strict_group++;
172
173 for (i = 0; i <= max_tc; i++) {
174 switch (ets->tc_tsa[i]) {
175 case IEEE_8021QAZ_TSA_VENDOR:
176 tc_group[i] = MLX5E_VENDOR_TC_GROUP_NUM;
177 break;
178 case IEEE_8021QAZ_TSA_STRICT:
179 tc_group[i] = strict_group++;
180 break;
181 case IEEE_8021QAZ_TSA_ETS:
182 tc_group[i] = MLX5E_LOWEST_PRIO_GROUP;
183 if (ets->tc_tx_bw[i] && ets_zero_bw)
184 tc_group[i] = MLX5E_LOWEST_PRIO_GROUP + 1;
185 break;
186 }
187 }
188 }
189
190 static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
191 u8 *tc_group, int max_tc)
192 {
193 int bw_for_ets_zero_bw_tc = 0;
194 int last_ets_zero_bw_tc = -1;
195 int num_ets_zero_bw = 0;
196 int i;
197
198 for (i = 0; i <= max_tc; i++) {
199 if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS &&
200 !ets->tc_tx_bw[i]) {
201 num_ets_zero_bw++;
202 last_ets_zero_bw_tc = i;
203 }
204 }
205
206 if (num_ets_zero_bw)
207 bw_for_ets_zero_bw_tc = MLX5E_MAX_BW_ALLOC / num_ets_zero_bw;
208
209 for (i = 0; i <= max_tc; i++) {
210 switch (ets->tc_tsa[i]) {
211 case IEEE_8021QAZ_TSA_VENDOR:
212 tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
213 break;
214 case IEEE_8021QAZ_TSA_STRICT:
215 tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
216 break;
217 case IEEE_8021QAZ_TSA_ETS:
218 tc_tx_bw[i] = ets->tc_tx_bw[i] ?
219 ets->tc_tx_bw[i] :
220 bw_for_ets_zero_bw_tc;
221 break;
222 }
223 }
224
225 /* Make sure the total bw for ets zero bw group is 100% */
226 if (last_ets_zero_bw_tc != -1)
227 tc_tx_bw[last_ets_zero_bw_tc] +=
228 MLX5E_MAX_BW_ALLOC % num_ets_zero_bw;
229 }
230
231 /* If there are ETS BW 0,
232 * Set ETS group # to 1 for all ETS non zero BW tcs. Their sum must be 100%.
233 * Set group #0 to all the ETS BW 0 tcs and
234 * equally splits the 100% BW between them
235 * Report both group #0 and #1 as ETS type.
236 * All the tcs in group #0 will be reported with 0% BW.
237 */
238 int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
239 {
240 struct mlx5_core_dev *mdev = priv->mdev;
241 u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS];
242 u8 tc_group[IEEE_8021QAZ_MAX_TCS];
243 int max_tc = mlx5_max_tc(mdev);
244 int err, i;
245
246 mlx5e_build_tc_group(ets, tc_group, max_tc);
247 mlx5e_build_tc_tx_bw(ets, tc_tx_bw, tc_group, max_tc);
248
249 err = mlx5_set_port_prio_tc(mdev, ets->prio_tc);
250 if (err)
251 return err;
252
253 err = mlx5_set_port_tc_group(mdev, tc_group);
254 if (err)
255 return err;
256
257 err = mlx5_set_port_tc_bw_alloc(mdev, tc_tx_bw);
258
259 if (err)
260 return err;
261
262 memcpy(priv->dcbx.tc_tsa, ets->tc_tsa, sizeof(ets->tc_tsa));
263
264 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
265 mlx5e_dbg(HW, priv, "%s: prio_%d <=> tc_%d\n",
266 __func__, i, ets->prio_tc[i]);
267 mlx5e_dbg(HW, priv, "%s: tc_%d <=> tx_bw_%d%%, group_%d\n",
268 __func__, i, tc_tx_bw[i], tc_group[i]);
269 }
270
271 return err;
272 }
273
274 static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
275 struct ieee_ets *ets)
276 {
277 bool have_ets_tc = false;
278 int bw_sum = 0;
279 int i;
280
281 /* Validate Priority */
282 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
283 if (ets->prio_tc[i] >= MLX5E_MAX_PRIORITY) {
284 netdev_err(netdev,
285 "Failed to validate ETS: priority value greater than max(%d)\n",
286 MLX5E_MAX_PRIORITY);
287 return -EINVAL;
288 }
289 }
290
291 /* Validate Bandwidth Sum */
292 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
293 if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
294 have_ets_tc = true;
295 bw_sum += ets->tc_tx_bw[i];
296 }
297 }
298
299 if (have_ets_tc && bw_sum != 100) {
300 netdev_err(netdev,
301 "Failed to validate ETS: BW sum is illegal\n");
302 return -EINVAL;
303 }
304 return 0;
305 }
306
307 static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
308 struct ieee_ets *ets)
309 {
310 struct mlx5e_priv *priv = netdev_priv(netdev);
311 int err;
312
313 if (!MLX5_CAP_GEN(priv->mdev, ets))
314 return -EOPNOTSUPP;
315
316 err = mlx5e_dbcnl_validate_ets(netdev, ets);
317 if (err)
318 return err;
319
320 err = mlx5e_dcbnl_ieee_setets_core(priv, ets);
321 if (err)
322 return err;
323
324 return 0;
325 }
326
327 static int mlx5e_dcbnl_ieee_getpfc(struct net_device *dev,
328 struct ieee_pfc *pfc)
329 {
330 struct mlx5e_priv *priv = netdev_priv(dev);
331 struct mlx5_core_dev *mdev = priv->mdev;
332 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
333 int i;
334
335 pfc->pfc_cap = mlx5_max_tc(mdev) + 1;
336 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
337 pfc->requests[i] = PPORT_PER_PRIO_GET(pstats, i, tx_pause);
338 pfc->indications[i] = PPORT_PER_PRIO_GET(pstats, i, rx_pause);
339 }
340
341 return mlx5_query_port_pfc(mdev, &pfc->pfc_en, NULL);
342 }
343
344 static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
345 struct ieee_pfc *pfc)
346 {
347 struct mlx5e_priv *priv = netdev_priv(dev);
348 struct mlx5_core_dev *mdev = priv->mdev;
349 u8 curr_pfc_en;
350 int ret;
351
352 mlx5_query_port_pfc(mdev, &curr_pfc_en, NULL);
353
354 if (pfc->pfc_en == curr_pfc_en)
355 return 0;
356
357 ret = mlx5_set_port_pfc(mdev, pfc->pfc_en, pfc->pfc_en);
358 mlx5_toggle_port_link(mdev);
359
360 if (!ret) {
361 mlx5e_dbg(HW, priv,
362 "%s: PFC per priority bit mask: 0x%x\n",
363 __func__, pfc->pfc_en);
364 }
365 return ret;
366 }
367
/* dcbnl .getdcbx callback: report the cached DCBX capability flags */
static u8 mlx5e_dcbnl_getdcbx(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	return priv->dcbx.cap;
}
374
/* dcbnl .setdcbx callback: select the DCBX control mode.
 * dcbnl return convention: 0 = accepted, non-zero = rejected.
 * mode == 0 requests firmware-controlled (AUTO) DCBX; otherwise the mode
 * must include DCB_CAP_DCBX_HOST and the device is switched to host mode.
 */
static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_dcbx *dcbx = &priv->dcbx;

	/* LLD-managed DCBX is not supported by this driver */
	if (mode & DCB_CAP_DCBX_LLD_MANAGED)
		return 1;

	if ((!mode) && MLX5_CAP_GEN(priv->mdev, dcbx)) {
		/* Already firmware controlled — nothing to do */
		if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_AUTO)
			return 0;

		/* set dcbx to fw controlled */
		if (!mlx5e_dcbnl_set_dcbx_mode(priv, MLX5E_DCBX_PARAM_VER_OPER_AUTO)) {
			dcbx->mode = MLX5E_DCBX_PARAM_VER_OPER_AUTO;
			dcbx->cap &= ~DCB_CAP_DCBX_HOST;
			return 0;
		}

		return 1;
	}

	if (!(mode & DCB_CAP_DCBX_HOST))
		return 1;

	if (mlx5e_dcbnl_switch_to_host_mode(netdev_priv(dev)))
		return 1;

	dcbx->cap = mode;

	return 0;
}
407
/* dcbnl .ieee_setapp callback: install a dscp->priority mapping.
 * Only APP entries with the DSCP selector are supported, and only when the
 * device exposes the QPTS/QPDPM registers (MLX5_DSCP_SUPPORTED).
 * The first DSCP entry switches the port to DSCP trust; on a firmware
 * failure the trust state is rolled back to PCP.
 */
static int mlx5e_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct dcb_app temp;
	bool is_new;
	int err;

	if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP)
		return -EINVAL;

	if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
		return -EINVAL;

	if (!MLX5_DSCP_SUPPORTED(priv->mdev))
		return -EINVAL;

	if (app->protocol >= MLX5E_MAX_DSCP)
		return -EINVAL;

	/* Save the old entry info */
	temp.selector = IEEE_8021QAZ_APP_SEL_DSCP;
	temp.protocol = app->protocol;
	temp.priority = priv->dcbx_dp.dscp2prio[app->protocol];

	/* Check if need to switch to dscp trust state */
	if (!priv->dcbx.dscp_app_cnt) {
		err = mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_DSCP);
		if (err)
			return err;
	}

	/* Skip the fw command if new and old mapping are the same */
	if (app->priority != priv->dcbx_dp.dscp2prio[app->protocol]) {
		err = mlx5e_set_dscp2prio(priv, app->protocol, app->priority);
		if (err)
			goto fw_err;
	}

	/* Delete the old entry if exists */
	is_new = false;
	err = dcb_ieee_delapp(dev, &temp);
	if (err)
		is_new = true;

	/* Add new entry and update counter */
	err = dcb_ieee_setapp(dev, app);
	if (err)
		return err;

	if (is_new)
		priv->dcbx.dscp_app_cnt++;

	return err;

fw_err:
	/* Roll back to PCP trust after a firmware failure */
	mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP);
	return err;
}
466
/* dcbnl .ieee_delapp callback: remove a dscp->priority mapping and reset
 * that DSCP's priority to zero in firmware.  When the last entry is gone
 * the port is switched back to PCP trust.
 */
static int mlx5e_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int err;

	if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP)
		return -EINVAL;

	if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
		return -EINVAL;

	if (!MLX5_DSCP_SUPPORTED(priv->mdev))
		return -EINVAL;

	if (app->protocol >= MLX5E_MAX_DSCP)
		return -EINVAL;

	/* Skip if no dscp app entry */
	if (!priv->dcbx.dscp_app_cnt)
		return -ENOENT;

	/* Check if the entry matches fw setting */
	if (app->priority != priv->dcbx_dp.dscp2prio[app->protocol])
		return -ENOENT;

	/* Delete the app entry */
	err = dcb_ieee_delapp(dev, app);
	if (err)
		return err;

	/* Reset the priority mapping back to zero */
	err = mlx5e_set_dscp2prio(priv, app->protocol, 0);
	if (err)
		goto fw_err;

	priv->dcbx.dscp_app_cnt--;

	/* Check if need to switch to pcp trust state */
	if (!priv->dcbx.dscp_app_cnt)
		err = mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP);

	return err;

fw_err:
	/* Firmware failed to clear the mapping — fall back to PCP trust */
	mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP);
	return err;
}
514
515 static int mlx5e_dcbnl_ieee_getmaxrate(struct net_device *netdev,
516 struct ieee_maxrate *maxrate)
517 {
518 struct mlx5e_priv *priv = netdev_priv(netdev);
519 struct mlx5_core_dev *mdev = priv->mdev;
520 u8 max_bw_value[IEEE_8021QAZ_MAX_TCS];
521 u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS];
522 int err;
523 int i;
524
525 err = mlx5_query_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
526 if (err)
527 return err;
528
529 memset(maxrate->tc_maxrate, 0, sizeof(maxrate->tc_maxrate));
530
531 for (i = 0; i <= mlx5_max_tc(mdev); i++) {
532 switch (max_bw_unit[i]) {
533 case MLX5_100_MBPS_UNIT:
534 maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_100MB;
535 break;
536 case MLX5_GBPS_UNIT:
537 maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_1GB;
538 break;
539 case MLX5_BW_NO_LIMIT:
540 break;
541 default:
542 WARN(true, "non-supported BW unit");
543 break;
544 }
545 }
546
547 return 0;
548 }
549
550 static int mlx5e_dcbnl_ieee_setmaxrate(struct net_device *netdev,
551 struct ieee_maxrate *maxrate)
552 {
553 struct mlx5e_priv *priv = netdev_priv(netdev);
554 struct mlx5_core_dev *mdev = priv->mdev;
555 u8 max_bw_value[IEEE_8021QAZ_MAX_TCS];
556 u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS];
557 __u64 upper_limit_mbps = roundup(255 * MLX5E_100MB, MLX5E_1GB);
558 int i;
559
560 memset(max_bw_value, 0, sizeof(max_bw_value));
561 memset(max_bw_unit, 0, sizeof(max_bw_unit));
562
563 for (i = 0; i <= mlx5_max_tc(mdev); i++) {
564 if (!maxrate->tc_maxrate[i]) {
565 max_bw_unit[i] = MLX5_BW_NO_LIMIT;
566 continue;
567 }
568 if (maxrate->tc_maxrate[i] < upper_limit_mbps) {
569 max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
570 MLX5E_100MB);
571 max_bw_value[i] = max_bw_value[i] ? max_bw_value[i] : 1;
572 max_bw_unit[i] = MLX5_100_MBPS_UNIT;
573 } else {
574 max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
575 MLX5E_1GB);
576 max_bw_unit[i] = MLX5_GBPS_UNIT;
577 }
578 }
579
580 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
581 mlx5e_dbg(HW, priv, "%s: tc_%d <=> max_bw %d Gbps\n",
582 __func__, i, max_bw_value[i]);
583 }
584
585 return mlx5_modify_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
586 }
587
/* dcbnl .setall (CEE) callback: commit the cached CEE configuration to
 * firmware by translating it into IEEE ETS/PFC structures and reusing the
 * IEEE paths.  Returns MLX5_DCB_CHG_RESET on success, MLX5_DCB_NO_CHG on
 * any failure.
 */
static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct ieee_ets ets;
	struct ieee_pfc pfc;
	int err = -EOPNOTSUPP;
	int i;

	if (!MLX5_CAP_GEN(mdev, ets))
		goto out;

	memset(&ets, 0, sizeof(ets));
	memset(&pfc, 0, sizeof(pfc));

	/* Build an IEEE ETS config from the cached CEE priority groups */
	ets.ets_cap = IEEE_8021QAZ_MAX_TCS;
	for (i = 0; i < CEE_DCBX_MAX_PGS; i++) {
		ets.tc_tx_bw[i] = cee_cfg->pg_bw_pct[i];
		ets.tc_rx_bw[i] = cee_cfg->pg_bw_pct[i];
		ets.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
		ets.prio_tc[i] = cee_cfg->prio_to_pg_map[i];
		mlx5e_dbg(HW, priv,
			  "%s: Priority group %d: tx_bw %d, rx_bw %d, prio_tc %d\n",
			  __func__, i, ets.tc_tx_bw[i], ets.tc_rx_bw[i],
			  ets.prio_tc[i]);
	}

	err = mlx5e_dbcnl_validate_ets(netdev, &ets);
	if (err) {
		netdev_err(netdev,
			   "%s, Failed to validate ETS: %d\n", __func__, err);
		goto out;
	}

	err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
	if (err) {
		netdev_err(netdev,
			   "%s, Failed to set ETS: %d\n", __func__, err);
		goto out;
	}

	/* Set PFC */
	pfc.pfc_cap = mlx5_max_tc(mdev) + 1;
	if (!cee_cfg->pfc_enable)
		pfc.pfc_en = 0;
	else
		/* Build the per-priority PFC bitmap from cached settings */
		for (i = 0; i < CEE_DCBX_MAX_PRIO; i++)
			pfc.pfc_en |= cee_cfg->pfc_setting[i] << i;

	err = mlx5e_dcbnl_ieee_setpfc(netdev, &pfc);
	if (err) {
		netdev_err(netdev,
			   "%s, Failed to set PFC: %d\n", __func__, err);
		goto out;
	}
out:
	return err ? MLX5_DCB_NO_CHG : MLX5_DCB_CHG_RESET;
}
647
/* dcbnl .getstate (CEE) callback: DCB is always reported as enabled */
static u8 mlx5e_dcbnl_getstate(struct net_device *netdev)
{
	return MLX5E_CEE_STATE_UP;
}
652
653 static void mlx5e_dcbnl_getpermhwaddr(struct net_device *netdev,
654 u8 *perm_addr)
655 {
656 struct mlx5e_priv *priv = netdev_priv(netdev);
657
658 if (!perm_addr)
659 return;
660
661 memset(perm_addr, 0xff, MAX_ADDR_LEN);
662
663 mlx5_query_nic_vport_mac_address(priv->mdev, 0, perm_addr);
664 }
665
666 static void mlx5e_dcbnl_setpgtccfgtx(struct net_device *netdev,
667 int priority, u8 prio_type,
668 u8 pgid, u8 bw_pct, u8 up_map)
669 {
670 struct mlx5e_priv *priv = netdev_priv(netdev);
671 struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
672
673 if (priority >= CEE_DCBX_MAX_PRIO) {
674 netdev_err(netdev,
675 "%s, priority is out of range\n", __func__);
676 return;
677 }
678
679 if (pgid >= CEE_DCBX_MAX_PGS) {
680 netdev_err(netdev,
681 "%s, priority group is out of range\n", __func__);
682 return;
683 }
684
685 cee_cfg->prio_to_pg_map[priority] = pgid;
686 }
687
688 static void mlx5e_dcbnl_setpgbwgcfgtx(struct net_device *netdev,
689 int pgid, u8 bw_pct)
690 {
691 struct mlx5e_priv *priv = netdev_priv(netdev);
692 struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
693
694 if (pgid >= CEE_DCBX_MAX_PGS) {
695 netdev_err(netdev,
696 "%s, priority group is out of range\n", __func__);
697 return;
698 }
699
700 cee_cfg->pg_bw_pct[pgid] = bw_pct;
701 }
702
703 static void mlx5e_dcbnl_getpgtccfgtx(struct net_device *netdev,
704 int priority, u8 *prio_type,
705 u8 *pgid, u8 *bw_pct, u8 *up_map)
706 {
707 struct mlx5e_priv *priv = netdev_priv(netdev);
708 struct mlx5_core_dev *mdev = priv->mdev;
709
710 if (!MLX5_CAP_GEN(priv->mdev, ets)) {
711 netdev_err(netdev, "%s, ets is not supported\n", __func__);
712 return;
713 }
714
715 if (priority >= CEE_DCBX_MAX_PRIO) {
716 netdev_err(netdev,
717 "%s, priority is out of range\n", __func__);
718 return;
719 }
720
721 *prio_type = 0;
722 *bw_pct = 0;
723 *up_map = 0;
724
725 if (mlx5_query_port_prio_tc(mdev, priority, pgid))
726 *pgid = 0;
727 }
728
729 static void mlx5e_dcbnl_getpgbwgcfgtx(struct net_device *netdev,
730 int pgid, u8 *bw_pct)
731 {
732 struct ieee_ets ets;
733
734 if (pgid >= CEE_DCBX_MAX_PGS) {
735 netdev_err(netdev,
736 "%s, priority group is out of range\n", __func__);
737 return;
738 }
739
740 mlx5e_dcbnl_ieee_getets(netdev, &ets);
741 *bw_pct = ets.tc_tx_bw[pgid];
742 }
743
744 static void mlx5e_dcbnl_setpfccfg(struct net_device *netdev,
745 int priority, u8 setting)
746 {
747 struct mlx5e_priv *priv = netdev_priv(netdev);
748 struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
749
750 if (priority >= CEE_DCBX_MAX_PRIO) {
751 netdev_err(netdev,
752 "%s, priority is out of range\n", __func__);
753 return;
754 }
755
756 if (setting > 1)
757 return;
758
759 cee_cfg->pfc_setting[priority] = setting;
760 }
761
762 static int
763 mlx5e_dcbnl_get_priority_pfc(struct net_device *netdev,
764 int priority, u8 *setting)
765 {
766 struct ieee_pfc pfc;
767 int err;
768
769 err = mlx5e_dcbnl_ieee_getpfc(netdev, &pfc);
770
771 if (err)
772 *setting = 0;
773 else
774 *setting = (pfc.pfc_en >> priority) & 0x01;
775
776 return err;
777 }
778
779 static void mlx5e_dcbnl_getpfccfg(struct net_device *netdev,
780 int priority, u8 *setting)
781 {
782 if (priority >= CEE_DCBX_MAX_PRIO) {
783 netdev_err(netdev,
784 "%s, priority is out of range\n", __func__);
785 return;
786 }
787
788 if (!setting)
789 return;
790
791 mlx5e_dcbnl_get_priority_pfc(netdev, priority, setting);
792 }
793
794 static u8 mlx5e_dcbnl_getcap(struct net_device *netdev,
795 int capid, u8 *cap)
796 {
797 struct mlx5e_priv *priv = netdev_priv(netdev);
798 struct mlx5_core_dev *mdev = priv->mdev;
799 u8 rval = 0;
800
801 switch (capid) {
802 case DCB_CAP_ATTR_PG:
803 *cap = true;
804 break;
805 case DCB_CAP_ATTR_PFC:
806 *cap = true;
807 break;
808 case DCB_CAP_ATTR_UP2TC:
809 *cap = false;
810 break;
811 case DCB_CAP_ATTR_PG_TCS:
812 *cap = 1 << mlx5_max_tc(mdev);
813 break;
814 case DCB_CAP_ATTR_PFC_TCS:
815 *cap = 1 << mlx5_max_tc(mdev);
816 break;
817 case DCB_CAP_ATTR_GSP:
818 *cap = false;
819 break;
820 case DCB_CAP_ATTR_BCN:
821 *cap = false;
822 break;
823 case DCB_CAP_ATTR_DCBX:
824 *cap = priv->dcbx.cap |
825 DCB_CAP_DCBX_VER_CEE |
826 DCB_CAP_DCBX_VER_IEEE;
827 break;
828 default:
829 *cap = 0;
830 rval = 1;
831 break;
832 }
833
834 return rval;
835 }
836
837 static int mlx5e_dcbnl_getnumtcs(struct net_device *netdev,
838 int tcs_id, u8 *num)
839 {
840 struct mlx5e_priv *priv = netdev_priv(netdev);
841 struct mlx5_core_dev *mdev = priv->mdev;
842
843 switch (tcs_id) {
844 case DCB_NUMTCS_ATTR_PG:
845 case DCB_NUMTCS_ATTR_PFC:
846 *num = mlx5_max_tc(mdev) + 1;
847 break;
848 default:
849 return -EINVAL;
850 }
851
852 return 0;
853 }
854
855 static u8 mlx5e_dcbnl_getpfcstate(struct net_device *netdev)
856 {
857 struct ieee_pfc pfc;
858
859 if (mlx5e_dcbnl_ieee_getpfc(netdev, &pfc))
860 return MLX5E_CEE_STATE_DOWN;
861
862 return pfc.pfc_en ? MLX5E_CEE_STATE_UP : MLX5E_CEE_STATE_DOWN;
863 }
864
865 static void mlx5e_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
866 {
867 struct mlx5e_priv *priv = netdev_priv(netdev);
868 struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
869
870 if ((state != MLX5E_CEE_STATE_UP) && (state != MLX5E_CEE_STATE_DOWN))
871 return;
872
873 cee_cfg->pfc_enable = state;
874 }
875
/* DCB netlink operations: IEEE 802.1Qaz callbacks plus a CEE compatibility
 * surface.  The CEE "set" callbacks only cache values in
 * priv->dcbx.cee_cfg; mlx5e_dcbnl_setall() commits them to firmware.
 */
const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops = {
	.ieee_getets = mlx5e_dcbnl_ieee_getets,
	.ieee_setets = mlx5e_dcbnl_ieee_setets,
	.ieee_getmaxrate = mlx5e_dcbnl_ieee_getmaxrate,
	.ieee_setmaxrate = mlx5e_dcbnl_ieee_setmaxrate,
	.ieee_getpfc = mlx5e_dcbnl_ieee_getpfc,
	.ieee_setpfc = mlx5e_dcbnl_ieee_setpfc,
	.ieee_setapp = mlx5e_dcbnl_ieee_setapp,
	.ieee_delapp = mlx5e_dcbnl_ieee_delapp,
	.getdcbx = mlx5e_dcbnl_getdcbx,
	.setdcbx = mlx5e_dcbnl_setdcbx,

	/* CEE interfaces */
	.setall = mlx5e_dcbnl_setall,
	.getstate = mlx5e_dcbnl_getstate,
	.getpermhwaddr = mlx5e_dcbnl_getpermhwaddr,

	.setpgtccfgtx = mlx5e_dcbnl_setpgtccfgtx,
	.setpgbwgcfgtx = mlx5e_dcbnl_setpgbwgcfgtx,
	.getpgtccfgtx = mlx5e_dcbnl_getpgtccfgtx,
	.getpgbwgcfgtx = mlx5e_dcbnl_getpgbwgcfgtx,

	.setpfccfg = mlx5e_dcbnl_setpfccfg,
	.getpfccfg = mlx5e_dcbnl_getpfccfg,
	.getcap = mlx5e_dcbnl_getcap,
	.getnumtcs = mlx5e_dcbnl_getnumtcs,
	.getpfcstate = mlx5e_dcbnl_getpfcstate,
	.setpfcstate = mlx5e_dcbnl_setpfcstate,
};
905
906 static void mlx5e_dcbnl_query_dcbx_mode(struct mlx5e_priv *priv,
907 enum mlx5_dcbx_oper_mode *mode)
908 {
909 u32 out[MLX5_ST_SZ_DW(dcbx_param)];
910
911 *mode = MLX5E_DCBX_PARAM_VER_OPER_HOST;
912
913 if (!mlx5_query_port_dcbx_param(priv->mdev, out))
914 *mode = MLX5_GET(dcbx_param, out, version_oper);
915
916 /* From driver's point of view, we only care if the mode
917 * is host (HOST) or non-host (AUTO)
918 */
919 if (*mode != MLX5E_DCBX_PARAM_VER_OPER_HOST)
920 *mode = MLX5E_DCBX_PARAM_VER_OPER_AUTO;
921 }
922
923 static void mlx5e_ets_init(struct mlx5e_priv *priv)
924 {
925 struct ieee_ets ets;
926 int err;
927 int i;
928
929 if (!MLX5_CAP_GEN(priv->mdev, ets))
930 return;
931
932 memset(&ets, 0, sizeof(ets));
933 ets.ets_cap = mlx5_max_tc(priv->mdev) + 1;
934 for (i = 0; i < ets.ets_cap; i++) {
935 ets.tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
936 ets.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
937 ets.prio_tc[i] = i;
938 }
939
940 if (ets.ets_cap > 1) {
941 /* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
942 ets.prio_tc[0] = 1;
943 ets.prio_tc[1] = 0;
944 }
945
946 err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
947 if (err)
948 netdev_err(priv->netdev,
949 "%s, Failed to init ETS: %d\n", __func__, err);
950 }
951
/* Actions for mlx5e_dcbnl_dscp_app() */
enum {
	INIT,	/* register all dscp->prio APP entries with dcbnl */
	DELETE,	/* remove them again */
};
956
957 static void mlx5e_dcbnl_dscp_app(struct mlx5e_priv *priv, int action)
958 {
959 struct dcb_app temp;
960 int i;
961
962 if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
963 return;
964
965 if (!MLX5_DSCP_SUPPORTED(priv->mdev))
966 return;
967
968 /* No SEL_DSCP entry in non DSCP state */
969 if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_DSCP)
970 return;
971
972 temp.selector = IEEE_8021QAZ_APP_SEL_DSCP;
973 for (i = 0; i < MLX5E_MAX_DSCP; i++) {
974 temp.protocol = i;
975 temp.priority = priv->dcbx_dp.dscp2prio[i];
976 if (action == INIT)
977 dcb_ieee_setapp(priv->netdev, &temp);
978 else
979 dcb_ieee_delapp(priv->netdev, &temp);
980 }
981
982 priv->dcbx.dscp_app_cnt = (action == INIT) ? MLX5E_MAX_DSCP : 0;
983 }
984
/* Populate the dcbnl APP table from the current dscp->prio state */
void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv)
{
	mlx5e_dcbnl_dscp_app(priv, INIT);
}
989
/* Remove the dscp->prio APP entries previously registered with dcbnl */
void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv)
{
	mlx5e_dcbnl_dscp_app(priv, DELETE);
}
994
995 static void mlx5e_trust_update_tx_min_inline_mode(struct mlx5e_priv *priv,
996 struct mlx5e_params *params)
997 {
998 params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(priv->mdev);
999 if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP &&
1000 params->tx_min_inline_mode == MLX5_INLINE_MODE_L2)
1001 params->tx_min_inline_mode = MLX5_INLINE_MODE_IP;
1002 }
1003
/* Re-open the channels if a trust-state change requires a different TX
 * minimum inline mode.  Takes priv->state_lock for the whole operation.
 */
static void mlx5e_trust_update_sq_inline_mode(struct mlx5e_priv *priv)
{
	struct mlx5e_channels new_channels = {};

	mutex_lock(&priv->state_lock);

	/* Nothing to do while the interface is closed; the mode is
	 * recalculated on the next open.
	 */
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		goto out;

	new_channels.params = priv->channels.params;
	mlx5e_trust_update_tx_min_inline_mode(priv, &new_channels.params);

	/* Skip if tx_min_inline is the same */
	if (new_channels.params.tx_min_inline_mode ==
	    priv->channels.params.tx_min_inline_mode)
		goto out;

	/* NOTE(review): a failure of mlx5e_open_channels() is silently
	 * ignored and the old channels stay active — presumably deliberate
	 * best-effort behavior; confirm before changing.
	 */
	if (mlx5e_open_channels(priv, &new_channels))
		goto out;
	mlx5e_switch_priv_channels(priv, &new_channels, NULL);

out:
	mutex_unlock(&priv->state_lock);
}
1028
1029 static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
1030 {
1031 int err;
1032
1033 err = mlx5_set_trust_state(priv->mdev, trust_state);
1034 if (err)
1035 return err;
1036 priv->dcbx_dp.trust_state = trust_state;
1037 mlx5e_trust_update_sq_inline_mode(priv);
1038
1039 return err;
1040 }
1041
1042 static int mlx5e_set_dscp2prio(struct mlx5e_priv *priv, u8 dscp, u8 prio)
1043 {
1044 int err;
1045
1046 err = mlx5_set_dscp2prio(priv->mdev, dscp, prio);
1047 if (err)
1048 return err;
1049
1050 priv->dcbx_dp.dscp2prio[dscp] = prio;
1051 return err;
1052 }
1053
1054 static int mlx5e_trust_initialize(struct mlx5e_priv *priv)
1055 {
1056 struct mlx5_core_dev *mdev = priv->mdev;
1057 int err;
1058
1059 if (!MLX5_DSCP_SUPPORTED(mdev))
1060 return 0;
1061
1062 err = mlx5_query_trust_state(priv->mdev, &priv->dcbx_dp.trust_state);
1063 if (err)
1064 return err;
1065
1066 mlx5e_trust_update_tx_min_inline_mode(priv, &priv->channels.params);
1067
1068 err = mlx5_query_dscp2prio(priv->mdev, priv->dcbx_dp.dscp2prio);
1069 if (err)
1070 return err;
1071
1072 return 0;
1073 }
1074
1075 void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
1076 {
1077 struct mlx5e_dcbx *dcbx = &priv->dcbx;
1078
1079 mlx5e_trust_initialize(priv);
1080
1081 if (!MLX5_CAP_GEN(priv->mdev, qos))
1082 return;
1083
1084 if (MLX5_CAP_GEN(priv->mdev, dcbx))
1085 mlx5e_dcbnl_query_dcbx_mode(priv, &dcbx->mode);
1086
1087 priv->dcbx.cap = DCB_CAP_DCBX_VER_CEE |
1088 DCB_CAP_DCBX_VER_IEEE;
1089 if (priv->dcbx.mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
1090 priv->dcbx.cap |= DCB_CAP_DCBX_HOST;
1091
1092 mlx5e_ets_init(priv);
1093 }