Source: git.proxmox.com mirror of mirror_ubuntu-hirsute-kernel.git — blame view of
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
(at commit "net: hns3: fix get wrong pfc_en when query PFC configuration")
CommitLineData
d71d8381
JS
1// SPDX-License-Identifier: GPL-2.0+
2// Copyright (c) 2016-2017 Hisilicon Limited.
cacde272
YL
3
4#include "hclge_main.h"
a61432d4 5#include "hclge_dcb.h"
cacde272
YL
6#include "hclge_tm.h"
7#include "hnae3.h"
8
9#define BW_PERCENT 100
10
11static int hclge_ieee_ets_to_tm_info(struct hclge_dev *hdev,
12 struct ieee_ets *ets)
13{
14 u8 i;
15
16 for (i = 0; i < HNAE3_MAX_TC; i++) {
17 switch (ets->tc_tsa[i]) {
18 case IEEE_8021QAZ_TSA_STRICT:
19 hdev->tm_info.tc_info[i].tc_sch_mode =
20 HCLGE_SCH_MODE_SP;
21 hdev->tm_info.pg_info[0].tc_dwrr[i] = 0;
22 break;
23 case IEEE_8021QAZ_TSA_ETS:
24 hdev->tm_info.tc_info[i].tc_sch_mode =
25 HCLGE_SCH_MODE_DWRR;
26 hdev->tm_info.pg_info[0].tc_dwrr[i] =
27 ets->tc_tx_bw[i];
28 break;
29 default:
30 /* Hardware only supports SP (strict priority)
31 * or ETS (enhanced transmission selection)
32 * algorithms, if we receive some other value
33 * from dcbnl, then throw an error.
34 */
35 return -EINVAL;
36 }
37 }
38
e432abfb
YL
39 hclge_tm_prio_tc_info_update(hdev, ets->prio_tc);
40
41 return 0;
cacde272
YL
42}
43
44static void hclge_tm_info_to_ieee_ets(struct hclge_dev *hdev,
45 struct ieee_ets *ets)
46{
47 u32 i;
48
49 memset(ets, 0, sizeof(*ets));
50 ets->willing = 1;
51 ets->ets_cap = hdev->tc_max;
52
53 for (i = 0; i < HNAE3_MAX_TC; i++) {
54 ets->prio_tc[i] = hdev->tm_info.prio_tc[i];
55 ets->tc_tx_bw[i] = hdev->tm_info.pg_info[0].tc_dwrr[i];
56
57 if (hdev->tm_info.tc_info[i].tc_sch_mode ==
58 HCLGE_SCH_MODE_SP)
59 ets->tc_tsa[i] = IEEE_8021QAZ_TSA_STRICT;
60 else
61 ets->tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
62 }
63}
64
65/* IEEE std */
66static int hclge_ieee_getets(struct hnae3_handle *h, struct ieee_ets *ets)
67{
68 struct hclge_vport *vport = hclge_get_vport(h);
69 struct hclge_dev *hdev = vport->back;
70
71 hclge_tm_info_to_ieee_ets(hdev, ets);
72
73 return 0;
74}
75
e432abfb
YL
76static int hclge_dcb_common_validate(struct hclge_dev *hdev, u8 num_tc,
77 u8 *prio_tc)
78{
79 int i;
80
81 if (num_tc > hdev->tc_max) {
82 dev_err(&hdev->pdev->dev,
83 "tc num checking failed, %u > tc_max(%u)\n",
84 num_tc, hdev->tc_max);
85 return -EINVAL;
86 }
87
88 for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
89 if (prio_tc[i] >= num_tc) {
90 dev_err(&hdev->pdev->dev,
adcf738b 91 "prio_tc[%d] checking failed, %u >= num_tc(%u)\n",
e432abfb
YL
92 i, prio_tc[i], num_tc);
93 return -EINVAL;
94 }
95 }
96
de67a690
YL
97 if (num_tc > hdev->vport[0].alloc_tqps) {
98 dev_err(&hdev->pdev->dev,
99 "allocated tqp checking failed, %u > tqp(%u)\n",
100 num_tc, hdev->vport[0].alloc_tqps);
101 return -EINVAL;
e432abfb
YL
102 }
103
104 return 0;
105}
106
cacde272
YL
107static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
108 u8 *tc, bool *changed)
109{
adefc0a2 110 bool has_ets_tc = false;
cacde272
YL
111 u32 total_ets_bw = 0;
112 u8 max_tc = 0;
e432abfb 113 int ret;
cacde272
YL
114 u8 i;
115
e432abfb 116 for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
cacde272
YL
117 if (ets->prio_tc[i] != hdev->tm_info.prio_tc[i])
118 *changed = true;
119
120 if (ets->prio_tc[i] > max_tc)
121 max_tc = ets->prio_tc[i];
e432abfb 122 }
cacde272 123
e432abfb
YL
124 ret = hclge_dcb_common_validate(hdev, max_tc + 1, ets->prio_tc);
125 if (ret)
126 return ret;
127
c2d56897 128 for (i = 0; i < hdev->tc_max; i++) {
cacde272
YL
129 switch (ets->tc_tsa[i]) {
130 case IEEE_8021QAZ_TSA_STRICT:
131 if (hdev->tm_info.tc_info[i].tc_sch_mode !=
132 HCLGE_SCH_MODE_SP)
133 *changed = true;
134 break;
135 case IEEE_8021QAZ_TSA_ETS:
136 if (hdev->tm_info.tc_info[i].tc_sch_mode !=
137 HCLGE_SCH_MODE_DWRR)
138 *changed = true;
139
140 total_ets_bw += ets->tc_tx_bw[i];
adefc0a2
JS
141 has_ets_tc = true;
142 break;
cacde272
YL
143 default:
144 return -EINVAL;
145 }
146 }
147
adefc0a2 148 if (has_ets_tc && total_ets_bw != BW_PERCENT)
cacde272
YL
149 return -EINVAL;
150
151 *tc = max_tc + 1;
152 if (*tc != hdev->tm_info.num_tc)
153 *changed = true;
154
155 return 0;
156}
157
a1ef124e 158static int hclge_map_update(struct hclge_dev *hdev)
cacde272 159{
cacde272
YL
160 int ret;
161
9e5157ba 162 ret = hclge_tm_schd_setup_hw(hdev);
cacde272
YL
163 if (ret)
164 return ret;
165
44e59e37 166 ret = hclge_pause_setup_hw(hdev, false);
cacde272
YL
167 if (ret)
168 return ret;
169
170 ret = hclge_buffer_alloc(hdev);
171 if (ret)
172 return ret;
173
268f5dfa
YL
174 hclge_rss_indir_init_cfg(hdev);
175
cacde272
YL
176 return hclge_rss_init_hw(hdev);
177}
178
179static int hclge_client_setup_tc(struct hclge_dev *hdev)
180{
181 struct hclge_vport *vport = hdev->vport;
182 struct hnae3_client *client;
183 struct hnae3_handle *handle;
184 int ret;
185 u32 i;
186
187 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
188 handle = &vport[i].nic;
189 client = handle->client;
190
191 if (!client || !client->ops || !client->ops->setup_tc)
192 continue;
193
194 ret = client->ops->setup_tc(handle, hdev->tm_info.num_tc);
195 if (ret)
196 return ret;
197 }
198
199 return 0;
200}
201
199d2dd4
YL
202static int hclge_notify_down_uinit(struct hclge_dev *hdev)
203{
204 int ret;
205
206 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
207 if (ret)
208 return ret;
209
210 return hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
211}
212
213static int hclge_notify_init_up(struct hclge_dev *hdev)
214{
215 int ret;
216
217 ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
218 if (ret)
219 return ret;
220
221 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
222}
223
cacde272
YL
224static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
225{
226 struct hclge_vport *vport = hclge_get_vport(h);
1c822948 227 struct net_device *netdev = h->kinfo.netdev;
cacde272
YL
228 struct hclge_dev *hdev = vport->back;
229 bool map_changed = false;
230 u8 num_tc = 0;
231 int ret;
232
30d240df
YL
233 if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
234 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
cacde272
YL
235 return -EINVAL;
236
237 ret = hclge_ets_validate(hdev, ets, &num_tc, &map_changed);
238 if (ret)
239 return ret;
240
af013903 241 if (map_changed) {
1c822948
YL
242 netif_dbg(h, drv, netdev, "set ets\n");
243
199d2dd4 244 ret = hclge_notify_down_uinit(hdev);
af013903
HT
245 if (ret)
246 return ret;
247 }
248
e432abfb 249 hclge_tm_schd_info_update(hdev, num_tc);
cacde272
YL
250
251 ret = hclge_ieee_ets_to_tm_info(hdev, ets);
252 if (ret)
1821dce9 253 goto err_out;
cacde272
YL
254
255 if (map_changed) {
a1ef124e
YL
256 ret = hclge_map_update(hdev);
257 if (ret)
258 goto err_out;
259
cacde272
YL
260 ret = hclge_client_setup_tc(hdev);
261 if (ret)
1821dce9
YL
262 goto err_out;
263
199d2dd4 264 ret = hclge_notify_init_up(hdev);
af013903
HT
265 if (ret)
266 return ret;
cacde272
YL
267 }
268
269 return hclge_tm_dwrr_cfg(hdev);
1821dce9
YL
270
271err_out:
272 if (!map_changed)
273 return ret;
274
199d2dd4 275 hclge_notify_init_up(hdev);
1821dce9 276
1821dce9 277 return ret;
cacde272
YL
278}
279
280static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
281{
64fd2300 282 u64 requests[HNAE3_MAX_TC], indications[HNAE3_MAX_TC];
cacde272
YL
283 struct hclge_vport *vport = hclge_get_vport(h);
284 struct hclge_dev *hdev = vport->back;
64fd2300 285 int ret;
bd330dd4 286 u8 i;
cacde272
YL
287
288 memset(pfc, 0, sizeof(*pfc));
289 pfc->pfc_cap = hdev->pfc_max;
bd330dd4 290 pfc->pfc_en = hdev->tm_info.pfc_en;
cacde272 291
64fd2300
PL
292 ret = hclge_pfc_tx_stats_get(hdev, requests);
293 if (ret)
294 return ret;
295
296 ret = hclge_pfc_rx_stats_get(hdev, indications);
297 if (ret)
298 return ret;
299
300 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
301 pfc->requests[i] = requests[i];
302 pfc->indications[i] = indications[i];
303 }
cacde272
YL
304 return 0;
305}
306
307static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
308{
309 struct hclge_vport *vport = hclge_get_vport(h);
1c822948 310 struct net_device *netdev = h->kinfo.netdev;
cacde272
YL
311 struct hclge_dev *hdev = vport->back;
312 u8 i, j, pfc_map, *prio_tc;
aea8cfb3 313 int ret;
cacde272 314
30d240df
YL
315 if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
316 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
cacde272
YL
317 return -EINVAL;
318
d3ad430a
YL
319 if (pfc->pfc_en == hdev->tm_info.pfc_en)
320 return 0;
321
cacde272
YL
322 prio_tc = hdev->tm_info.prio_tc;
323 pfc_map = 0;
324
325 for (i = 0; i < hdev->tm_info.num_tc; i++) {
326 for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) {
327 if ((prio_tc[j] == i) && (pfc->pfc_en & BIT(j))) {
328 pfc_map |= BIT(i);
329 break;
330 }
331 }
332 }
333
cacde272 334 hdev->tm_info.hw_pfc_map = pfc_map;
d3ad430a 335 hdev->tm_info.pfc_en = pfc->pfc_en;
cacde272 336
1c822948 337 netif_dbg(h, drv, netdev,
96e65abb 338 "set pfc: pfc_en=%x, pfc_map=%x, num_tc=%u\n",
1c822948
YL
339 pfc->pfc_en, pfc_map, hdev->tm_info.num_tc);
340
ae179b2f
YL
341 hclge_tm_pfc_info_update(hdev);
342
aea8cfb3
YL
343 ret = hclge_pause_setup_hw(hdev, false);
344 if (ret)
345 return ret;
346
347 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
348 if (ret)
349 return ret;
350
351 ret = hclge_buffer_alloc(hdev);
352 if (ret) {
353 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
354 return ret;
355 }
356
357 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
cacde272
YL
358}
359
360/* DCBX configuration */
361static u8 hclge_getdcbx(struct hnae3_handle *h)
362{
363 struct hclge_vport *vport = hclge_get_vport(h);
364 struct hclge_dev *hdev = vport->back;
365
30d240df
YL
366 if (hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
367 return 0;
368
cacde272
YL
369 return hdev->dcbx_cap;
370}
371
372static u8 hclge_setdcbx(struct hnae3_handle *h, u8 mode)
373{
374 struct hclge_vport *vport = hclge_get_vport(h);
1c822948 375 struct net_device *netdev = h->kinfo.netdev;
cacde272
YL
376 struct hclge_dev *hdev = vport->back;
377
1c822948
YL
378 netif_dbg(h, drv, netdev, "set dcbx: mode=%u\n", mode);
379
cacde272
YL
380 /* No support for LLD_MANAGED modes or CEE */
381 if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
382 (mode & DCB_CAP_DCBX_VER_CEE) ||
383 !(mode & DCB_CAP_DCBX_HOST))
384 return 1;
385
386 hdev->dcbx_cap = mode;
387
388 return 0;
389}
390
5a5c9091
JS
391static int hclge_mqprio_qopt_check(struct hclge_dev *hdev,
392 struct tc_mqprio_qopt_offload *mqprio_qopt)
393{
394 u16 queue_sum = 0;
395 int ret;
396 int i;
397
398 if (!mqprio_qopt->qopt.num_tc) {
399 mqprio_qopt->qopt.num_tc = 1;
400 return 0;
401 }
402
403 ret = hclge_dcb_common_validate(hdev, mqprio_qopt->qopt.num_tc,
404 mqprio_qopt->qopt.prio_tc_map);
405 if (ret)
406 return ret;
407
408 for (i = 0; i < mqprio_qopt->qopt.num_tc; i++) {
409 if (!is_power_of_2(mqprio_qopt->qopt.count[i])) {
410 dev_err(&hdev->pdev->dev,
411 "qopt queue count must be power of 2\n");
412 return -EINVAL;
413 }
414
f1c2e66d 415 if (mqprio_qopt->qopt.count[i] > hdev->pf_rss_size_max) {
5a5c9091
JS
416 dev_err(&hdev->pdev->dev,
417 "qopt queue count should be no more than %u\n",
f1c2e66d 418 hdev->pf_rss_size_max);
5a5c9091
JS
419 return -EINVAL;
420 }
421
422 if (mqprio_qopt->qopt.offset[i] != queue_sum) {
423 dev_err(&hdev->pdev->dev,
424 "qopt queue offset must start from 0, and being continuous\n");
425 return -EINVAL;
426 }
427
428 if (mqprio_qopt->min_rate[i] || mqprio_qopt->max_rate[i]) {
429 dev_err(&hdev->pdev->dev,
430 "qopt tx_rate is not supported\n");
431 return -EOPNOTSUPP;
432 }
433
434 queue_sum = mqprio_qopt->qopt.offset[i];
435 queue_sum += mqprio_qopt->qopt.count[i];
436 }
437 if (hdev->vport[0].alloc_tqps < queue_sum) {
438 dev_err(&hdev->pdev->dev,
439 "qopt queue count sum should be less than %u\n",
440 hdev->vport[0].alloc_tqps);
441 return -EINVAL;
442 }
443
444 return 0;
445}
446
447static void hclge_sync_mqprio_qopt(struct hnae3_tc_info *tc_info,
448 struct tc_mqprio_qopt_offload *mqprio_qopt)
449{
450 int i;
451
452 memset(tc_info, 0, sizeof(*tc_info));
453 tc_info->num_tc = mqprio_qopt->qopt.num_tc;
454 memcpy(tc_info->prio_tc, mqprio_qopt->qopt.prio_tc_map,
455 sizeof_field(struct hnae3_tc_info, prio_tc));
456 memcpy(tc_info->tqp_count, mqprio_qopt->qopt.count,
457 sizeof_field(struct hnae3_tc_info, tqp_count));
458 memcpy(tc_info->tqp_offset, mqprio_qopt->qopt.offset,
459 sizeof_field(struct hnae3_tc_info, tqp_offset));
460
461 for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
462 set_bit(tc_info->prio_tc[i], &tc_info->tc_en);
463}
464
465static int hclge_config_tc(struct hclge_dev *hdev,
466 struct hnae3_tc_info *tc_info)
467{
468 int i;
469
470 hclge_tm_schd_info_update(hdev, tc_info->num_tc);
471 for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
472 hdev->tm_info.prio_tc[i] = tc_info->prio_tc[i];
473
474 return hclge_map_update(hdev);
475}
476
30d240df 477/* Set up TC for hardware offloaded mqprio in channel mode */
5a5c9091
JS
478static int hclge_setup_tc(struct hnae3_handle *h,
479 struct tc_mqprio_qopt_offload *mqprio_qopt)
30d240df
YL
480{
481 struct hclge_vport *vport = hclge_get_vport(h);
5a5c9091 482 struct hnae3_knic_private_info *kinfo;
30d240df 483 struct hclge_dev *hdev = vport->back;
5a5c9091
JS
484 struct hnae3_tc_info old_tc_info;
485 u8 tc = mqprio_qopt->qopt.num_tc;
30d240df
YL
486 int ret;
487
5a5c9091
JS
488 /* if client unregistered, it's not allowed to change
489 * mqprio configuration, which may cause uninit ring
490 * fail.
491 */
492 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
493 return -EBUSY;
494
30d240df
YL
495 if (hdev->flag & HCLGE_FLAG_DCB_ENABLE)
496 return -EINVAL;
497
5a5c9091
JS
498 ret = hclge_mqprio_qopt_check(hdev, mqprio_qopt);
499 if (ret) {
500 dev_err(&hdev->pdev->dev,
501 "failed to check mqprio qopt params, ret = %d\n", ret);
502 return ret;
503 }
30d240df 504
199d2dd4 505 ret = hclge_notify_down_uinit(hdev);
c2a39d98
YL
506 if (ret)
507 return ret;
508
5a5c9091
JS
509 kinfo = &vport->nic.kinfo;
510 memcpy(&old_tc_info, &kinfo->tc_info, sizeof(old_tc_info));
511 hclge_sync_mqprio_qopt(&kinfo->tc_info, mqprio_qopt);
512 kinfo->tc_info.mqprio_active = tc > 0;
c2a39d98 513
5a5c9091 514 ret = hclge_config_tc(hdev, &kinfo->tc_info);
1cce5eb6
YL
515 if (ret)
516 goto err_out;
30d240df
YL
517
518 hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
519
520 if (tc > 1)
521 hdev->flag |= HCLGE_FLAG_MQPRIO_ENABLE;
522 else
523 hdev->flag &= ~HCLGE_FLAG_MQPRIO_ENABLE;
524
199d2dd4 525 return hclge_notify_init_up(hdev);
1cce5eb6
YL
526
527err_out:
5a5c9091
JS
528 /* roll-back */
529 memcpy(&kinfo->tc_info, &old_tc_info, sizeof(old_tc_info));
530 if (hclge_config_tc(hdev, &kinfo->tc_info))
531 dev_err(&hdev->pdev->dev,
532 "failed to roll back tc configuration\n");
533
199d2dd4 534 hclge_notify_init_up(hdev);
1cce5eb6 535
1cce5eb6 536 return ret;
30d240df
YL
537}
538
cacde272
YL
539static const struct hnae3_dcb_ops hns3_dcb_ops = {
540 .ieee_getets = hclge_ieee_getets,
541 .ieee_setets = hclge_ieee_setets,
542 .ieee_getpfc = hclge_ieee_getpfc,
543 .ieee_setpfc = hclge_ieee_setpfc,
544 .getdcbx = hclge_getdcbx,
545 .setdcbx = hclge_setdcbx,
30d240df 546 .setup_tc = hclge_setup_tc,
cacde272
YL
547};
548
549void hclge_dcb_ops_set(struct hclge_dev *hdev)
550{
551 struct hclge_vport *vport = hdev->vport;
552 struct hnae3_knic_private_info *kinfo;
553
554 /* Hdev does not support DCB or vport is
555 * not a pf, then dcb_ops is not set.
556 */
557 if (!hnae3_dev_dcb_supported(hdev) ||
558 vport->vport_id != 0)
559 return;
560
561 kinfo = &vport->nic.kinfo;
562 kinfo->dcb_ops = &hns3_dcb_ops;
563 hdev->dcbx_cap = DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_HOST;
564}