/*
 * Copyright (c) 2016~2017 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/etherdevice.h>

#include "hclge_cmd.h"
#include "hclge_main.h"
#include "hclge_tm.h"

enum hclge_shaper_level {
	HCLGE_SHAPER_LVL_PRI	= 0,
	HCLGE_SHAPER_LVL_PG	= 1,
	HCLGE_SHAPER_LVL_PORT	= 2,
	HCLGE_SHAPER_LVL_QSET	= 3,
	HCLGE_SHAPER_LVL_CNT	= 4,
	HCLGE_SHAPER_LVL_VF	= 0,
	HCLGE_SHAPER_LVL_PF	= 1,
};

#define HCLGE_SHAPER_BS_U_DEF	1
#define HCLGE_SHAPER_BS_S_DEF	4

#define HCLGE_ETHER_MAX_RATE	100000

/* hclge_shaper_para_calc: calculate ir parameter for the shaper
 * @ir: rate to be configured, in Mbps
 * @shaper_level: the shaper level, e.g. port, pg, priority, qset
 * @ir_b: IR_B parameter of IR shaper
 * @ir_u: IR_U parameter of IR shaper
 * @ir_s: IR_S parameter of IR shaper
 *
 * the formula:
 *
 *		IR_b * (2 ^ IR_u) * 8
 * IR(Mbps) = -------------------------  *  CLOCK(1000Mbps)
 *		Tick * (2 ^ IR_s)
 *
 * @return: 0: calculation successful, negative: failure
 */
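/* Worked example (illustrative, not from the original source): at port
 * level, Tick = 6 * 8 = 48, so a request of ir = 100000 Mbps resolves to
 * ir_b = 150, ir_u = 2, ir_s = 0, since
 * 150 * (2 ^ 2) * 8 / (48 * (2 ^ 0)) * 1000 = 100000 Mbps exactly.
 */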
static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
				  u8 *ir_b, u8 *ir_u, u8 *ir_s)
{
	const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
		6 * 256,	/* Priority level */
		6 * 32,		/* Priority group level */
		6 * 8,		/* Port level */
		6 * 256		/* Qset level */
	};
	u8 ir_u_calc = 0, ir_s_calc = 0;
	u32 ir_calc;
	u32 tick;

	/* Calc tick */
	if (shaper_level >= HCLGE_SHAPER_LVL_CNT)
		return -EINVAL;

	tick = tick_array[shaper_level];

	/* Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0;
	 * the formula then reduces to:
	 *		126 * 1 * 8
	 * ir_calc = ---------------- * 1000
	 *		tick * 1
	 */
	ir_calc = (1008000 + (tick >> 1) - 1) / tick;

	if (ir_calc == ir) {
		*ir_b = 126;
		*ir_u = 0;
		*ir_s = 0;

		return 0;
	} else if (ir_calc > ir) {
		/* Increasing the denominator to select ir_s value */
		while (ir_calc > ir) {
			ir_s_calc++;
			ir_calc = 1008000 / (tick * (1 << ir_s_calc));
		}

		if (ir_calc == ir)
			*ir_b = 126;
		else
			*ir_b = (ir * tick * (1 << ir_s_calc) + 4000) / 8000;
	} else {
		/* Increasing the numerator to select ir_u value */
		u32 numerator;

		while (ir_calc < ir) {
			ir_u_calc++;
			numerator = 1008000 * (1 << ir_u_calc);
			ir_calc = (numerator + (tick >> 1)) / tick;
		}

		if (ir_calc == ir) {
			*ir_b = 126;
		} else {
			u32 denominator = (8000 * (1 << --ir_u_calc));
			*ir_b = (ir * tick + (denominator >> 1)) / denominator;
		}
	}

	*ir_u = ir_u_calc;
	*ir_s = ir_s_calc;

	return 0;
}

static int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PAUSE_EN, false);

	desc.data[0] = cpu_to_le32((tx ? HCLGE_TX_MAC_PAUSE_EN_MSK : 0) |
				   (rx ? HCLGE_RX_MAC_PAUSE_EN_MSK : 0));

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
				  u8 pfc_bitmap)
{
	struct hclge_desc desc;
	struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)&desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false);

	pfc->tx_rx_en_bitmap = tx_rx_bitmap;
	pfc->pri_en_bitmap = pfc_bitmap;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
{
	u8 tc;

	tc = hdev->tm_info.prio_tc[pri_id];

	if (tc >= hdev->tm_info.num_tc)
		return -EINVAL;

	/* The register for priority has four bytes; the first byte covers
	 * priority 0 and priority 1, with the higher 4 bits standing for
	 * priority 1 and the lower 4 bits for priority 0, as below:
	 * first byte:	| pri_1 | pri_0 |
	 * second byte:	| pri_3 | pri_2 |
	 * third byte:	| pri_5 | pri_4 |
	 * fourth byte:	| pri_7 | pri_6 |
	 */
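	/* e.g. (illustrative): pri_id = 5 with tc = 3 lands in the high
	 * nibble of the third byte: pri[2] |= 3 << 4
	 */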
	pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4);

	return 0;
}

static int hclge_up_to_tc_map(struct hclge_dev *hdev)
{
	struct hclge_desc desc;
	u8 *pri = (u8 *)desc.data;
	u8 pri_id;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);

	for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
		ret = hclge_fill_pri_array(hdev, pri, pri_id);
		if (ret)
			return ret;
	}

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
				      u8 pg_id, u8 pri_bit_map)
{
	struct hclge_pg_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, false);

	map = (struct hclge_pg_to_pri_link_cmd *)desc.data;

	map->pg_id = pg_id;
	map->pri_bit_map = pri_bit_map;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
				      u16 qs_id, u8 pri)
{
	struct hclge_qs_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, false);

	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;

	map->qs_id = cpu_to_le16(qs_id);
	map->priority = pri;
	map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev,
				    u8 q_id, u16 qs_id)
{
	struct hclge_nq_to_qs_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false);

	map = (struct hclge_nq_to_qs_link_cmd *)desc.data;

	map->nq_id = cpu_to_le16(q_id);
	map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_weight_cfg(struct hclge_dev *hdev, u8 pg_id,
				  u8 dwrr)
{
	struct hclge_pg_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false);

	weight = (struct hclge_pg_weight_cmd *)desc.data;

	weight->pg_id = pg_id;
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_weight_cfg(struct hclge_dev *hdev, u8 pri_id,
				   u8 dwrr)
{
	struct hclge_priority_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false);

	weight = (struct hclge_priority_weight_cmd *)desc.data;

	weight->pri_id = pri_id;
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id,
				  u8 dwrr)
{
	struct hclge_qs_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false);

	weight = (struct hclge_qs_weight_cmd *)desc.data;

	weight->qs_id = cpu_to_le16(qs_id);
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
				    enum hclge_shap_bucket bucket, u8 pg_id,
				    u8 ir_b, u8 ir_u, u8 ir_s, u8 bs_b, u8 bs_s)
{
	struct hclge_pg_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;

	opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
		 HCLGE_OPC_TM_PG_C_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;

	shap_cfg_cmd->pg_id = pg_id;

	hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_B, ir_b);
	hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_U, ir_u);
	hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_S, ir_s);
	hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, BS_B, bs_b);
	hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, BS_S, bs_s);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
{
	struct hclge_port_shapping_cmd *shap_cfg_cmd;
	struct hclge_desc desc;
	u32 shapping_para = 0;
	u8 ir_u, ir_b, ir_s;
	int ret;

	ret = hclge_shaper_para_calc(HCLGE_ETHER_MAX_RATE,
				     HCLGE_SHAPER_LVL_PORT,
				     &ir_b, &ir_u, &ir_s);
	if (ret)
		return ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
	shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;

	hclge_tm_set_field(shapping_para, IR_B, ir_b);
	hclge_tm_set_field(shapping_para, IR_U, ir_u);
	hclge_tm_set_field(shapping_para, IR_S, ir_s);
	hclge_tm_set_field(shapping_para, BS_B, HCLGE_SHAPER_BS_U_DEF);
	hclge_tm_set_field(shapping_para, BS_S, HCLGE_SHAPER_BS_S_DEF);

	shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
				     enum hclge_shap_bucket bucket, u8 pri_id,
				     u8 ir_b, u8 ir_u, u8 ir_s,
				     u8 bs_b, u8 bs_s)
{
	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;

	opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
		 HCLGE_OPC_TM_PRI_C_SHAPPING;

	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;

	shap_cfg_cmd->pri_id = pri_id;

	hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_B, ir_b);
	hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_U, ir_u);
	hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_S, ir_s);
	hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, BS_B, bs_b);
	hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, BS_S, bs_s);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_schd_mode_cfg(struct hclge_dev *hdev, u8 pg_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, false);

	if (hdev->tm_info.pg_info[pg_id].pg_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pg_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, false);

	if (hdev->tm_info.tc_info[pri_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pri_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false);

	if (mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(qs_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc)
{
	struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
				   false);

	bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;

	bp_to_qs_map_cmd->tc_id = tc;

	/* Qset and tc are mapped one to one */
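	/* e.g. (illustrative): tc = 2 yields qs_bit_map = BIT(2) = 0x4 */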
	bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(1 << tc);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u8 i;

	vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;
	kinfo->num_tc = min_t(u16, kinfo->num_tqps, hdev->tm_info.num_tc);
	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
				kinfo->num_tqps / kinfo->num_tc);
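	/* each vport takes a contiguous block of num_tc qsets, so vport n
	 * starts at qset n * num_tc (see hclge_tm_pri_q_qs_cfg() below)
	 */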
	vport->qs_offset = hdev->tm_info.num_tc * vport->vport_id;
	vport->dwrr = 100;  /* 100 percent as init */
	vport->alloc_rss_size = kinfo->rss_size;

	for (i = 0; i < kinfo->num_tc; i++) {
		if (hdev->hw_tc_map & BIT(i)) {
			kinfo->tc_info[i].enable = true;
			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
			kinfo->tc_info[i].tc = i;
		} else {
			/* Set to default queue if TC is disabled */
			kinfo->tc_info[i].enable = false;
			kinfo->tc_info[i].tqp_offset = 0;
			kinfo->tc_info[i].tqp_count = 1;
			kinfo->tc_info[i].tc = 0;
		}
	}

	memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc,
	       FIELD_SIZEOF(struct hnae3_knic_private_info, prio_tc));
}

static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u32 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		hclge_tm_vport_tc_info_update(vport);

		vport++;
	}
}

static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
{
	u8 i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		hdev->tm_info.tc_info[i].tc_id = i;
		hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
		hdev->tm_info.tc_info[i].pgid = 0;
		hdev->tm_info.tc_info[i].bw_limit =
			hdev->tm_info.pg_info[0].bw_limit;
	}

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
		hdev->tm_info.prio_tc[i] =
			(i >= hdev->tm_info.num_tc) ? 0 : i;

	hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
}

static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
{
	u8 i;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		int k;

		hdev->tm_info.pg_dwrr[i] = i ? 0 : 100;

		hdev->tm_info.pg_info[i].pg_id = i;
		hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;

		hdev->tm_info.pg_info[i].bw_limit = HCLGE_ETHER_MAX_RATE;

		if (i != 0)
			continue;

		hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
		for (k = 0; k < hdev->tm_info.num_tc; k++)
			hdev->tm_info.pg_info[i].tc_dwrr[k] = 100;
	}
}

static int hclge_tm_schd_info_init(struct hclge_dev *hdev)
{
	if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
	    (hdev->tm_info.num_pg != 1))
		return -EINVAL;

	hclge_tm_pg_info_init(hdev);

	hclge_tm_tc_info_init(hdev);

	hclge_tm_vport_info_update(hdev);

	hdev->tm_info.fc_mode = HCLGE_FC_NONE;
	hdev->fc_mode_last_time = hdev->tm_info.fc_mode;

	return 0;
}

static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Cfg mapping */
		ret = hclge_tm_pg_to_pri_map_cfg(
			hdev, i, hdev->tm_info.pg_info[i].tc_bit_map);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
{
	u8 ir_u, ir_b, ir_s;
	int ret;
	u32 i;

	/* Pg shaping is only used in TC-based scheduler mode */
	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	/* Cfg shaper for each pg */
	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Calc shaper para */
		ret = hclge_shaper_para_calc(
					hdev->tm_info.pg_info[i].bw_limit,
					HCLGE_SHAPER_LVL_PG,
					&ir_b, &ir_u, &ir_s);
		if (ret)
			return ret;

		ret = hclge_tm_pg_shapping_cfg(hdev,
					       HCLGE_TM_SHAP_C_BUCKET, i,
					       0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
					       HCLGE_SHAPER_BS_S_DEF);
		if (ret)
			return ret;

		ret = hclge_tm_pg_shapping_cfg(hdev,
					       HCLGE_TM_SHAP_P_BUCKET, i,
					       ir_b, ir_u, ir_s,
					       HCLGE_SHAPER_BS_U_DEF,
					       HCLGE_SHAPER_BS_S_DEF);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	/* Pg dwrr is only used in TC-based scheduler mode */
	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	/* Cfg dwrr for each pg */
	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		ret = hclge_tm_pg_weight_cfg(hdev, i,
					     hdev->tm_info.pg_dwrr[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
				   struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hnae3_queue **tqp = kinfo->tqp;
	struct hnae3_tc_info *v_tc_info;
	u32 i, j;
	int ret;

	for (i = 0; i < kinfo->num_tc; i++) {
		v_tc_info = &kinfo->tc_info[i];
		for (j = 0; j < v_tc_info->tqp_count; j++) {
			struct hnae3_queue *q = tqp[v_tc_info->tqp_offset + j];

			ret = hclge_tm_q_to_qs_map_cfg(hdev,
						       hclge_get_queue_id(q),
						       vport->qs_offset + i);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i, k;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		/* Cfg qs -> pri mapping, one by one mapping */
		for (k = 0; k < hdev->num_alloc_vport; k++)
			for (i = 0; i < hdev->tm_info.num_tc; i++) {
				ret = hclge_tm_qs_to_pri_map_cfg(
					hdev, vport[k].qs_offset + i, i);
				if (ret)
					return ret;
			}
	} else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) {
		/* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */
		for (k = 0; k < hdev->num_alloc_vport; k++)
			for (i = 0; i < HNAE3_MAX_TC; i++) {
				ret = hclge_tm_qs_to_pri_map_cfg(
					hdev, vport[k].qs_offset + i, k);
				if (ret)
					return ret;
			}
	} else {
		return -EINVAL;
	}

	/* Cfg q -> qs mapping */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_vport_q_to_qs_map(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
{
	u8 ir_u, ir_b, ir_s;
	int ret;
	u32 i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		ret = hclge_shaper_para_calc(
					hdev->tm_info.tc_info[i].bw_limit,
					HCLGE_SHAPER_LVL_PRI,
					&ir_b, &ir_u, &ir_s);
		if (ret)
			return ret;

		ret = hclge_tm_pri_shapping_cfg(
			hdev, HCLGE_TM_SHAP_C_BUCKET, i,
			0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
			HCLGE_SHAPER_BS_S_DEF);
		if (ret)
			return ret;

		ret = hclge_tm_pri_shapping_cfg(
			hdev, HCLGE_TM_SHAP_P_BUCKET, i,
			ir_b, ir_u, ir_s, HCLGE_SHAPER_BS_U_DEF,
			HCLGE_SHAPER_BS_S_DEF);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;
	u8 ir_u, ir_b, ir_s;
	int ret;

	ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
				     &ir_b, &ir_u, &ir_s);
	if (ret)
		return ret;

	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
					vport->vport_id,
					0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
					HCLGE_SHAPER_BS_S_DEF);
	if (ret)
		return ret;

	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
					vport->vport_id,
					ir_b, ir_u, ir_s,
					HCLGE_SHAPER_BS_U_DEF,
					HCLGE_SHAPER_BS_S_DEF);
	if (ret)
		return ret;

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	struct hnae3_tc_info *v_tc_info;
	u8 ir_u, ir_b, ir_s;
	u32 i;
	int ret;

	for (i = 0; i < kinfo->num_tc; i++) {
		v_tc_info = &kinfo->tc_info[i];
		ret = hclge_shaper_para_calc(
					hdev->tm_info.tc_info[i].bw_limit,
					HCLGE_SHAPER_LVL_QSET,
					&ir_b, &ir_u, &ir_s);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	/* Need to configure the shaper for each vport */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_tm_pri_vnet_base_shaper_pri_cfg(vport);
		if (ret)
			return ret;

		ret = hclge_tm_pri_vnet_base_shaper_qs_cfg(vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		ret = hclge_tm_pri_tc_base_shaper_cfg(hdev);
		if (ret)
			return ret;
	} else {
		ret = hclge_tm_pri_vnet_base_shaper_cfg(hdev);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	struct hclge_pg_info *pg_info;
	u8 dwrr;
	int ret;
	u32 i, k;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		pg_info =
			&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
		dwrr = pg_info->tc_dwrr[i];

		ret = hclge_tm_pri_weight_cfg(hdev, i, dwrr);
		if (ret)
			return ret;

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			ret = hclge_tm_qs_weight_cfg(
				hdev, vport[k].qs_offset + i,
				vport[k].dwrr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	/* Vf dwrr */
	ret = hclge_tm_pri_weight_cfg(hdev, vport->vport_id, vport->dwrr);
	if (ret)
		return ret;

	/* Qset dwrr */
	for (i = 0; i < kinfo->num_tc; i++) {
		ret = hclge_tm_qs_weight_cfg(
			hdev, vport->qs_offset + i,
			hdev->tm_info.pg_info[0].tc_dwrr[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_dwrr_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_tm_pri_vnet_base_dwrr_pri_cfg(vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev);
		if (ret)
			return ret;
	} else {
		ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev);
		if (ret)
			return ret;
	}

	return 0;
}

int hclge_tm_map_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_up_to_tc_map(hdev);
	if (ret)
		return ret;

	ret = hclge_tm_pg_to_pri_map(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_q_qs_cfg(hdev);
}

static int hclge_tm_shaper_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_port_shaper_cfg(hdev);
	if (ret)
		return ret;

	ret = hclge_tm_pg_shaper_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_shaper_cfg(hdev);
}

int hclge_tm_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_pg_dwrr_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_dwrr_cfg(hdev);
}

static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev)
{
	int ret;
	u8 i;

	/* Only configured in TC-based scheduler mode */
	if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
		return 0;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		ret = hclge_tm_pg_schd_mode_cfg(hdev, i);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
	if (ret)
		return ret;

	for (i = 0; i < kinfo->num_tc; i++) {
		u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode;

		ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i,
						sch_mode);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u8 i, k;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		for (i = 0; i < hdev->tm_info.num_tc; i++) {
			ret = hclge_tm_pri_schd_mode_cfg(hdev, i);
			if (ret)
				return ret;

			for (k = 0; k < hdev->num_alloc_vport; k++) {
				ret = hclge_tm_qs_schd_mode_cfg(
					hdev, vport[k].qs_offset + i,
					HCLGE_SCH_MODE_DWRR);
				if (ret)
					return ret;
			}
		}
	} else {
		for (i = 0; i < hdev->num_alloc_vport; i++) {
			ret = hclge_tm_schd_mode_vnet_base_cfg(vport);
			if (ret)
				return ret;

			vport++;
		}
	}

	return 0;
}

int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_lvl2_schd_mode_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_lvl34_schd_mode_cfg(hdev);
}

static int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
{
	int ret;

	/* Cfg tm mapping */
	ret = hclge_tm_map_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg tm shaper */
	ret = hclge_tm_shaper_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg dwrr */
	ret = hclge_tm_dwrr_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg schd mode for each level schd */
	return hclge_tm_schd_mode_hw(hdev);
}

static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
{
	u8 enable_bitmap = 0;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
		enable_bitmap = HCLGE_TX_MAC_PAUSE_EN_MSK |
				HCLGE_RX_MAC_PAUSE_EN_MSK;

	return hclge_pfc_pause_en_cfg(hdev, enable_bitmap,
				      hdev->tm_info.hw_pfc_map);
}

static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
{
	bool tx_en, rx_en;

	switch (hdev->tm_info.fc_mode) {
	case HCLGE_FC_NONE:
		tx_en = false;
		rx_en = false;
		break;
	case HCLGE_FC_RX_PAUSE:
		tx_en = false;
		rx_en = true;
		break;
	case HCLGE_FC_TX_PAUSE:
		tx_en = true;
		rx_en = false;
		break;
	case HCLGE_FC_FULL:
		tx_en = true;
		rx_en = true;
		break;
	default:
		tx_en = true;
		rx_en = true;
	}

	return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
}

int hclge_pause_setup_hw(struct hclge_dev *hdev)
{
	int ret;
	u8 i;

	if (hdev->tm_info.fc_mode != HCLGE_FC_PFC)
		return hclge_mac_pause_setup_hw(hdev);

	/* Only DCB-supported dev supports qset back pressure and pfc cmd */
	if (!hnae3_dev_dcb_supported(hdev))
		return 0;

	/* When MAC is in GE mode, hdev does not support pfc setting */
	ret = hclge_pfc_setup_hw(hdev);
	if (ret)
		dev_warn(&hdev->pdev->dev, "set pfc pause failed:%d\n", ret);

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		ret = hclge_tm_qs_bp_cfg(hdev, i);
		if (ret)
			return ret;
	}

	return 0;
}

int hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
{
	struct hclge_vport *vport = hdev->vport;
	struct hnae3_knic_private_info *kinfo;
	u32 i, k;

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		if (prio_tc[i] >= hdev->tm_info.num_tc)
			return -EINVAL;
		hdev->tm_info.prio_tc[i] = prio_tc[i];

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			kinfo = &vport[k].nic.kinfo;
			kinfo->prio_tc[i] = prio_tc[i];
		}
	}
	return 0;
}

void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
{
	u8 i, bit_map = 0;

	hdev->tm_info.num_tc = num_tc;

	for (i = 0; i < hdev->tm_info.num_tc; i++)
		bit_map |= BIT(i);
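	/* e.g. (illustrative): num_tc = 4 yields bit_map = 0xf */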

	if (!bit_map) {
		bit_map = 1;
		hdev->tm_info.num_tc = 1;
	}

	hdev->hw_tc_map = bit_map;

	hclge_tm_schd_info_init(hdev);
}

int hclge_tm_init_hw(struct hclge_dev *hdev)
{
	int ret;

	if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
	    (hdev->tx_sch_mode != HCLGE_FLAG_VNET_BASE_SCH_MODE))
		return -ENOTSUPP;

	ret = hclge_tm_schd_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_pause_setup_hw(hdev);
	if (ret)
		return ret;

	return 0;
}

int hclge_tm_schd_init(struct hclge_dev *hdev)
{
	int ret = hclge_tm_schd_info_init(hdev);

	if (ret)
		return ret;

	return hclge_tm_init_hw(hdev);
}