1 /*
2 * Copyright (c) 2016~2017 Hisilicon Limited.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10 #include <linux/etherdevice.h>
11
12 #include "hclge_cmd.h"
13 #include "hclge_main.h"
14 #include "hclge_tm.h"
15
16 enum hclge_shaper_level {
17 HCLGE_SHAPER_LVL_PRI = 0,
18 HCLGE_SHAPER_LVL_PG = 1,
19 HCLGE_SHAPER_LVL_PORT = 2,
20 HCLGE_SHAPER_LVL_QSET = 3,
21 HCLGE_SHAPER_LVL_CNT = 4,
22 HCLGE_SHAPER_LVL_VF = 0,
23 HCLGE_SHAPER_LVL_PF = 1,
24 };
25
26 #define HCLGE_TM_PFC_PKT_GET_CMD_NUM 3
27 #define HCLGE_TM_PFC_NUM_GET_PER_CMD 3
28
29 #define HCLGE_SHAPER_BS_U_DEF 5
30 #define HCLGE_SHAPER_BS_S_DEF 20
31
32 #define HCLGE_ETHER_MAX_RATE 100000
33
34 /* hclge_shaper_para_calc: calculate ir parameter for the shaper
35 * @ir: rate to be configured, in Mbps
36 * @shaper_level: the shaper level, e.g. port, pg, priority, qset
37 * @ir_b: IR_B parameter of IR shaper
38 * @ir_u: IR_U parameter of IR shaper
39 * @ir_s: IR_S parameter of IR shaper
40 *
41 * the formula:
42 *
43 *              IR_b * (2 ^ IR_u) * 8
44 * IR(Mbps) = ------------------------- * CLOCK(1000Mbps)
45 *              Tick * (2 ^ IR_s)
46 *
47 * @return: 0: calculation successful, negative: fail
48 */
49 static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
50 u8 *ir_b, u8 *ir_u, u8 *ir_s)
51 {
52 const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
53 6 * 256, /* Priority level */
54 6 * 32, /* Priority group level */
55 6 * 8, /* Port level */
56 6 * 256 /* Qset level */
57 };
58 u8 ir_u_calc = 0, ir_s_calc = 0;
59 u32 ir_calc;
60 u32 tick;
61
62 /* Calc tick */
63 if (shaper_level >= HCLGE_SHAPER_LVL_CNT)
64 return -EINVAL;
65
66 tick = tick_array[shaper_level];
67
68 /*
69 * Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0
70 * the formula is changed to:
71 *               126 * 1 * 8
72 * ir_calc = ---------------- * 1000
73 *               tick * 1
74 */
75 ir_calc = (1008000 + (tick >> 1) - 1) / tick;
76
77 if (ir_calc == ir) {
78 *ir_b = 126;
79 *ir_u = 0;
80 *ir_s = 0;
81
82 return 0;
83 } else if (ir_calc > ir) {
84 /* Increasing the denominator to select ir_s value */
85 while (ir_calc > ir) {
86 ir_s_calc++;
87 ir_calc = 1008000 / (tick * (1 << ir_s_calc));
88 }
89
90 if (ir_calc == ir)
91 *ir_b = 126;
92 else
93 *ir_b = (ir * tick * (1 << ir_s_calc) + 4000) / 8000;
94 } else {
95 /* Increasing the numerator to select ir_u value */
96 u32 numerator;
97
98 while (ir_calc < ir) {
99 ir_u_calc++;
100 numerator = 1008000 * (1 << ir_u_calc);
101 ir_calc = (numerator + (tick >> 1)) / tick;
102 }
103
104 if (ir_calc == ir) {
105 *ir_b = 126;
106 } else {
107 u32 denominator = (8000 * (1 << --ir_u_calc));
108 *ir_b = (ir * tick + (denominator >> 1)) / denominator;
109 }
110 }
111
112 *ir_u = ir_u_calc;
113 *ir_s = ir_s_calc;
114
115 return 0;
116 }
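
/* A minimal worked example of the calculation above (illustrative only):
 * at priority level the tick is 6 * 256 = 1536, so the ir_b = 126,
 * ir_u = 0, ir_s = 0 starting point corresponds to
 *
 *            126 * (2 ^ 0) * 8
 *     IR = --------------------- * 1000 = 1008000 / 1536 ~= 656 Mbps,
 *            1536 * (2 ^ 0)
 *
 * which matches ir_calc = (1008000 + 768 - 1) / 1536 = 656 in the code.
 * A hypothetical caller requesting 1000 Mbps might look like:
 *
 *     u8 ir_b, ir_u, ir_s;
 *     int ret;
 *
 *     ret = hclge_shaper_para_calc(1000, HCLGE_SHAPER_LVL_PRI,
 *                                  &ir_b, &ir_u, &ir_s);
 *     if (ret)
 *         return ret;
 */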
117
118 static int hclge_pfc_stats_get(struct hclge_dev *hdev,
119 enum hclge_opcode_type opcode, u64 *stats)
120 {
121 struct hclge_desc desc[HCLGE_TM_PFC_PKT_GET_CMD_NUM];
122 int ret, i, j;
123
124 if (!(opcode == HCLGE_OPC_QUERY_PFC_RX_PKT_CNT ||
125 opcode == HCLGE_OPC_QUERY_PFC_TX_PKT_CNT))
126 return -EINVAL;
127
128 for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) {
129 hclge_cmd_setup_basic_desc(&desc[i], opcode, true);
130 if (i != (HCLGE_TM_PFC_PKT_GET_CMD_NUM - 1))
131 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
132 else
133 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
134 }
135
136 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_TM_PFC_PKT_GET_CMD_NUM);
137 if (ret)
138 return ret;
139
140 for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) {
141 struct hclge_pfc_stats_cmd *pfc_stats =
142 (struct hclge_pfc_stats_cmd *)desc[i].data;
143
144 for (j = 0; j < HCLGE_TM_PFC_NUM_GET_PER_CMD; j++) {
145 u32 index = i * HCLGE_TM_PFC_NUM_GET_PER_CMD + j;
146
147 if (index < HCLGE_MAX_TC_NUM)
148 stats[index] =
149 le64_to_cpu(pfc_stats->pkt_num[j]);
150 }
151 }
152 return 0;
153 }
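
/* A sketch of the index math above (illustrative only): each of the
 * HCLGE_TM_PFC_PKT_GET_CMD_NUM descriptors carries
 * HCLGE_TM_PFC_NUM_GET_PER_CMD per-TC counters, so with both defined
 * as 3:
 *
 *     desc[0].pkt_num[0..2] -> stats[0..2]  (TC0..TC2)
 *     desc[1].pkt_num[0..2] -> stats[3..5]  (TC3..TC5)
 *     desc[2].pkt_num[0..2] -> stats[6..8]  (capped at HCLGE_MAX_TC_NUM
 *                                            entries)
 */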
154
155 int hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats)
156 {
157 return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_RX_PKT_CNT, stats);
158 }
159
160 int hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats)
161 {
162 return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_TX_PKT_CNT, stats);
163 }
164
165 int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
166 {
167 struct hclge_desc desc;
168
169 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PAUSE_EN, false);
170
171 desc.data[0] = cpu_to_le32((tx ? HCLGE_TX_MAC_PAUSE_EN_MSK : 0) |
172 (rx ? HCLGE_RX_MAC_PAUSE_EN_MSK : 0));
173
174 return hclge_cmd_send(&hdev->hw, &desc, 1);
175 }
176
177 static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
178 u8 pfc_bitmap)
179 {
180 struct hclge_desc desc;
181 struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)&desc.data;
182
183 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false);
184
185 pfc->tx_rx_en_bitmap = tx_rx_bitmap;
186 pfc->pri_en_bitmap = pfc_bitmap;
187
188 return hclge_cmd_send(&hdev->hw, &desc, 1);
189 }
190
191 static int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
192 u8 pause_trans_gap, u16 pause_trans_time)
193 {
194 struct hclge_cfg_pause_param_cmd *pause_param;
195 struct hclge_desc desc;
196
197 pause_param = (struct hclge_cfg_pause_param_cmd *)&desc.data;
198
199 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false);
200
201 ether_addr_copy(pause_param->mac_addr, addr);
202 pause_param->pause_trans_gap = pause_trans_gap;
203 pause_param->pause_trans_time = cpu_to_le16(pause_trans_time);
204
205 return hclge_cmd_send(&hdev->hw, &desc, 1);
206 }
207
208 int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
209 {
210 struct hclge_cfg_pause_param_cmd *pause_param;
211 struct hclge_desc desc;
212 u16 trans_time;
213 u8 trans_gap;
214 int ret;
215
216 pause_param = (struct hclge_cfg_pause_param_cmd *)&desc.data;
217
218 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);
219
220 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
221 if (ret)
222 return ret;
223
224 trans_gap = pause_param->pause_trans_gap;
225 trans_time = le16_to_cpu(pause_param->pause_trans_time);
226
227 return hclge_pause_param_cfg(hdev, mac_addr, trans_gap,
228 trans_time);
229 }
230
231 static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
232 {
233 u8 tc;
234
235 tc = hdev->tm_info.prio_tc[pri_id];
236
237 if (tc >= hdev->tm_info.num_tc)
238 return -EINVAL;
239
240 /*
241 * The register for priority mapping has four bytes; the first byte holds
242 * priority0 and priority1: the higher 4 bits stand for priority1
243 * while the lower 4 bits stand for priority0, as below:
244 * first byte: | pri_1 | pri_0 |
245 * second byte: | pri_3 | pri_2 |
246 * third byte: | pri_5 | pri_4 |
247 * fourth byte: | pri_7 | pri_6 |
248 */
249 pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4);
250
251 return 0;
252 }
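
/* Worked example of the nibble packing above (illustrative only): if user
 * priority 5 maps to TC 3, then pri_id >> 1 = 2 selects the third byte and
 * (pri_id & 1) * 4 = 4 selects its upper nibble, i.e.
 *
 *     pri[2] |= 3 << 4;        becomes | pri_5 = 3 | pri_4 |
 */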
253
254 static int hclge_up_to_tc_map(struct hclge_dev *hdev)
255 {
256 struct hclge_desc desc;
257 u8 *pri = (u8 *)desc.data;
258 u8 pri_id;
259 int ret;
260
261 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);
262
263 for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
264 ret = hclge_fill_pri_array(hdev, pri, pri_id);
265 if (ret)
266 return ret;
267 }
268
269 return hclge_cmd_send(&hdev->hw, &desc, 1);
270 }
271
272 static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
273 u8 pg_id, u8 pri_bit_map)
274 {
275 struct hclge_pg_to_pri_link_cmd *map;
276 struct hclge_desc desc;
277
278 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, false);
279
280 map = (struct hclge_pg_to_pri_link_cmd *)desc.data;
281
282 map->pg_id = pg_id;
283 map->pri_bit_map = pri_bit_map;
284
285 return hclge_cmd_send(&hdev->hw, &desc, 1);
286 }
287
288 static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
289 u16 qs_id, u8 pri)
290 {
291 struct hclge_qs_to_pri_link_cmd *map;
292 struct hclge_desc desc;
293
294 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, false);
295
296 map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
297
298 map->qs_id = cpu_to_le16(qs_id);
299 map->priority = pri;
300 map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK;
301
302 return hclge_cmd_send(&hdev->hw, &desc, 1);
303 }
304
305 static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev,
306 u8 q_id, u16 qs_id)
307 {
308 struct hclge_nq_to_qs_link_cmd *map;
309 struct hclge_desc desc;
310
311 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false);
312
313 map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
314
315 map->nq_id = cpu_to_le16(q_id);
316 map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK);
317
318 return hclge_cmd_send(&hdev->hw, &desc, 1);
319 }
320
321 static int hclge_tm_pg_weight_cfg(struct hclge_dev *hdev, u8 pg_id,
322 u8 dwrr)
323 {
324 struct hclge_pg_weight_cmd *weight;
325 struct hclge_desc desc;
326
327 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false);
328
329 weight = (struct hclge_pg_weight_cmd *)desc.data;
330
331 weight->pg_id = pg_id;
332 weight->dwrr = dwrr;
333
334 return hclge_cmd_send(&hdev->hw, &desc, 1);
335 }
336
337 static int hclge_tm_pri_weight_cfg(struct hclge_dev *hdev, u8 pri_id,
338 u8 dwrr)
339 {
340 struct hclge_priority_weight_cmd *weight;
341 struct hclge_desc desc;
342
343 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false);
344
345 weight = (struct hclge_priority_weight_cmd *)desc.data;
346
347 weight->pri_id = pri_id;
348 weight->dwrr = dwrr;
349
350 return hclge_cmd_send(&hdev->hw, &desc, 1);
351 }
352
353 static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id,
354 u8 dwrr)
355 {
356 struct hclge_qs_weight_cmd *weight;
357 struct hclge_desc desc;
358
359 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false);
360
361 weight = (struct hclge_qs_weight_cmd *)desc.data;
362
363 weight->qs_id = cpu_to_le16(qs_id);
364 weight->dwrr = dwrr;
365
366 return hclge_cmd_send(&hdev->hw, &desc, 1);
367 }
368
369 static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
370 enum hclge_shap_bucket bucket, u8 pg_id,
371 u8 ir_b, u8 ir_u, u8 ir_s, u8 bs_b, u8 bs_s)
372 {
373 struct hclge_pg_shapping_cmd *shap_cfg_cmd;
374 enum hclge_opcode_type opcode;
375 struct hclge_desc desc;
376 u32 shapping_para = 0;
377
378 opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
379 HCLGE_OPC_TM_PG_C_SHAPPING;
380 hclge_cmd_setup_basic_desc(&desc, opcode, false);
381
382 shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
383
384 shap_cfg_cmd->pg_id = pg_id;
385
386 hclge_tm_set_field(shapping_para, IR_B, ir_b);
387 hclge_tm_set_field(shapping_para, IR_U, ir_u);
388 hclge_tm_set_field(shapping_para, IR_S, ir_s);
389 hclge_tm_set_field(shapping_para, BS_B, bs_b);
390 hclge_tm_set_field(shapping_para, BS_S, bs_s);
391
392 shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para);
393
394 return hclge_cmd_send(&hdev->hw, &desc, 1);
395 }
396
397 static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
398 {
399 struct hclge_port_shapping_cmd *shap_cfg_cmd;
400 struct hclge_desc desc;
401 u32 shapping_para = 0;
402 u8 ir_u, ir_b, ir_s;
403 int ret;
404
405 ret = hclge_shaper_para_calc(HCLGE_ETHER_MAX_RATE,
406 HCLGE_SHAPER_LVL_PORT,
407 &ir_b, &ir_u, &ir_s);
408 if (ret)
409 return ret;
410
411 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
412 shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
413
414 hclge_tm_set_field(shapping_para, IR_B, ir_b);
415 hclge_tm_set_field(shapping_para, IR_U, ir_u);
416 hclge_tm_set_field(shapping_para, IR_S, ir_s);
417 hclge_tm_set_field(shapping_para, BS_B, HCLGE_SHAPER_BS_U_DEF);
418 hclge_tm_set_field(shapping_para, BS_S, HCLGE_SHAPER_BS_S_DEF);
419
420 shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para);
421
422 return hclge_cmd_send(&hdev->hw, &desc, 1);
423 }
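
/* Worked example for the port shaper above (illustrative only): with
 * HCLGE_ETHER_MAX_RATE = 100000 Mbps and the port-level tick of
 * 6 * 8 = 48, hclge_shaper_para_calc() increases ir_u until the rate is
 * reachable and ends up with ir_b = 150, ir_u = 2, ir_s = 0, which gives
 *
 *     150 * (2 ^ 2) * 8 / (48 * (2 ^ 0)) * 1000 = 100000 Mbps,
 *
 * i.e. exactly the configured line rate. These values are then packed into
 * shapping_para together with the default burst sizes via
 * hclge_tm_set_field() (the field masks/shifts live in hclge_tm.h) before
 * the descriptor is sent.
 */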
424
425 static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
426 enum hclge_shap_bucket bucket, u8 pri_id,
427 u8 ir_b, u8 ir_u, u8 ir_s,
428 u8 bs_b, u8 bs_s)
429 {
430 struct hclge_pri_shapping_cmd *shap_cfg_cmd;
431 enum hclge_opcode_type opcode;
432 struct hclge_desc desc;
433 u32 shapping_para = 0;
434
435 opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
436 HCLGE_OPC_TM_PRI_C_SHAPPING;
437
438 hclge_cmd_setup_basic_desc(&desc, opcode, false);
439
440 shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
441
442 shap_cfg_cmd->pri_id = pri_id;
443
444 hclge_tm_set_field(shapping_para, IR_B, ir_b);
445 hclge_tm_set_field(shapping_para, IR_U, ir_u);
446 hclge_tm_set_field(shapping_para, IR_S, ir_s);
447 hclge_tm_set_field(shapping_para, BS_B, bs_b);
448 hclge_tm_set_field(shapping_para, BS_S, bs_s);
449
450 shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para);
451
452 return hclge_cmd_send(&hdev->hw, &desc, 1);
453 }
454
455 static int hclge_tm_pg_schd_mode_cfg(struct hclge_dev *hdev, u8 pg_id)
456 {
457 struct hclge_desc desc;
458
459 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, false);
460
461 if (hdev->tm_info.pg_info[pg_id].pg_sch_mode == HCLGE_SCH_MODE_DWRR)
462 desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
463 else
464 desc.data[1] = 0;
465
466 desc.data[0] = cpu_to_le32(pg_id);
467
468 return hclge_cmd_send(&hdev->hw, &desc, 1);
469 }
470
471 static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id)
472 {
473 struct hclge_desc desc;
474
475 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, false);
476
477 if (hdev->tm_info.tc_info[pri_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
478 desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
479 else
480 desc.data[1] = 0;
481
482 desc.data[0] = cpu_to_le32(pri_id);
483
484 return hclge_cmd_send(&hdev->hw, &desc, 1);
485 }
486
487 static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode)
488 {
489 struct hclge_desc desc;
490
491 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false);
492
493 if (mode == HCLGE_SCH_MODE_DWRR)
494 desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
495 else
496 desc.data[1] = 0;
497
498 desc.data[0] = cpu_to_le32(qs_id);
499
500 return hclge_cmd_send(&hdev->hw, &desc, 1);
501 }
502
503 static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id,
504 u32 bit_map)
505 {
506 struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
507 struct hclge_desc desc;
508
509 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
510 false);
511
512 bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
513
514 bp_to_qs_map_cmd->tc_id = tc;
515 bp_to_qs_map_cmd->qs_group_id = grp_id;
516 bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(bit_map);
517
518 return hclge_cmd_send(&hdev->hw, &desc, 1);
519 }
520
521 static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
522 {
523 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
524 struct hclge_dev *hdev = vport->back;
525 u8 i;
526
527 vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;
528 kinfo->num_tc =
529 min_t(u16, kinfo->num_tqps, hdev->tm_info.num_tc);
530 kinfo->rss_size
531 = min_t(u16, hdev->rss_size_max,
532 kinfo->num_tqps / kinfo->num_tc);
533 vport->qs_offset = hdev->tm_info.num_tc * vport->vport_id;
534 vport->dwrr = 100; /* 100 percent as init */
535 vport->alloc_rss_size = kinfo->rss_size;
536
537 for (i = 0; i < kinfo->num_tc; i++) {
538 if (hdev->hw_tc_map & BIT(i)) {
539 kinfo->tc_info[i].enable = true;
540 kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
541 kinfo->tc_info[i].tqp_count = kinfo->rss_size;
542 kinfo->tc_info[i].tc = i;
543 } else {
544 /* Set to default queue if TC is disabled */
545 kinfo->tc_info[i].enable = false;
546 kinfo->tc_info[i].tqp_offset = 0;
547 kinfo->tc_info[i].tqp_count = 1;
548 kinfo->tc_info[i].tc = 0;
549 }
550 }
551
552 memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc,
553 FIELD_SIZEOF(struct hnae3_knic_private_info, prio_tc));
554 }
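
/* Worked example of the sizing above (illustrative numbers only): a vport
 * with kinfo->num_tqps = 16, hdev->tm_info.num_tc = 4 and rss_size_max = 8
 * gets rss_size = min(8, 16 / 4) = 4, so an enabled TC i owns the tqp
 * range [i * 4, i * 4 + 4); a disabled TC falls back to tqp 0 with a
 * count of 1.
 */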
555
556 static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
557 {
558 struct hclge_vport *vport = hdev->vport;
559 u32 i;
560
561 for (i = 0; i < hdev->num_alloc_vport; i++) {
562 hclge_tm_vport_tc_info_update(vport);
563
564 vport++;
565 }
566 }
567
568 static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
569 {
570 u8 i;
571
572 for (i = 0; i < hdev->tm_info.num_tc; i++) {
573 hdev->tm_info.tc_info[i].tc_id = i;
574 hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
575 hdev->tm_info.tc_info[i].pgid = 0;
576 hdev->tm_info.tc_info[i].bw_limit =
577 hdev->tm_info.pg_info[0].bw_limit;
578 }
579
580 for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
581 hdev->tm_info.prio_tc[i] =
582 (i >= hdev->tm_info.num_tc) ? 0 : i;
583
584 /* DCB is enabled if we have more than 1 TC */
585 if (hdev->tm_info.num_tc > 1)
586 hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
587 else
588 hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
589 }
590
591 static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
592 {
593 u8 i;
594
595 for (i = 0; i < hdev->tm_info.num_pg; i++) {
596 int k;
597
598 hdev->tm_info.pg_dwrr[i] = i ? 0 : 100;
599
600 hdev->tm_info.pg_info[i].pg_id = i;
601 hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;
602
603 hdev->tm_info.pg_info[i].bw_limit = HCLGE_ETHER_MAX_RATE;
604
605 if (i != 0)
606 continue;
607
608 hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
609 for (k = 0; k < hdev->tm_info.num_tc; k++)
610 hdev->tm_info.pg_info[i].tc_dwrr[k] = 100;
611 }
612 }
613
614 static void hclge_pfc_info_init(struct hclge_dev *hdev)
615 {
616 if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE)) {
617 if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
618 dev_warn(&hdev->pdev->dev,
619 "DCB is disable, but last mode is FC_PFC\n");
620
621 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
622 } else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
623 /* fc_mode_last_time records the last fc_mode when
624 * DCB is enabled, so that fc_mode can be set to
625 * the correct value when DCB is disabled.
626 */
627 hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
628 hdev->tm_info.fc_mode = HCLGE_FC_PFC;
629 }
630 }
631
632 static int hclge_tm_schd_info_init(struct hclge_dev *hdev)
633 {
634 if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
635 (hdev->tm_info.num_pg != 1))
636 return -EINVAL;
637
638 hclge_tm_pg_info_init(hdev);
639
640 hclge_tm_tc_info_init(hdev);
641
642 hclge_tm_vport_info_update(hdev);
643
644 hclge_pfc_info_init(hdev);
645
646 return 0;
647 }
648
649 static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
650 {
651 int ret;
652 u32 i;
653
654 if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
655 return 0;
656
657 for (i = 0; i < hdev->tm_info.num_pg; i++) {
658 /* Cfg mapping */
659 ret = hclge_tm_pg_to_pri_map_cfg(
660 hdev, i, hdev->tm_info.pg_info[i].tc_bit_map);
661 if (ret)
662 return ret;
663 }
664
665 return 0;
666 }
667
668 static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
669 {
670 u8 ir_u, ir_b, ir_s;
671 int ret;
672 u32 i;
673
674 /* Cfg pg schd */
675 if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
676 return 0;
677
678 /* Pg to pri */
679 for (i = 0; i < hdev->tm_info.num_pg; i++) {
680 /* Calc shaper para */
681 ret = hclge_shaper_para_calc(
682 hdev->tm_info.pg_info[i].bw_limit,
683 HCLGE_SHAPER_LVL_PG,
684 &ir_b, &ir_u, &ir_s);
685 if (ret)
686 return ret;
687
688 ret = hclge_tm_pg_shapping_cfg(hdev,
689 HCLGE_TM_SHAP_C_BUCKET, i,
690 0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
691 HCLGE_SHAPER_BS_S_DEF);
692 if (ret)
693 return ret;
694
695 ret = hclge_tm_pg_shapping_cfg(hdev,
696 HCLGE_TM_SHAP_P_BUCKET, i,
697 ir_b, ir_u, ir_s,
698 HCLGE_SHAPER_BS_U_DEF,
699 HCLGE_SHAPER_BS_S_DEF);
700 if (ret)
701 return ret;
702 }
703
704 return 0;
705 }
706
707 static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)
708 {
709 int ret;
710 u32 i;
711
712 /* cfg pg schd */
713 if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
714 return 0;
715
716 /* pg to prio */
717 for (i = 0; i < hdev->tm_info.num_pg; i++) {
718 /* Cfg dwrr */
719 ret = hclge_tm_pg_weight_cfg(hdev, i,
720 hdev->tm_info.pg_dwrr[i]);
721 if (ret)
722 return ret;
723 }
724
725 return 0;
726 }
727
728 static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
729 struct hclge_vport *vport)
730 {
731 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
732 struct hnae3_queue **tqp = kinfo->tqp;
733 struct hnae3_tc_info *v_tc_info;
734 u32 i, j;
735 int ret;
736
737 for (i = 0; i < kinfo->num_tc; i++) {
738 v_tc_info = &kinfo->tc_info[i];
739 for (j = 0; j < v_tc_info->tqp_count; j++) {
740 struct hnae3_queue *q = tqp[v_tc_info->tqp_offset + j];
741
742 ret = hclge_tm_q_to_qs_map_cfg(hdev,
743 hclge_get_queue_id(q),
744 vport->qs_offset + i);
745 if (ret)
746 return ret;
747 }
748 }
749
750 return 0;
751 }
752
753 static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
754 {
755 struct hclge_vport *vport = hdev->vport;
756 int ret;
757 u32 i, k;
758
759 if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
760 /* Cfg qs -> pri mapping, one-to-one mapping */
761 for (k = 0; k < hdev->num_alloc_vport; k++)
762 for (i = 0; i < hdev->tm_info.num_tc; i++) {
763 ret = hclge_tm_qs_to_pri_map_cfg(
764 hdev, vport[k].qs_offset + i, i);
765 if (ret)
766 return ret;
767 }
768 } else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) {
769 /* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */
770 for (k = 0; k < hdev->num_alloc_vport; k++)
771 for (i = 0; i < HNAE3_MAX_TC; i++) {
772 ret = hclge_tm_qs_to_pri_map_cfg(
773 hdev, vport[k].qs_offset + i, k);
774 if (ret)
775 return ret;
776 }
777 } else {
778 return -EINVAL;
779 }
780
781 /* Cfg q -> qs mapping */
782 for (i = 0; i < hdev->num_alloc_vport; i++) {
783 ret = hclge_vport_q_to_qs_map(hdev, vport);
784 if (ret)
785 return ret;
786
787 vport++;
788 }
789
790 return 0;
791 }
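
/* Mapping example for the two scheduler modes above (illustrative only),
 * with 2 vports and num_tc = 4 (so vport[0].qs_offset = 0 and
 * vport[1].qs_offset = 4):
 *
 *   TC-based mode:   qset 0..3 -> pri 0..3, qset 4..7 -> pri 0..3
 *                    (every vport's TC i shares priority i)
 *   VNET-based mode: each of vport[k]'s qsets (qs_offset + 0 ..
 *                    qs_offset + HNAE3_MAX_TC - 1) links to priority k,
 *                    i.e. one priority per vport
 */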
792
793 static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
794 {
795 u8 ir_u, ir_b, ir_s;
796 int ret;
797 u32 i;
798
799 for (i = 0; i < hdev->tm_info.num_tc; i++) {
800 ret = hclge_shaper_para_calc(
801 hdev->tm_info.tc_info[i].bw_limit,
802 HCLGE_SHAPER_LVL_PRI,
803 &ir_b, &ir_u, &ir_s);
804 if (ret)
805 return ret;
806
807 ret = hclge_tm_pri_shapping_cfg(
808 hdev, HCLGE_TM_SHAP_C_BUCKET, i,
809 0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
810 HCLGE_SHAPER_BS_S_DEF);
811 if (ret)
812 return ret;
813
814 ret = hclge_tm_pri_shapping_cfg(
815 hdev, HCLGE_TM_SHAP_P_BUCKET, i,
816 ir_b, ir_u, ir_s, HCLGE_SHAPER_BS_U_DEF,
817 HCLGE_SHAPER_BS_S_DEF);
818 if (ret)
819 return ret;
820 }
821
822 return 0;
823 }
824
825 static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
826 {
827 struct hclge_dev *hdev = vport->back;
828 u8 ir_u, ir_b, ir_s;
829 int ret;
830
831 ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
832 &ir_b, &ir_u, &ir_s);
833 if (ret)
834 return ret;
835
836 ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
837 vport->vport_id,
838 0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
839 HCLGE_SHAPER_BS_S_DEF);
840 if (ret)
841 return ret;
842
843 ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
844 vport->vport_id,
845 ir_b, ir_u, ir_s,
846 HCLGE_SHAPER_BS_U_DEF,
847 HCLGE_SHAPER_BS_S_DEF);
848 if (ret)
849 return ret;
850
851 return 0;
852 }
853
854 static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
855 {
856 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
857 struct hclge_dev *hdev = vport->back;
858 u8 ir_u, ir_b, ir_s;
859 u32 i;
860 int ret;
861
862 for (i = 0; i < kinfo->num_tc; i++) {
863 ret = hclge_shaper_para_calc(
864 hdev->tm_info.tc_info[i].bw_limit,
865 HCLGE_SHAPER_LVL_QSET,
866 &ir_b, &ir_u, &ir_s);
867 if (ret)
868 return ret;
869 }
870
871 return 0;
872 }
873
874 static int hclge_tm_pri_vnet_base_shaper_cfg(struct hclge_dev *hdev)
875 {
876 struct hclge_vport *vport = hdev->vport;
877 int ret;
878 u32 i;
879
880 /* Need to configure vport shaper */
881 for (i = 0; i < hdev->num_alloc_vport; i++) {
882 ret = hclge_tm_pri_vnet_base_shaper_pri_cfg(vport);
883 if (ret)
884 return ret;
885
886 ret = hclge_tm_pri_vnet_base_shaper_qs_cfg(vport);
887 if (ret)
888 return ret;
889
890 vport++;
891 }
892
893 return 0;
894 }
895
896 static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev)
897 {
898 int ret;
899
900 if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
901 ret = hclge_tm_pri_tc_base_shaper_cfg(hdev);
902 if (ret)
903 return ret;
904 } else {
905 ret = hclge_tm_pri_vnet_base_shaper_cfg(hdev);
906 if (ret)
907 return ret;
908 }
909
910 return 0;
911 }
912
913 static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
914 {
915 struct hclge_vport *vport = hdev->vport;
916 struct hclge_pg_info *pg_info;
917 u8 dwrr;
918 int ret;
919 u32 i, k;
920
921 for (i = 0; i < hdev->tm_info.num_tc; i++) {
922 pg_info =
923 &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
924 dwrr = pg_info->tc_dwrr[i];
925
926 ret = hclge_tm_pri_weight_cfg(hdev, i, dwrr);
927 if (ret)
928 return ret;
929
930 for (k = 0; k < hdev->num_alloc_vport; k++) {
931 ret = hclge_tm_qs_weight_cfg(
932 hdev, vport[k].qs_offset + i,
933 vport[k].dwrr);
934 if (ret)
935 return ret;
936 }
937 }
938
939 return 0;
940 }
941
942 static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport)
943 {
944 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
945 struct hclge_dev *hdev = vport->back;
946 int ret;
947 u8 i;
948
949 /* Vf dwrr */
950 ret = hclge_tm_pri_weight_cfg(hdev, vport->vport_id, vport->dwrr);
951 if (ret)
952 return ret;
953
954 /* Qset dwrr */
955 for (i = 0; i < kinfo->num_tc; i++) {
956 ret = hclge_tm_qs_weight_cfg(
957 hdev, vport->qs_offset + i,
958 hdev->tm_info.pg_info[0].tc_dwrr[i]);
959 if (ret)
960 return ret;
961 }
962
963 return 0;
964 }
965
966 static int hclge_tm_pri_vnet_base_dwrr_cfg(struct hclge_dev *hdev)
967 {
968 struct hclge_vport *vport = hdev->vport;
969 int ret;
970 u32 i;
971
972 for (i = 0; i < hdev->num_alloc_vport; i++) {
973 ret = hclge_tm_pri_vnet_base_dwrr_pri_cfg(vport);
974 if (ret)
975 return ret;
976
977 vport++;
978 }
979
980 return 0;
981 }
982
983 static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
984 {
985 int ret;
986
987 if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
988 ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev);
989 if (ret)
990 return ret;
991 } else {
992 ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev);
993 if (ret)
994 return ret;
995 }
996
997 return 0;
998 }
999
1000 int hclge_tm_map_cfg(struct hclge_dev *hdev)
1001 {
1002 int ret;
1003
1004 ret = hclge_up_to_tc_map(hdev);
1005 if (ret)
1006 return ret;
1007
1008 ret = hclge_tm_pg_to_pri_map(hdev);
1009 if (ret)
1010 return ret;
1011
1012 return hclge_tm_pri_q_qs_cfg(hdev);
1013 }
1014
1015 static int hclge_tm_shaper_cfg(struct hclge_dev *hdev)
1016 {
1017 int ret;
1018
1019 ret = hclge_tm_port_shaper_cfg(hdev);
1020 if (ret)
1021 return ret;
1022
1023 ret = hclge_tm_pg_shaper_cfg(hdev);
1024 if (ret)
1025 return ret;
1026
1027 return hclge_tm_pri_shaper_cfg(hdev);
1028 }
1029
1030 int hclge_tm_dwrr_cfg(struct hclge_dev *hdev)
1031 {
1032 int ret;
1033
1034 ret = hclge_tm_pg_dwrr_cfg(hdev);
1035 if (ret)
1036 return ret;
1037
1038 return hclge_tm_pri_dwrr_cfg(hdev);
1039 }
1040
1041 static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev)
1042 {
1043 int ret;
1044 u8 i;
1045
1046 /* Only configured in TC-based scheduler mode */
1047 if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
1048 return 0;
1049
1050 for (i = 0; i < hdev->tm_info.num_pg; i++) {
1051 ret = hclge_tm_pg_schd_mode_cfg(hdev, i);
1052 if (ret)
1053 return ret;
1054 }
1055
1056 return 0;
1057 }
1058
1059 static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
1060 {
1061 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1062 struct hclge_dev *hdev = vport->back;
1063 int ret;
1064 u8 i;
1065
1066 ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
1067 if (ret)
1068 return ret;
1069
1070 for (i = 0; i < kinfo->num_tc; i++) {
1071 u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode;
1072
1073 ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i,
1074 sch_mode);
1075 if (ret)
1076 return ret;
1077 }
1078
1079 return 0;
1080 }
1081
1082 static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
1083 {
1084 struct hclge_vport *vport = hdev->vport;
1085 int ret;
1086 u8 i, k;
1087
1088 if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
1089 for (i = 0; i < hdev->tm_info.num_tc; i++) {
1090 ret = hclge_tm_pri_schd_mode_cfg(hdev, i);
1091 if (ret)
1092 return ret;
1093
1094 for (k = 0; k < hdev->num_alloc_vport; k++) {
1095 ret = hclge_tm_qs_schd_mode_cfg(
1096 hdev, vport[k].qs_offset + i,
1097 HCLGE_SCH_MODE_DWRR);
1098 if (ret)
1099 return ret;
1100 }
1101 }
1102 } else {
1103 for (i = 0; i < hdev->num_alloc_vport; i++) {
1104 ret = hclge_tm_schd_mode_vnet_base_cfg(vport);
1105 if (ret)
1106 return ret;
1107
1108 vport++;
1109 }
1110 }
1111
1112 return 0;
1113 }
1114
1115 int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
1116 {
1117 int ret;
1118
1119 ret = hclge_tm_lvl2_schd_mode_cfg(hdev);
1120 if (ret)
1121 return ret;
1122
1123 return hclge_tm_lvl34_schd_mode_cfg(hdev);
1124 }
1125
1126 static int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
1127 {
1128 int ret;
1129
1130 /* Cfg tm mapping */
1131 ret = hclge_tm_map_cfg(hdev);
1132 if (ret)
1133 return ret;
1134
1135 /* Cfg tm shaper */
1136 ret = hclge_tm_shaper_cfg(hdev);
1137 if (ret)
1138 return ret;
1139
1140 /* Cfg dwrr */
1141 ret = hclge_tm_dwrr_cfg(hdev);
1142 if (ret)
1143 return ret;
1144
1145 /* Cfg schd mode for each level schd */
1146 return hclge_tm_schd_mode_hw(hdev);
1147 }
1148
1149 static int hclge_pause_param_setup_hw(struct hclge_dev *hdev)
1150 {
1151 struct hclge_mac *mac = &hdev->hw.mac;
1152
1153 return hclge_pause_param_cfg(hdev, mac->mac_addr,
1154 HCLGE_DEFAULT_PAUSE_TRANS_GAP,
1155 HCLGE_DEFAULT_PAUSE_TRANS_TIME);
1156 }
1157
1158 static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
1159 {
1160 u8 enable_bitmap = 0;
1161
1162 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
1163 enable_bitmap = HCLGE_TX_MAC_PAUSE_EN_MSK |
1164 HCLGE_RX_MAC_PAUSE_EN_MSK;
1165
1166 return hclge_pfc_pause_en_cfg(hdev, enable_bitmap,
1167 hdev->tm_info.hw_pfc_map);
1168 }
1169
1170 /* Each TC has 1024 queue sets for back pressure; they are divided into
1171 * 32 groups, each group containing 32 queue sets, which can be
1172 * represented by a u32 bitmap.
1173 */
1174 static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
1175 {
1176 struct hclge_vport *vport = hdev->vport;
1177 u32 i, k, qs_bitmap;
1178 int ret;
1179
1180 for (i = 0; i < HCLGE_BP_GRP_NUM; i++) {
1181 qs_bitmap = 0;
1182
1183 for (k = 0; k < hdev->num_alloc_vport; k++) {
1184 u16 qs_id = vport->qs_offset + tc;
1185 u8 grp, sub_grp;
1186
1187 grp = hnae3_get_field(qs_id, HCLGE_BP_GRP_ID_M,
1188 HCLGE_BP_GRP_ID_S);
1189 sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
1190 HCLGE_BP_SUB_GRP_ID_S);
1191 if (i == grp)
1192 qs_bitmap |= (1 << sub_grp);
1193
1194 vport++;
1195 }
1196
1197 ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap);
1198 if (ret)
1199 return ret;
1200 }
1201
1202 return 0;
1203 }
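
/* Worked example of the group split above (illustrative, assuming the
 * GRP/SUB_GRP fields simply split qs_id into qs_id / 32 and qs_id % 32,
 * as the 32-groups-of-32 comment implies): a vport with qs_offset = 68
 * and tc = 2 has qs_id = 70, so grp = 2 and sub_grp = 6, and bit 6 is set
 * in the bitmap written for group 2 (the other groups get 0 for this
 * vport).
 */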
1204
1205 static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
1206 {
1207 bool tx_en, rx_en;
1208
1209 switch (hdev->tm_info.fc_mode) {
1210 case HCLGE_FC_NONE:
1211 tx_en = false;
1212 rx_en = false;
1213 break;
1214 case HCLGE_FC_RX_PAUSE:
1215 tx_en = false;
1216 rx_en = true;
1217 break;
1218 case HCLGE_FC_TX_PAUSE:
1219 tx_en = true;
1220 rx_en = false;
1221 break;
1222 case HCLGE_FC_FULL:
1223 tx_en = true;
1224 rx_en = true;
1225 break;
1226 default:
1227 tx_en = true;
1228 rx_en = true;
1229 }
1230
1231 return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
1232 }
1233
1234 int hclge_pause_setup_hw(struct hclge_dev *hdev)
1235 {
1236 int ret;
1237 u8 i;
1238
1239 ret = hclge_pause_param_setup_hw(hdev);
1240 if (ret)
1241 return ret;
1242
1243 if (hdev->tm_info.fc_mode != HCLGE_FC_PFC)
1244 return hclge_mac_pause_setup_hw(hdev);
1245
1246 /* Only DCB-supported dev supports qset back pressure and pfc cmd */
1247 if (!hnae3_dev_dcb_supported(hdev))
1248 return 0;
1249
1250 /* When MAC is in GE mode, hdev does not support pfc setting */
1251 ret = hclge_pfc_setup_hw(hdev);
1252 if (ret)
1253 dev_warn(&hdev->pdev->dev, "set pfc pause failed:%d\n", ret);
1254
1255 for (i = 0; i < hdev->tm_info.num_tc; i++) {
1256 ret = hclge_bp_setup_hw(hdev, i);
1257 if (ret)
1258 return ret;
1259 }
1260
1261 return 0;
1262 }
1263
1264 int hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
1265 {
1266 struct hclge_vport *vport = hdev->vport;
1267 struct hnae3_knic_private_info *kinfo;
1268 u32 i, k;
1269
1270 for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
1271 if (prio_tc[i] >= hdev->tm_info.num_tc)
1272 return -EINVAL;
1273 hdev->tm_info.prio_tc[i] = prio_tc[i];
1274
1275 for (k = 0; k < hdev->num_alloc_vport; k++) {
1276 kinfo = &vport[k].nic.kinfo;
1277 kinfo->prio_tc[i] = prio_tc[i];
1278 }
1279 }
1280 return 0;
1281 }
1282
1283 void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
1284 {
1285 u8 i, bit_map = 0;
1286
1287 hdev->tm_info.num_tc = num_tc;
1288
1289 for (i = 0; i < hdev->tm_info.num_tc; i++)
1290 bit_map |= BIT(i);
1291
1292 if (!bit_map) {
1293 bit_map = 1;
1294 hdev->tm_info.num_tc = 1;
1295 }
1296
1297 hdev->hw_tc_map = bit_map;
1298
1299 hclge_tm_schd_info_init(hdev);
1300 }
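
/* Example of the bitmap above (illustrative only): num_tc = 4 yields
 * hw_tc_map = 0xf (TC0-TC3 enabled); a request for num_tc = 0 falls back
 * to a single TC with hw_tc_map = 0x1.
 */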
1301
1302 int hclge_tm_init_hw(struct hclge_dev *hdev)
1303 {
1304 int ret;
1305
1306 if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
1307 (hdev->tx_sch_mode != HCLGE_FLAG_VNET_BASE_SCH_MODE))
1308 return -ENOTSUPP;
1309
1310 ret = hclge_tm_schd_setup_hw(hdev);
1311 if (ret)
1312 return ret;
1313
1314 ret = hclge_pause_setup_hw(hdev);
1315 if (ret)
1316 return ret;
1317
1318 return 0;
1319 }
1320
1321 int hclge_tm_schd_init(struct hclge_dev *hdev)
1322 {
1323 int ret;
1324
1325 /* fc_mode is HCLGE_FC_FULL on reset */
1326 hdev->tm_info.fc_mode = HCLGE_FC_FULL;
1327 hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
1328
1329 ret = hclge_tm_schd_info_init(hdev);
1330 if (ret)
1331 return ret;
1332
1333 return hclge_tm_init_hw(hdev);
1334 }