// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>

#include "hclge_cmd.h"
#include "hclge_main.h"
#include "hclge_tm.h"
enum hclge_shaper_level {
	HCLGE_SHAPER_LVL_PRI	= 0,
	HCLGE_SHAPER_LVL_PG	= 1,
	HCLGE_SHAPER_LVL_PORT	= 2,
	HCLGE_SHAPER_LVL_QSET	= 3,
	HCLGE_SHAPER_LVL_CNT	= 4,
	HCLGE_SHAPER_LVL_VF	= 0,
	HCLGE_SHAPER_LVL_PF	= 1,
};
#define HCLGE_TM_PFC_PKT_GET_CMD_NUM	3
#define HCLGE_TM_PFC_NUM_GET_PER_CMD	3

#define HCLGE_SHAPER_BS_U_DEF	5
#define HCLGE_SHAPER_BS_S_DEF	20

#define HCLGE_ETHER_MAX_RATE	100000
/* hclge_shaper_para_calc: calculate ir parameter for the shaper
 * @ir: Rate to be config, its unit is Mbps
 * @shaper_level: the shaper level. eg: port, pg, priority, queueset
 * @ir_b: IR_B parameter of IR shaper
 * @ir_u: IR_U parameter of IR shaper
 * @ir_s: IR_S parameter of IR shaper
 *
 * the formula:
 *
 *		IR_b * (2 ^ IR_u) * 8
 * IR(Mbps) = -------------------------  *  CLOCK(1000Mbps)
 *		Tick * (2 ^ IR_s)
 *
 * @return: 0: calculate successful, negative: fail
 */
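/* Worked example (illustrative note, not from the original source): for a
 * priority-level shaper, tick = 6 * 256 = 1536. With ir_b = 126, ir_u = 0,
 * ir_s = 0 the base rate is 126 * 8 * 1000 / 1536, about 656 Mbps. For a
 * requested ir = 5000 Mbps the numerator loop below steps ir_u up: ir_u = 3
 * gives 5250 Mbps (too high), so ir_u is stepped back to 2 and ir_b rounds to
 * (5000 * 1536 + 16000) / 32000 = 240. Check: 240 * (2 ^ 2) * 8 * 1000 / 1536
 * = 5000 Mbps exactly.
 */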
static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
				  u8 *ir_b, u8 *ir_u, u8 *ir_s)
{
#define DIVISOR_CLK		(1000 * 8)
#define DIVISOR_IR_B_126	(126 * DIVISOR_CLK)

	static const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
		6 * 256,	/* Priority level */
		6 * 32,		/* Priority group level */
		6 * 8,		/* Port level */
		6 * 256		/* Qset level */
	};
	u8 ir_u_calc = 0;
	u8 ir_s_calc = 0;
	u32 ir_calc;
	u32 tick;

	/* Calc tick */
	if (shaper_level >= HCLGE_SHAPER_LVL_CNT ||
	    ir > HCLGE_ETHER_MAX_RATE)
		return -EINVAL;

	tick = tick_array[shaper_level];

	/**
	 * Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0
	 * the formula is changed to:
	 *		126 * 1 * 8
	 * ir_calc = ---------------- * 1000
	 *		tick * 1
	 */
	ir_calc = (DIVISOR_IR_B_126 + (tick >> 1) - 1) / tick;

	if (ir_calc == ir) {
		*ir_b = 126;
		*ir_u = 0;
		*ir_s = 0;

		return 0;
	} else if (ir_calc > ir) {
		/* Increasing the denominator to select ir_s value */
		while (ir_calc >= ir && ir) {
			ir_s_calc++;
			ir_calc = DIVISOR_IR_B_126 /
				  (tick * (1 << ir_s_calc));
		}

		*ir_b = (ir * tick * (1 << ir_s_calc) + (DIVISOR_CLK >> 1)) /
			DIVISOR_CLK;
	} else {
		/* Increasing the numerator to select ir_u value */
		u32 numerator;

		while (ir_calc < ir) {
			ir_u_calc++;
			numerator = DIVISOR_IR_B_126 * (1 << ir_u_calc);
			ir_calc = (numerator + (tick >> 1)) / tick;
		}

		if (ir_calc == ir) {
			*ir_b = 126;
		} else {
			u32 denominator = DIVISOR_CLK * (1 << --ir_u_calc);
			*ir_b = (ir * tick + (denominator >> 1)) /
				denominator;
		}
	}

	*ir_u = ir_u_calc;
	*ir_s = ir_s_calc;

	return 0;
}
static int hclge_pfc_stats_get(struct hclge_dev *hdev,
			       enum hclge_opcode_type opcode, u64 *stats)
{
	struct hclge_desc desc[HCLGE_TM_PFC_PKT_GET_CMD_NUM];
	int ret, i, j;

	if (!(opcode == HCLGE_OPC_QUERY_PFC_RX_PKT_CNT ||
	      opcode == HCLGE_OPC_QUERY_PFC_TX_PKT_CNT))
		return -EINVAL;

	for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM - 1; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], opcode, true);
		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	}

	hclge_cmd_setup_basic_desc(&desc[i], opcode, true);

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_TM_PFC_PKT_GET_CMD_NUM);
	if (ret)
		return ret;

	for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) {
		struct hclge_pfc_stats_cmd *pfc_stats =
				(struct hclge_pfc_stats_cmd *)desc[i].data;

		for (j = 0; j < HCLGE_TM_PFC_NUM_GET_PER_CMD; j++) {
			u32 index = i * HCLGE_TM_PFC_PKT_GET_CMD_NUM + j;

			if (index < HCLGE_MAX_TC_NUM)
				stats[index] =
					le64_to_cpu(pfc_stats->pkt_num[j]);
		}
	}

	return 0;
}
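/* Illustrative note (added; not in the original source): the query above uses
 * a chain of three descriptors, the first two flagged HCLGE_CMD_FLAG_NEXT,
 * and each descriptor carries three 64-bit counters. Descriptor i, counter j
 * therefore maps to index i * HCLGE_TM_PFC_PKT_GET_CMD_NUM + j (i.e.
 * i * 3 + j, giving 0..8); only indexes below HCLGE_MAX_TC_NUM are copied
 * out, so the last slot is ignored.
 */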
int hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
	return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_RX_PKT_CNT,
				   stats);
}

int hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
	return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_TX_PKT_CNT,
				   stats);
}
int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PAUSE_EN, false);

	desc.data[0] = cpu_to_le32((tx ? HCLGE_TX_MAC_PAUSE_EN_MSK : 0) |
				   (rx ? HCLGE_RX_MAC_PAUSE_EN_MSK : 0));

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
				  u8 pfc_bitmap)
{
	struct hclge_desc desc;
	struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false);

	pfc->tx_rx_en_bitmap = tx_rx_bitmap;
	pfc->pri_en_bitmap = pfc_bitmap;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
				 u8 pause_trans_gap, u16 pause_trans_time)
{
	struct hclge_cfg_pause_param_cmd *pause_param;
	struct hclge_desc desc;

	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false);

	ether_addr_copy(pause_param->mac_addr, addr);
	ether_addr_copy(pause_param->mac_addr_extra, addr);
	pause_param->pause_trans_gap = pause_trans_gap;
	pause_param->pause_trans_time = cpu_to_le16(pause_trans_time);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
{
	struct hclge_cfg_pause_param_cmd *pause_param;
	struct hclge_desc desc;
	u16 trans_time;
	u8 trans_gap;
	int ret;

	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	trans_gap = pause_param->pause_trans_gap;
	trans_time = le16_to_cpu(pause_param->pause_trans_time);

	return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, trans_time);
}
static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
{
	u8 tc;

	tc = hdev->tm_info.prio_tc[pri_id];
	if (tc >= hdev->tm_info.num_tc)
		return -EINVAL;

	/**
	 * the register for priority has four bytes, the first byte includes
	 * priority0 and priority1; the higher 4 bits stand for priority1
	 * while the lower 4 bits stand for priority0, as below:
	 * first byte:	| pri_1 | pri_0 |
	 * second byte:	| pri_3 | pri_2 |
	 * third byte:	| pri_5 | pri_4 |
	 * fourth byte:	| pri_7 | pri_6 |
	 */
	pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4);

	return 0;
}
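/* Illustrative note (added; not in the original source): with pri_id = 5 and
 * tc = 3, the statement above does pri[2] |= 3 << 4, i.e. TC 3 lands in the
 * high nibble of the third byte, matching the "| pri_5 | pri_4 |" row of the
 * layout shown above.
 */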
static int hclge_up_to_tc_map(struct hclge_dev *hdev)
{
	struct hclge_desc desc;
	u8 *pri = (u8 *)desc.data;
	u8 pri_id;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);

	for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
		ret = hclge_fill_pri_array(hdev, pri, pri_id);
		if (ret)
			return ret;
	}

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
				      u8 pg_id, u8 pri_bit_map)
{
	struct hclge_pg_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, false);

	map = (struct hclge_pg_to_pri_link_cmd *)desc.data;

	map->pg_id = pg_id;
	map->pri_bit_map = pri_bit_map;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
				      u16 qs_id, u8 pri)
{
	struct hclge_qs_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, false);

	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;

	map->qs_id = cpu_to_le16(qs_id);
	map->priority = pri;
	map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev,
				    u16 q_id, u16 qs_id)
{
	struct hclge_nq_to_qs_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false);

	map = (struct hclge_nq_to_qs_link_cmd *)desc.data;

	map->nq_id = cpu_to_le16(q_id);
	map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_pg_weight_cfg(struct hclge_dev *hdev, u8 pg_id,
				  u8 dwrr)
{
	struct hclge_pg_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false);

	weight = (struct hclge_pg_weight_cmd *)desc.data;

	weight->pg_id = pg_id;
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_pri_weight_cfg(struct hclge_dev *hdev, u8 pri_id,
				   u8 dwrr)
{
	struct hclge_priority_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false);

	weight = (struct hclge_priority_weight_cmd *)desc.data;

	weight->pri_id = pri_id;
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id,
				  u8 dwrr)
{
	struct hclge_qs_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false);

	weight = (struct hclge_qs_weight_cmd *)desc.data;

	weight->qs_id = cpu_to_le16(qs_id);
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static u32 hclge_tm_get_shapping_para(u8 ir_b, u8 ir_u, u8 ir_s,
				      u8 bs_b, u8 bs_s)
{
	u32 shapping_para = 0;

	hclge_tm_set_field(shapping_para, IR_B, ir_b);
	hclge_tm_set_field(shapping_para, IR_U, ir_u);
	hclge_tm_set_field(shapping_para, IR_S, ir_s);
	hclge_tm_set_field(shapping_para, BS_B, bs_b);
	hclge_tm_set_field(shapping_para, BS_S, bs_s);

	return shapping_para;
}
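/* Illustrative note (added; not in the original source): assuming the usual
 * hclge_tm.h field layout (IR_B in bits 0-7, IR_U in 8-11, IR_S in 12-15,
 * BS_B in 16-20, BS_S in 21-25), the shaper example above (ir_b = 240,
 * ir_u = 2, ir_s = 0) with the default bucket sizes (bs_b = 5, bs_s = 20)
 * packs to 0x28502f0.
 */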
static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
				    enum hclge_shap_bucket bucket, u8 pg_id,
				    u32 shapping_para)
{
	struct hclge_pg_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;

	opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
		 HCLGE_OPC_TM_PG_C_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;

	shap_cfg_cmd->pg_id = pg_id;

	shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
{
	struct hclge_port_shapping_cmd *shap_cfg_cmd;
	struct hclge_desc desc;
	u32 shapping_para;
	u8 ir_b, ir_u, ir_s;
	int ret;

	ret = hclge_shaper_para_calc(hdev->hw.mac.speed,
				     HCLGE_SHAPER_LVL_PORT,
				     &ir_b, &ir_u, &ir_s);
	if (ret)
		return ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
	shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;

	shapping_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
						   HCLGE_SHAPER_BS_U_DEF,
						   HCLGE_SHAPER_BS_S_DEF);

	shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
				     enum hclge_shap_bucket bucket, u8 pri_id,
				     u32 shapping_para)
{
	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;

	opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
		 HCLGE_OPC_TM_PRI_C_SHAPPING;

	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;

	shap_cfg_cmd->pri_id = pri_id;

	shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_pg_schd_mode_cfg(struct hclge_dev *hdev, u8 pg_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG,
				   false);

	if (hdev->tm_info.pg_info[pg_id].pg_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pg_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG,
				   false);

	if (hdev->tm_info.tc_info[pri_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pri_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id,
				     u8 mode)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG,
				   false);

	if (mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(qs_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id,
			      u32 bit_map)
{
	struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
				   false);

	bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;

	bp_to_qs_map_cmd->tc_id = tc;
	bp_to_qs_map_cmd->qs_group_id = grp_id;
	bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(bit_map);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_qs_shapping_cmd *shap_cfg_cmd;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u8 ir_b, ir_u, ir_s;
	u32 shaper_para;
	int ret, i;

	if (!max_tx_rate)
		max_tx_rate = HCLGE_ETHER_MAX_RATE;

	ret = hclge_shaper_para_calc(max_tx_rate, HCLGE_SHAPER_LVL_QSET,
				     &ir_b, &ir_u, &ir_s);
	if (ret)
		return ret;

	shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
						 HCLGE_SHAPER_BS_U_DEF,
						 HCLGE_SHAPER_BS_S_DEF);

	for (i = 0; i < kinfo->num_tc; i++) {
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG,
					   false);

		shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
		shap_cfg_cmd->qs_id = cpu_to_le16(vport->qs_offset + i);
		shap_cfg_cmd->qs_shapping_para = cpu_to_le32(shaper_para);

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"vf%u, qs%u failed to set tx_rate:%d, ret=%d\n",
				vport->vport_id, shap_cfg_cmd->qs_id,
				max_tx_rate, ret);
			return ret;
		}
	}

	return 0;
}
static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u16 max_rss_size;
	u8 i;

	/* TC configuration is shared by PF/VF in one port, only allow
	 * one tc for VF for simplicity. VF's vport_id is non zero.
	 */
	kinfo->num_tc = vport->vport_id ? 1 :
			min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
	vport->qs_offset = (vport->vport_id ? hdev->tm_info.num_tc : 0) +
				(vport->vport_id ? (vport->vport_id - 1) : 0);

	max_rss_size = min_t(u16, hdev->rss_size_max,
			     vport->alloc_tqps / kinfo->num_tc);

	/* Set to user value, no larger than max_rss_size. */
	if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
	    kinfo->req_rss_size <= max_rss_size) {
		dev_info(&hdev->pdev->dev, "rss changes from %u to %u\n",
			 kinfo->rss_size, kinfo->req_rss_size);
		kinfo->rss_size = kinfo->req_rss_size;
	} else if (kinfo->rss_size > max_rss_size ||
		   (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
		/* if user has not set rss, the rss_size should compare with
		 * the valid msi numbers to ensure one to one map between tqp
		 * and irq as default.
		 */
		if (!kinfo->req_rss_size)
			max_rss_size = min_t(u16, max_rss_size,
					     (hdev->num_nic_msi - 1) /
					     kinfo->num_tc);

		/* Set to the maximum specification value (max_rss_size). */
		kinfo->rss_size = max_rss_size;
	}

	kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size;
	vport->dwrr = 100;  /* 100 percent as init */
	vport->alloc_rss_size = kinfo->rss_size;
	vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i) && i < kinfo->num_tc) {
			kinfo->tc_info[i].enable = true;
			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
			kinfo->tc_info[i].tc = i;
		} else {
			/* Set to default queue if TC is disabled */
			kinfo->tc_info[i].enable = false;
			kinfo->tc_info[i].tqp_offset = 0;
			kinfo->tc_info[i].tqp_count = 1;
			kinfo->tc_info[i].tc = 0;
		}
	}

	memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc,
	       FIELD_SIZEOF(struct hnae3_knic_private_info, prio_tc));
}
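/* Illustrative note (added; not in the original source): the qs_offset rule
 * above lays qsets out as PF first, then one qset per VF. E.g. with
 * num_tc = 4, the PF (vport_id 0) owns qsets 0..3 (one per TC), VF 1 owns
 * qset 4, VF 2 owns qset 5, and so on, since each VF is limited to one TC.
 */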
static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u32 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		hclge_tm_vport_tc_info_update(vport);

		vport++;
	}
}
static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
{
	u8 i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		hdev->tm_info.tc_info[i].tc_id = i;
		hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
		hdev->tm_info.tc_info[i].pgid = 0;
		hdev->tm_info.tc_info[i].bw_limit =
			hdev->tm_info.pg_info[0].bw_limit;
	}

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
		hdev->tm_info.prio_tc[i] =
			(i >= hdev->tm_info.num_tc) ? 0 : i;

	/* DCB is enabled if we have more than 1 TC or pfc_en is
	 * non-zero.
	 */
	if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)
		hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
	else
		hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
}
static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
{
#define BW_PERCENT	100

	u8 i;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		int k;

		hdev->tm_info.pg_dwrr[i] = i ? 0 : BW_PERCENT;

		hdev->tm_info.pg_info[i].pg_id = i;
		hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;

		hdev->tm_info.pg_info[i].bw_limit = HCLGE_ETHER_MAX_RATE;

		if (i != 0)
			continue;

		hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
		for (k = 0; k < hdev->tm_info.num_tc; k++)
			hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
	}
}
static void hclge_pfc_info_init(struct hclge_dev *hdev)
{
	if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE)) {
		if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
			dev_warn(&hdev->pdev->dev,
				 "DCB is disabled, but last mode is FC_PFC\n");

		hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
	} else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
		/* fc_mode_last_time records the last fc_mode when
		 * DCB is enabled, so that fc_mode can be set to
		 * the correct value when DCB is disabled.
		 */
		hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
		hdev->tm_info.fc_mode = HCLGE_FC_PFC;
	}
}
static void hclge_tm_schd_info_init(struct hclge_dev *hdev)
{
	hclge_tm_pg_info_init(hdev);

	hclge_tm_tc_info_init(hdev);

	hclge_tm_vport_info_update(hdev);

	hclge_pfc_info_init(hdev);
}
static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return -EINVAL;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Cfg mapping */
		ret = hclge_tm_pg_to_pri_map_cfg(
			hdev, i, hdev->tm_info.pg_info[i].tc_bit_map);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
{
	u32 shaper_para;
	int ret;
	u32 i;

	/* Cfg pg schd */
	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return -EINVAL;

	/* Pg to pri */
	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		u8 ir_b, ir_u, ir_s;

		/* Calc shaper para */
		ret = hclge_shaper_para_calc(
					hdev->tm_info.pg_info[i].bw_limit,
					HCLGE_SHAPER_LVL_PG,
					&ir_b, &ir_u, &ir_s);
		if (ret)
			return ret;

		shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
							 HCLGE_SHAPER_BS_U_DEF,
							 HCLGE_SHAPER_BS_S_DEF);
		ret = hclge_tm_pg_shapping_cfg(hdev,
					       HCLGE_TM_SHAP_C_BUCKET, i,
					       shaper_para);
		if (ret)
			return ret;

		shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
							 HCLGE_SHAPER_BS_U_DEF,
							 HCLGE_SHAPER_BS_S_DEF);
		ret = hclge_tm_pg_shapping_cfg(hdev,
					       HCLGE_TM_SHAP_P_BUCKET, i,
					       shaper_para);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	/* cfg pg schd */
	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return -EINVAL;

	/* pg to prio */
	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Cfg dwrr */
		ret = hclge_tm_pg_weight_cfg(hdev, i,
					     hdev->tm_info.pg_dwrr[i]);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
				   struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hnae3_queue **tqp = kinfo->tqp;
	struct hnae3_tc_info *v_tc_info;
	u32 i, j;
	int ret;

	for (i = 0; i < kinfo->num_tc; i++) {
		v_tc_info = &kinfo->tc_info[i];
		for (j = 0; j < v_tc_info->tqp_count; j++) {
			struct hnae3_queue *q = tqp[v_tc_info->tqp_offset + j];

			ret = hclge_tm_q_to_qs_map_cfg(hdev,
						       hclge_get_queue_id(q),
						       vport->qs_offset + i);
			if (ret)
				return ret;
		}
	}

	return 0;
}
static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i, k;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		/* Cfg qs -> pri mapping, one by one mapping */
		for (k = 0; k < hdev->num_alloc_vport; k++) {
			struct hnae3_knic_private_info *kinfo =
				&vport[k].nic.kinfo;

			for (i = 0; i < kinfo->num_tc; i++) {
				ret = hclge_tm_qs_to_pri_map_cfg(
					hdev, vport[k].qs_offset + i, i);
				if (ret)
					return ret;
			}
		}
	} else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) {
		/* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */
		for (k = 0; k < hdev->num_alloc_vport; k++)
			for (i = 0; i < HNAE3_MAX_TC; i++) {
				ret = hclge_tm_qs_to_pri_map_cfg(
					hdev, vport[k].qs_offset + i, k);
				if (ret)
					return ret;
			}
	} else {
		return -EINVAL;
	}

	/* Cfg q -> qs mapping */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_vport_q_to_qs_map(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}
static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
{
	u8 ir_b, ir_u, ir_s;
	u32 shaper_para;
	int ret;
	u32 i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		ret = hclge_shaper_para_calc(
					hdev->tm_info.tc_info[i].bw_limit,
					HCLGE_SHAPER_LVL_PRI,
					&ir_b, &ir_u, &ir_s);
		if (ret)
			return ret;

		shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
							 HCLGE_SHAPER_BS_U_DEF,
							 HCLGE_SHAPER_BS_S_DEF);
		ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
						i, shaper_para);
		if (ret)
			return ret;

		shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
							 HCLGE_SHAPER_BS_U_DEF,
							 HCLGE_SHAPER_BS_S_DEF);
		ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
						i, shaper_para);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;
	u8 ir_b, ir_u, ir_s;
	u32 shaper_para;
	int ret;

	ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
				     &ir_b, &ir_u, &ir_s);
	if (ret)
		return ret;

	shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
						 HCLGE_SHAPER_BS_U_DEF,
						 HCLGE_SHAPER_BS_S_DEF);
	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
					vport->vport_id, shaper_para);
	if (ret)
		return ret;

	shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
						 HCLGE_SHAPER_BS_U_DEF,
						 HCLGE_SHAPER_BS_S_DEF);
	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
					vport->vport_id, shaper_para);
	if (ret)
		return ret;

	return 0;
}
static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u8 ir_b, ir_u, ir_s;
	u32 i;
	int ret;

	for (i = 0; i < kinfo->num_tc; i++) {
		ret = hclge_shaper_para_calc(
					hdev->tm_info.tc_info[i].bw_limit,
					HCLGE_SHAPER_LVL_QSET,
					&ir_b, &ir_u, &ir_s);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_tm_pri_vnet_base_shaper_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	/* Need config vport shaper */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_tm_pri_vnet_base_shaper_pri_cfg(vport);
		if (ret)
			return ret;

		ret = hclge_tm_pri_vnet_base_shaper_qs_cfg(vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}
static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		ret = hclge_tm_pri_tc_base_shaper_cfg(hdev);
		if (ret)
			return ret;
	} else {
		ret = hclge_tm_pri_vnet_base_shaper_cfg(hdev);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	struct hclge_pg_info *pg_info;
	u8 dwrr;
	int ret;
	u32 i, k;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		pg_info =
			&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
		dwrr = pg_info->tc_dwrr[i];

		ret = hclge_tm_pri_weight_cfg(hdev, i, dwrr);
		if (ret)
			return ret;

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			ret = hclge_tm_qs_weight_cfg(
				hdev, vport[k].qs_offset + i,
				vport[k].dwrr);
			if (ret)
				return ret;
		}
	}

	return 0;
}
static int hclge_tm_ets_tc_dwrr_cfg(struct hclge_dev *hdev)
{
#define DEFAULT_TC_WEIGHT	1
#define DEFAULT_TC_OFFSET	14

	struct hclge_ets_tc_weight_cmd *ets_weight;
	struct hclge_desc desc;
	unsigned int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, false);
	ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		struct hclge_pg_info *pg_info;

		ets_weight->tc_weight[i] = DEFAULT_TC_WEIGHT;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		pg_info =
			&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
		ets_weight->tc_weight[i] = pg_info->tc_dwrr[i];
	}

	ets_weight->weight_offset = DEFAULT_TC_OFFSET;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	/* Vf dwrr */
	ret = hclge_tm_pri_weight_cfg(hdev, vport->vport_id, vport->dwrr);
	if (ret)
		return ret;

	/* Qset dwrr */
	for (i = 0; i < kinfo->num_tc; i++) {
		ret = hclge_tm_qs_weight_cfg(
			hdev, vport->qs_offset + i,
			hdev->tm_info.pg_info[0].tc_dwrr[i]);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_tm_pri_vnet_base_dwrr_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_tm_pri_vnet_base_dwrr_pri_cfg(vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}
static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev);
		if (ret)
			return ret;

		if (!hnae3_dev_dcb_supported(hdev))
			return 0;

		ret = hclge_tm_ets_tc_dwrr_cfg(hdev);
		if (ret == -EOPNOTSUPP) {
			dev_warn(&hdev->pdev->dev,
				 "fw %08x doesn't support ets tc weight cmd\n",
				 hdev->fw_version);
			ret = 0;
		}

		return ret;
	} else {
		ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_tm_map_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_up_to_tc_map(hdev);
	if (ret)
		return ret;

	ret = hclge_tm_pg_to_pri_map(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_q_qs_cfg(hdev);
}
static int hclge_tm_shaper_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_port_shaper_cfg(hdev);
	if (ret)
		return ret;

	ret = hclge_tm_pg_shaper_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_shaper_cfg(hdev);
}
int hclge_tm_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_pg_dwrr_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_dwrr_cfg(hdev);
}
static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev)
{
	int ret;
	u8 i;

	/* Only being config on TC-Based scheduler mode */
	if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
		return 0;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		ret = hclge_tm_pg_schd_mode_cfg(hdev, i);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	if (vport->vport_id >= HNAE3_MAX_TC)
		return -EINVAL;

	ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
	if (ret)
		return ret;

	for (i = 0; i < kinfo->num_tc; i++) {
		u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode;

		ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i,
						sch_mode);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u8 i, k;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		for (i = 0; i < hdev->tm_info.num_tc; i++) {
			ret = hclge_tm_pri_schd_mode_cfg(hdev, i);
			if (ret)
				return ret;

			for (k = 0; k < hdev->num_alloc_vport; k++) {
				ret = hclge_tm_qs_schd_mode_cfg(
					hdev, vport[k].qs_offset + i,
					HCLGE_SCH_MODE_DWRR);
				if (ret)
					return ret;
			}
		}
	} else {
		for (i = 0; i < hdev->num_alloc_vport; i++) {
			ret = hclge_tm_schd_mode_vnet_base_cfg(vport);
			if (ret)
				return ret;

			vport++;
		}
	}

	return 0;
}
static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_lvl2_schd_mode_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_lvl34_schd_mode_cfg(hdev);
}
int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
{
	int ret;

	/* Cfg tm mapping */
	ret = hclge_tm_map_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg tm shaper */
	ret = hclge_tm_shaper_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg dwrr */
	ret = hclge_tm_dwrr_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg schd mode for each level schd */
	return hclge_tm_schd_mode_hw(hdev);
}
static int hclge_pause_param_setup_hw(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	return hclge_pause_param_cfg(hdev, mac->mac_addr,
				     HCLGE_DEFAULT_PAUSE_TRANS_GAP,
				     HCLGE_DEFAULT_PAUSE_TRANS_TIME);
}
static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
{
	u8 enable_bitmap = 0;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
		enable_bitmap = HCLGE_TX_MAC_PAUSE_EN_MSK |
				HCLGE_RX_MAC_PAUSE_EN_MSK;

	return hclge_pfc_pause_en_cfg(hdev, enable_bitmap,
				      hdev->tm_info.pfc_en);
}
/* Each TC has 1024 queue sets to back-press; they are divided into
 * 32 groups, each group containing 32 queue sets, which can be
 * represented by a u32 bitmap.
 */
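/* Illustrative note (added; not in the original source): assuming the usual
 * field definitions (group id in qs_id bits 9..5, sub-group id in bits 4..0),
 * qs_id 70 belongs to group 70 >> 5 = 2 and sets bit 70 & 0x1f = 6 in that
 * group's bitmap.
 */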
static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
{
	int ret;
	u32 i;

	for (i = 0; i < HCLGE_BP_GRP_NUM; i++) {
		u32 qs_bitmap = 0;
		int k;

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			struct hclge_vport *vport = &hdev->vport[k];
			u16 qs_id = vport->qs_offset + tc;
			u8 grp, sub_grp;

			grp = hnae3_get_field(qs_id, HCLGE_BP_GRP_ID_M,
					      HCLGE_BP_GRP_ID_S);
			sub_grp = hnae3_get_field(qs_id,
						  HCLGE_BP_SUB_GRP_ID_M,
						  HCLGE_BP_SUB_GRP_ID_S);
			if (i == grp)
				qs_bitmap |= (1 << sub_grp);
		}

		ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
{
	bool tx_en, rx_en;

	switch (hdev->tm_info.fc_mode) {
	case HCLGE_FC_NONE:
		tx_en = false;
		rx_en = false;
		break;
	case HCLGE_FC_RX_PAUSE:
		tx_en = false;
		rx_en = true;
		break;
	case HCLGE_FC_TX_PAUSE:
		tx_en = true;
		rx_en = false;
		break;
	case HCLGE_FC_FULL:
		tx_en = true;
		rx_en = true;
		break;
	case HCLGE_FC_PFC:
		tx_en = false;
		rx_en = false;
		break;
	default:
		tx_en = true;
		rx_en = true;
	}

	return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
}
static int hclge_tm_bp_setup(struct hclge_dev *hdev)
{
	int ret = 0;
	int i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		ret = hclge_bp_setup_hw(hdev, i);
		if (ret)
			return ret;
	}

	return ret;
}
int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init)
{
	int ret;

	ret = hclge_pause_param_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_mac_pause_setup_hw(hdev);
	if (ret)
		return ret;

	/* Only DCB-supported dev supports qset back pressure and pfc cmd */
	if (!hnae3_dev_dcb_supported(hdev))
		return 0;

	/* GE MAC does not support PFC, when driver is initializing and MAC
	 * is in GE Mode, ignore the error here, otherwise initialization
	 * will fail.
	 */
	ret = hclge_pfc_setup_hw(hdev);
	if (init && ret == -EOPNOTSUPP) {
		dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n");
	} else if (ret) {
		dev_err(&hdev->pdev->dev, "config pfc failed! ret = %d\n",
			ret);
		return ret;
	}

	return hclge_tm_bp_setup(hdev);
}
void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
{
	struct hclge_vport *vport = hdev->vport;
	struct hnae3_knic_private_info *kinfo;
	u32 i, k;

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		hdev->tm_info.prio_tc[i] = prio_tc[i];

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			kinfo = &vport[k].nic.kinfo;
			kinfo->prio_tc[i] = prio_tc[i];
		}
	}
}
void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
{
	u8 bit_map = 0;
	u8 i;

	hdev->tm_info.num_tc = num_tc;

	for (i = 0; i < hdev->tm_info.num_tc; i++)
		bit_map |= BIT(i);

	if (!bit_map) {
		bit_map = 1;
		hdev->tm_info.num_tc = 1;
	}

	hdev->hw_tc_map = bit_map;

	hclge_tm_schd_info_init(hdev);
}
void hclge_tm_pfc_info_update(struct hclge_dev *hdev)
{
	/* DCB is enabled if we have more than 1 TC or pfc_en is
	 * non-zero.
	 */
	if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)
		hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
	else
		hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;

	hclge_pfc_info_init(hdev);
}
int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)
{
	int ret;

	if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
	    (hdev->tx_sch_mode != HCLGE_FLAG_VNET_BASE_SCH_MODE))
		return -ENOTSUPP;

	ret = hclge_tm_schd_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_pause_setup_hw(hdev, init);
	if (ret)
		return ret;

	return 0;
}
*hdev
)
1472 /* fc_mode is HCLGE_FC_FULL on reset */
1473 hdev
->tm_info
.fc_mode
= HCLGE_FC_FULL
;
1474 hdev
->fc_mode_last_time
= hdev
->tm_info
.fc_mode
;
1476 if (hdev
->tx_sch_mode
!= HCLGE_FLAG_TC_BASE_SCH_MODE
&&
1477 hdev
->tm_info
.num_pg
!= 1)
1480 hclge_tm_schd_info_init(hdev
);
1482 return hclge_tm_init_hw(hdev
, true);
int hclge_tm_vport_map_update(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;

	hclge_tm_vport_tc_info_update(vport);

	ret = hclge_vport_q_to_qs_map(hdev, vport);
	if (ret)
		return ret;

	if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE))
		return 0;

	return hclge_tm_bp_setup(hdev);
}