// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>

#include "hclge_cmd.h"
#include "hclge_main.h"
#include "hclge_tm.h"
enum hclge_shaper_level {
	HCLGE_SHAPER_LVL_PRI	= 0,
	HCLGE_SHAPER_LVL_PG	= 1,
	HCLGE_SHAPER_LVL_PORT	= 2,
	HCLGE_SHAPER_LVL_QSET	= 3,
	HCLGE_SHAPER_LVL_CNT	= 4,
	HCLGE_SHAPER_LVL_VF	= 0,
	HCLGE_SHAPER_LVL_PF	= 1,
};

#define HCLGE_TM_PFC_PKT_GET_CMD_NUM	3
#define HCLGE_TM_PFC_NUM_GET_PER_CMD	3

#define HCLGE_SHAPER_BS_U_DEF	5
#define HCLGE_SHAPER_BS_S_DEF	20
/* hclge_shaper_para_calc: calculate the ir parameters for the shaper
 * @ir: rate to be configured, in Mbps
 * @shaper_level: the shaper level, e.g. port, pg, priority, queueset
 * @ir_para: parameters of the IR shaper
 * @max_tm_rate: max tm rate available to configure
 *
 * the formula:
 *
 *		IR_b * (2 ^ IR_u) * 8
 * IR(Mbps) = -------------------------  *  CLOCK(1000Mbps)
 *		Tick * (2 ^ IR_s)
 *
 * @return: 0: calculate successful, negative: fail
 */
static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
				  struct hclge_shaper_ir_para *ir_para,
				  u32 max_tm_rate)
{
#define DEFAULT_SHAPER_IR_B	126
#define DIVISOR_CLK		(1000 * 8)
#define DEFAULT_DIVISOR_IR_B	(DEFAULT_SHAPER_IR_B * DIVISOR_CLK)

	static const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
		6 * 256,	/* Priority level */
		6 * 32,		/* Priority group level */
		6 * 8,		/* Port level */
		6 * 256		/* Qset level */
	};
	u8 ir_u_calc = 0;
	u8 ir_s_calc = 0;
	u32 ir_calc;
	u32 tick;
	/* Calc tick */
	if (shaper_level >= HCLGE_SHAPER_LVL_CNT ||
	    ir > max_tm_rate)
		return -EINVAL;

	tick = tick_array[shaper_level];

	/* Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0
	 * the formula is changed to:
	 *		126 * 1 * 8
	 * ir_calc = ---------------- * 1000
	 *		tick * 1
	 */
	ir_calc = (DEFAULT_DIVISOR_IR_B + (tick >> 1) - 1) / tick;
	if (ir_calc == ir) {
		/* the initial value of ir_b, ir_u, ir_s are 0 */
		ir_para->ir_b = DEFAULT_SHAPER_IR_B;
		ir_para->ir_u = 0;
		ir_para->ir_s = 0;

		return 0;
	} else if (ir_calc > ir) {
		/* Increasing the denominator to select ir_s value */
		while (ir_calc >= ir && ir) {
			ir_s_calc++;
			ir_calc = DEFAULT_DIVISOR_IR_B /
				  (tick * (1 << ir_s_calc));
		}

		ir_para->ir_b = (ir * tick * (1 << ir_s_calc) +
				 (DIVISOR_CLK >> 1)) / DIVISOR_CLK;
	} else {
		/* Increasing the numerator to select ir_u value */
		u32 numerator;

		while (ir_calc < ir) {
			ir_u_calc++;
			numerator = DEFAULT_DIVISOR_IR_B * (1 << ir_u_calc);
			ir_calc = (numerator + (tick >> 1)) / tick;
		}

		if (ir_calc == ir) {
			ir_para->ir_b = DEFAULT_SHAPER_IR_B;
		} else {
			u32 denominator = DIVISOR_CLK * (1 << --ir_u_calc);

			ir_para->ir_b = (ir * tick + (denominator >> 1)) /
					denominator;
		}
	}

	ir_para->ir_u = ir_u_calc;
	ir_para->ir_s = ir_s_calc;

	return 0;
}
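/* Worked example (illustrative, derived from the formula above): at port
 * level, tick = 6 * 8 = 48, so the default parameters ir_b = 126, ir_u = 0,
 * ir_s = 0 encode 126 * (2 ^ 0) * 8 / (48 * (2 ^ 0)) * 1000 = 21000 Mbps.
 * Lower target rates grow ir_s (the denominator); higher ones grow ir_u
 * (the numerator).
 */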
static int hclge_pfc_stats_get(struct hclge_dev *hdev,
			       enum hclge_opcode_type opcode, u64 *stats)
{
	struct hclge_desc desc[HCLGE_TM_PFC_PKT_GET_CMD_NUM];
	int ret, i, j;

	if (!(opcode == HCLGE_OPC_QUERY_PFC_RX_PKT_CNT ||
	      opcode == HCLGE_OPC_QUERY_PFC_TX_PKT_CNT))
		return -EINVAL;

	for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM - 1; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], opcode, true);
		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	}

	hclge_cmd_setup_basic_desc(&desc[i], opcode, true);

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_TM_PFC_PKT_GET_CMD_NUM);
	if (ret)
		return ret;

	for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) {
		struct hclge_pfc_stats_cmd *pfc_stats =
				(struct hclge_pfc_stats_cmd *)desc[i].data;

		for (j = 0; j < HCLGE_TM_PFC_NUM_GET_PER_CMD; j++) {
			u32 index = i * HCLGE_TM_PFC_PKT_GET_CMD_NUM + j;

			if (index < HCLGE_MAX_TC_NUM)
				stats[index] =
					le64_to_cpu(pfc_stats->pkt_num[j]);
		}
	}

	return 0;
}
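/* Note: three descriptors carrying three counters each yield nine slots,
 * which is enough to cover all HCLGE_MAX_TC_NUM traffic classes; the index
 * check above discards the unused tail slots.
 */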
int hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
	return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_RX_PKT_CNT, stats);
}

int hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
	return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_TX_PKT_CNT, stats);
}
int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PAUSE_EN, false);

	desc.data[0] = cpu_to_le32((tx ? HCLGE_TX_MAC_PAUSE_EN_MSK : 0) |
				   (rx ? HCLGE_RX_MAC_PAUSE_EN_MSK : 0));

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
				  u8 pfc_bitmap)
{
	struct hclge_desc desc;
	struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false);

	pfc->tx_rx_en_bitmap = tx_rx_bitmap;
	pfc->pri_en_bitmap = pfc_bitmap;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
				 u8 pause_trans_gap, u16 pause_trans_time)
{
	struct hclge_cfg_pause_param_cmd *pause_param;
	struct hclge_desc desc;

	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false);

	ether_addr_copy(pause_param->mac_addr, addr);
	ether_addr_copy(pause_param->mac_addr_extra, addr);
	pause_param->pause_trans_gap = pause_trans_gap;
	pause_param->pause_trans_time = cpu_to_le16(pause_trans_time);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
{
	struct hclge_cfg_pause_param_cmd *pause_param;
	struct hclge_desc desc;
	u16 trans_time;
	u8 trans_gap;
	int ret;

	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	trans_gap = pause_param->pause_trans_gap;
	trans_time = le16_to_cpu(pause_param->pause_trans_time);

	return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, trans_time);
}
static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
{
	u8 tc;

	tc = hdev->tm_info.prio_tc[pri_id];

	if (tc >= hdev->tm_info.num_tc)
		return -EINVAL;

	/* the register for priority has four bytes, the first byte includes
	 * priority0 and priority1, the higher 4bit stands for priority1
	 * while the lower 4bit stands for priority0, as below:
	 * first byte:	| pri_1 | pri_0 |
	 * second byte:	| pri_3 | pri_2 |
	 * third byte:	| pri_5 | pri_4 |
	 * fourth byte:	| pri_7 | pri_6 |
	 */
	pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4);

	return 0;
}
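/* Example (illustrative): pri_id = 5 with tc = 2 sets the high nibble of
 * the third byte: pri[5 >> 1] |= 2 << ((5 & 1) * 4), i.e. pri[2] |= 0x20.
 */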
static int hclge_up_to_tc_map(struct hclge_dev *hdev)
{
	struct hclge_desc desc;
	u8 *pri = (u8 *)desc.data;
	u8 pri_id;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);

	for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
		ret = hclge_fill_pri_array(hdev, pri, pri_id);
		if (ret)
			return ret;
	}

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
				      u8 pg_id, u8 pri_bit_map)
{
	struct hclge_pg_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, false);

	map = (struct hclge_pg_to_pri_link_cmd *)desc.data;

	map->pg_id = pg_id;
	map->pri_bit_map = pri_bit_map;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
				      u16 qs_id, u8 pri)
{
	struct hclge_qs_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, false);

	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;

	map->qs_id = cpu_to_le16(qs_id);
	map->priority = pri;
	map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev,
				    u16 q_id, u16 qs_id)
{
	struct hclge_nq_to_qs_link_cmd *map;
	struct hclge_desc desc;
	u16 qs_id_l;
	u16 qs_id_h;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false);

	map = (struct hclge_nq_to_qs_link_cmd *)desc.data;

	map->nq_id = cpu_to_le16(q_id);

	/* convert qs_id to the following format to support qset_id >= 1024
	 * qs_id:   |  15  | 14 ~ 10 |  9 ~ 0  |
	 *            /         / \         \
	 *           /         /   \         \
	 * qset_id: | 15 ~ 11 | 10  |  9 ~ 0  |
	 *          | qs_id_h | vld | qs_id_l |
	 */
	qs_id_l = hnae3_get_field(qs_id, HCLGE_TM_QS_ID_L_MSK,
				  HCLGE_TM_QS_ID_L_S);
	qs_id_h = hnae3_get_field(qs_id, HCLGE_TM_QS_ID_H_MSK,
				  HCLGE_TM_QS_ID_H_S);
	hnae3_set_field(qs_id, HCLGE_TM_QS_ID_L_MSK, HCLGE_TM_QS_ID_L_S,
			qs_id_l);
	hnae3_set_field(qs_id, HCLGE_TM_QS_ID_H_EXT_MSK, HCLGE_TM_QS_ID_H_EXT_S,
			qs_id_h);
	map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
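/* Example (illustrative, assuming the bit layout shown above): an internal
 * qs_id of 0x0483 splits into qs_id_l = 0x083 (bits 9~0) and qs_id_h = 1
 * (bit 10); repacking moves qs_id_h to bits 15~11 (0x0800), and OR-ing in
 * the vld bit (bit 10) gives a qset_id of 0x0C83.
 */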
static int hclge_tm_pg_weight_cfg(struct hclge_dev *hdev, u8 pg_id,
				  u8 dwrr)
{
	struct hclge_pg_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false);

	weight = (struct hclge_pg_weight_cmd *)desc.data;

	weight->pg_id = pg_id;
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_pri_weight_cfg(struct hclge_dev *hdev, u8 pri_id,
				   u8 dwrr)
{
	struct hclge_priority_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false);

	weight = (struct hclge_priority_weight_cmd *)desc.data;

	weight->pri_id = pri_id;
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id,
				  u8 dwrr)
{
	struct hclge_qs_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false);

	weight = (struct hclge_qs_weight_cmd *)desc.data;

	weight->qs_id = cpu_to_le16(qs_id);
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static u32 hclge_tm_get_shapping_para(u8 ir_b, u8 ir_u, u8 ir_s,
				      u8 bs_b, u8 bs_s)
{
	u32 shapping_para = 0;

	hclge_tm_set_field(shapping_para, IR_B, ir_b);
	hclge_tm_set_field(shapping_para, IR_U, ir_u);
	hclge_tm_set_field(shapping_para, IR_S, ir_s);
	hclge_tm_set_field(shapping_para, BS_B, bs_b);
	hclge_tm_set_field(shapping_para, BS_S, bs_s);

	return shapping_para;
}
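/* All five shaper fields are packed into a single 32-bit register word;
 * the hclge_tm_get_field() calls in the get-functions further below
 * perform the inverse extraction when parameters are read back.
 */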
static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
				    enum hclge_shap_bucket bucket, u8 pg_id,
				    u32 shapping_para, u32 rate)
{
	struct hclge_pg_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;

	opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
		 HCLGE_OPC_TM_PG_C_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;

	shap_cfg_cmd->pg_id = pg_id;

	shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para);

	hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);

	shap_cfg_cmd->pg_rate = cpu_to_le32(rate);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
{
	struct hclge_port_shapping_cmd *shap_cfg_cmd;
	struct hclge_shaper_ir_para ir_para;
	struct hclge_desc desc;
	u32 shapping_para;
	int ret;

	ret = hclge_shaper_para_calc(hdev->hw.mac.speed, HCLGE_SHAPER_LVL_PORT,
				     &ir_para,
				     hdev->ae_dev->dev_specs.max_tm_rate);
	if (ret)
		return ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
	shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;

	shapping_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
						   ir_para.ir_s,
						   HCLGE_SHAPER_BS_U_DEF,
						   HCLGE_SHAPER_BS_S_DEF);

	shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para);

	hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);

	shap_cfg_cmd->port_rate = cpu_to_le32(hdev->hw.mac.speed);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
				     enum hclge_shap_bucket bucket, u8 pri_id,
				     u32 shapping_para, u32 rate)
{
	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;

	opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
		 HCLGE_OPC_TM_PRI_C_SHAPPING;

	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;

	shap_cfg_cmd->pri_id = pri_id;

	shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para);

	hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);

	shap_cfg_cmd->pri_rate = cpu_to_le32(rate);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_pg_schd_mode_cfg(struct hclge_dev *hdev, u8 pg_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, false);

	if (hdev->tm_info.pg_info[pg_id].pg_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pg_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, false);

	if (hdev->tm_info.tc_info[pri_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pri_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false);

	if (mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(qs_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id,
			      u32 bit_map)
{
	struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
				   false);

	bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;

	bp_to_qs_map_cmd->tc_id = tc;
	bp_to_qs_map_cmd->qs_group_id = grp_id;
	bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(bit_map);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_qs_shapping_cmd *shap_cfg_cmd;
	struct hclge_shaper_ir_para ir_para;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u32 shaper_para;
	int ret, i;

	if (!max_tx_rate)
		max_tx_rate = hdev->ae_dev->dev_specs.max_tm_rate;

	ret = hclge_shaper_para_calc(max_tx_rate, HCLGE_SHAPER_LVL_QSET,
				     &ir_para,
				     hdev->ae_dev->dev_specs.max_tm_rate);
	if (ret)
		return ret;

	shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
						 ir_para.ir_s,
						 HCLGE_SHAPER_BS_U_DEF,
						 HCLGE_SHAPER_BS_S_DEF);

	for (i = 0; i < kinfo->tc_info.num_tc; i++) {
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG,
					   false);

		shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
		shap_cfg_cmd->qs_id = cpu_to_le16(vport->qs_offset + i);
		shap_cfg_cmd->qs_shapping_para = cpu_to_le32(shaper_para);

		hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);
		shap_cfg_cmd->qs_rate = cpu_to_le32(max_tx_rate);

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"vport%u, qs%u failed to set tx_rate:%d, ret=%d\n",
				vport->vport_id, shap_cfg_cmd->qs_id,
				max_tx_rate, ret);
			return ret;
		}
	}

	return 0;
}
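/* Usage note: a max_tx_rate of 0 is treated as "no limit" and falls back
 * to the device's max_tm_rate; the same shaper word is then programmed
 * into the qset of every enabled TC of the vport.
 */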
static u16 hclge_vport_get_max_rss_size(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
	struct hclge_dev *hdev = vport->back;
	u16 max_rss_size = 0;
	int i;

	if (!tc_info->mqprio_active)
		return vport->alloc_tqps / tc_info->num_tc;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (!(hdev->hw_tc_map & BIT(i)) || i >= tc_info->num_tc)
			continue;
		if (max_rss_size < tc_info->tqp_count[i])
			max_rss_size = tc_info->tqp_count[i];
	}

	return max_rss_size;
}
static u16 hclge_vport_get_tqp_num(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
	struct hclge_dev *hdev = vport->back;
	u16 sum = 0;
	int i;

	if (!tc_info->mqprio_active)
		return kinfo->rss_size * tc_info->num_tc;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i) && i < tc_info->num_tc)
			sum += tc_info->tqp_count[i];
	}

	return sum;
}
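/* Example (illustrative): with mqprio inactive, rss_size = 16 and
 * num_tc = 4 give 16 * 4 = 64 tqps; with mqprio active the per-TC
 * tqp_count[] entries are summed instead.
 */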
static void hclge_tm_update_kinfo_rss_size(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u16 vport_max_rss_size;
	u16 max_rss_size;

	/* TC configuration is shared by PF/VF in one port, only allow
	 * one tc for VF for simplicity. VF's vport_id is non zero.
	 */
	if (vport->vport_id) {
		kinfo->tc_info.num_tc = 1;
		vport->qs_offset = HNAE3_MAX_TC +
				   vport->vport_id - HCLGE_VF_VPORT_START_NUM;
		vport_max_rss_size = hdev->vf_rss_size_max;
	} else {
		kinfo->tc_info.num_tc =
			min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
		vport->qs_offset = 0;
		vport_max_rss_size = hdev->pf_rss_size_max;
	}

	max_rss_size = min_t(u16, vport_max_rss_size,
			     hclge_vport_get_max_rss_size(vport));

	/* Set to user value, no larger than max_rss_size. */
	if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
	    kinfo->req_rss_size <= max_rss_size) {
		dev_info(&hdev->pdev->dev, "rss changes from %u to %u\n",
			 kinfo->rss_size, kinfo->req_rss_size);
		kinfo->rss_size = kinfo->req_rss_size;
	} else if (kinfo->rss_size > max_rss_size ||
		   (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
		/* Set to the maximum specification value (max_rss_size). */
		kinfo->rss_size = max_rss_size;
	}
}
static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i;

	hclge_tm_update_kinfo_rss_size(vport);
	kinfo->num_tqps = hclge_vport_get_tqp_num(vport);
	vport->dwrr = 100;  /* 100 percent as init */
	vport->alloc_rss_size = kinfo->rss_size;
	vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;

	/* when enable mqprio, the tc_info has been updated. */
	if (kinfo->tc_info.mqprio_active)
		return;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i) && i < kinfo->tc_info.num_tc) {
			kinfo->tc_info.tqp_offset[i] = i * kinfo->rss_size;
			kinfo->tc_info.tqp_count[i] = kinfo->rss_size;
		} else {
			/* Set to default queue if TC is disable */
			kinfo->tc_info.tqp_offset[i] = 0;
			kinfo->tc_info.tqp_count[i] = 1;
		}
	}

	memcpy(kinfo->tc_info.prio_tc, hdev->tm_info.prio_tc,
	       sizeof_field(struct hnae3_tc_info, prio_tc));
}
static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u32 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		hclge_tm_vport_tc_info_update(vport);

		vport++;
	}
}
static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
{
	u8 i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		hdev->tm_info.tc_info[i].tc_id = i;
		hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
		hdev->tm_info.tc_info[i].pgid = 0;
		hdev->tm_info.tc_info[i].bw_limit =
			hdev->tm_info.pg_info[0].bw_limit;
	}

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
		hdev->tm_info.prio_tc[i] =
			(i >= hdev->tm_info.num_tc) ? 0 : i;
}
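/* Example (illustrative): with num_tc = 4, prio_tc[] becomes
 * { 0, 1, 2, 3, 0, 0, 0, 0 }, i.e. user priorities beyond the enabled
 * TCs all map back to TC 0.
 */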
static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
{
#define BW_PERCENT	100

	u8 i;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		int k;

		hdev->tm_info.pg_dwrr[i] = i ? 0 : BW_PERCENT;

		hdev->tm_info.pg_info[i].pg_id = i;
		hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;

		hdev->tm_info.pg_info[i].bw_limit =
					hdev->ae_dev->dev_specs.max_tm_rate;

		if (i != 0)
			continue;

		hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
		for (k = 0; k < hdev->tm_info.num_tc; k++)
			hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
		for (; k < HNAE3_MAX_TC; k++)
			hdev->tm_info.pg_info[i].tc_dwrr[k] = 0;
	}
}
static void hclge_update_fc_mode_by_dcb_flag(struct hclge_dev *hdev)
{
	if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en) {
		if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
			dev_warn(&hdev->pdev->dev,
				 "Only 1 tc used, but last mode is FC_PFC\n");

		hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
	} else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
		/* fc_mode_last_time records the last fc_mode when
		 * DCB is enabled, so that fc_mode can be set to
		 * the correct value when DCB is disabled.
		 */
		hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
		hdev->tm_info.fc_mode = HCLGE_FC_PFC;
	}
}
static void hclge_update_fc_mode(struct hclge_dev *hdev)
{
	if (!hdev->tm_info.pfc_en) {
		hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
		return;
	}

	if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
		hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
		hdev->tm_info.fc_mode = HCLGE_FC_PFC;
	}
}
void hclge_tm_pfc_info_update(struct hclge_dev *hdev)
{
	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
		hclge_update_fc_mode(hdev);
	else
		hclge_update_fc_mode_by_dcb_flag(hdev);
}
static void hclge_tm_schd_info_init(struct hclge_dev *hdev)
{
	hclge_tm_pg_info_init(hdev);

	hclge_tm_tc_info_init(hdev);

	hclge_tm_vport_info_update(hdev);

	hclge_tm_pfc_info_update(hdev);
}
static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return -EINVAL;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Cfg mapping */
		ret = hclge_tm_pg_to_pri_map_cfg(
			hdev, i, hdev->tm_info.pg_info[i].tc_bit_map);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
{
	u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
	struct hclge_shaper_ir_para ir_para;
	u32 shaper_para;
	int ret;
	u32 i;

	/* cfg pg schd */
	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return -EINVAL;

	/* pg to pri */
	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		u32 rate = hdev->tm_info.pg_info[i].bw_limit;

		/* Calc shaper para */
		ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PG,
					     &ir_para, max_tm_rate);
		if (ret)
			return ret;

		shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
							 HCLGE_SHAPER_BS_U_DEF,
							 HCLGE_SHAPER_BS_S_DEF);
		ret = hclge_tm_pg_shapping_cfg(hdev,
					       HCLGE_TM_SHAP_C_BUCKET, i,
					       shaper_para, rate);
		if (ret)
			return ret;

		shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b,
							 ir_para.ir_u,
							 ir_para.ir_s,
							 HCLGE_SHAPER_BS_U_DEF,
							 HCLGE_SHAPER_BS_S_DEF);
		ret = hclge_tm_pg_shapping_cfg(hdev,
					       HCLGE_TM_SHAP_P_BUCKET, i,
					       shaper_para, rate);
		if (ret)
			return ret;
	}

	return 0;
}
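/* Note: the C bucket is programmed with a zeroed IR tuple while the P
 * bucket carries the calculated parameters, so the per-PG rate limit is
 * effectively enforced by the P (presumably "peak") shaper alone.
 */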
static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	/* cfg pg schd */
	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return -EINVAL;

	/* pg to prio */
	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Cfg dwrr */
		ret = hclge_tm_pg_weight_cfg(hdev, i, hdev->tm_info.pg_dwrr[i]);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
				   struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
	struct hnae3_queue **tqp = kinfo->tqp;
	u32 i, j;
	int ret;

	for (i = 0; i < tc_info->num_tc; i++) {
		for (j = 0; j < tc_info->tqp_count[i]; j++) {
			struct hnae3_queue *q = tqp[tc_info->tqp_offset[i] + j];

			ret = hclge_tm_q_to_qs_map_cfg(hdev,
						       hclge_get_queue_id(q),
						       vport->qs_offset + i);
			if (ret)
				return ret;
		}
	}

	return 0;
}
static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i, k;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		/* Cfg qs -> pri mapping, one by one mapping */
		for (k = 0; k < hdev->num_alloc_vport; k++) {
			struct hnae3_knic_private_info *kinfo =
				&vport[k].nic.kinfo;

			for (i = 0; i < kinfo->tc_info.num_tc; i++) {
				ret = hclge_tm_qs_to_pri_map_cfg(
					hdev, vport[k].qs_offset + i, i);
				if (ret)
					return ret;
			}
		}
	} else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) {
		/* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */
		for (k = 0; k < hdev->num_alloc_vport; k++)
			for (i = 0; i < HNAE3_MAX_TC; i++) {
				ret = hclge_tm_qs_to_pri_map_cfg(
					hdev, vport[k].qs_offset + i, k);
				if (ret)
					return ret;
			}
	} else {
		return -EINVAL;
	}

	/* Cfg q -> qs mapping */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_vport_q_to_qs_map(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}
static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
{
	u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
	struct hclge_shaper_ir_para ir_para;
	u32 shaper_para;
	int ret;
	u32 i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		u32 rate = hdev->tm_info.tc_info[i].bw_limit;

		ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PRI,
					     &ir_para, max_tm_rate);
		if (ret)
			return ret;

		shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
							 HCLGE_SHAPER_BS_U_DEF,
							 HCLGE_SHAPER_BS_S_DEF);
		ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i,
						shaper_para, rate);
		if (ret)
			return ret;

		shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b,
							 ir_para.ir_u,
							 ir_para.ir_s,
							 HCLGE_SHAPER_BS_U_DEF,
							 HCLGE_SHAPER_BS_S_DEF);
		ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i,
						shaper_para, rate);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_shaper_ir_para ir_para;
	u32 shaper_para;
	int ret;

	ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
				     &ir_para,
				     hdev->ae_dev->dev_specs.max_tm_rate);
	if (ret)
		return ret;

	shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
						 HCLGE_SHAPER_BS_U_DEF,
						 HCLGE_SHAPER_BS_S_DEF);
	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
					vport->vport_id, shaper_para,
					vport->bw_limit);
	if (ret)
		return ret;

	shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
						 ir_para.ir_s,
						 HCLGE_SHAPER_BS_U_DEF,
						 HCLGE_SHAPER_BS_S_DEF);
	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
					vport->vport_id, shaper_para,
					vport->bw_limit);
	if (ret)
		return ret;

	return 0;
}
static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
	struct hclge_shaper_ir_para ir_para;
	u32 i;
	int ret;

	for (i = 0; i < kinfo->tc_info.num_tc; i++) {
		ret = hclge_shaper_para_calc(hdev->tm_info.tc_info[i].bw_limit,
					     HCLGE_SHAPER_LVL_QSET,
					     &ir_para, max_tm_rate);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_tm_pri_vnet_base_shaper_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	/* Need config vport shaper */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_tm_pri_vnet_base_shaper_pri_cfg(vport);
		if (ret)
			return ret;

		ret = hclge_tm_pri_vnet_base_shaper_qs_cfg(vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}
static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		ret = hclge_tm_pri_tc_base_shaper_cfg(hdev);
		if (ret)
			return ret;
	} else {
		ret = hclge_tm_pri_vnet_base_shaper_cfg(hdev);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	struct hclge_pg_info *pg_info;
	u8 dwrr;
	int ret;
	u32 i, k;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		pg_info =
			&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
		dwrr = pg_info->tc_dwrr[i];

		ret = hclge_tm_pri_weight_cfg(hdev, i, dwrr);
		if (ret)
			return ret;

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			ret = hclge_tm_qs_weight_cfg(
				hdev, vport[k].qs_offset + i,
				vport[k].dwrr);
			if (ret)
				return ret;
		}
	}

	return 0;
}
static int hclge_tm_ets_tc_dwrr_cfg(struct hclge_dev *hdev)
{
#define DEFAULT_TC_WEIGHT	1
#define DEFAULT_TC_OFFSET	14

	struct hclge_ets_tc_weight_cmd *ets_weight;
	struct hclge_desc desc;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, false);
	ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		struct hclge_pg_info *pg_info;

		ets_weight->tc_weight[i] = DEFAULT_TC_WEIGHT;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		pg_info =
			&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
		ets_weight->tc_weight[i] = pg_info->tc_dwrr[i];
	}

	ets_weight->weight_offset = DEFAULT_TC_OFFSET;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	/* Vf dwrr */
	ret = hclge_tm_pri_weight_cfg(hdev, vport->vport_id, vport->dwrr);
	if (ret)
		return ret;

	/* Qset dwrr */
	for (i = 0; i < kinfo->tc_info.num_tc; i++) {
		ret = hclge_tm_qs_weight_cfg(
			hdev, vport->qs_offset + i,
			hdev->tm_info.pg_info[0].tc_dwrr[i]);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_tm_pri_vnet_base_dwrr_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_tm_pri_vnet_base_dwrr_pri_cfg(vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}
static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev);
		if (ret)
			return ret;

		if (!hnae3_dev_dcb_supported(hdev))
			return 0;

		ret = hclge_tm_ets_tc_dwrr_cfg(hdev);
		if (ret == -EOPNOTSUPP) {
			dev_warn(&hdev->pdev->dev,
				 "fw %08x doesn't support ets tc weight cmd\n",
				 hdev->fw_version);
			ret = 0;
		}

		return ret;
	} else {
		ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_tm_map_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_up_to_tc_map(hdev);
	if (ret)
		return ret;

	ret = hclge_tm_pg_to_pri_map(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_q_qs_cfg(hdev);
}
static int hclge_tm_shaper_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_port_shaper_cfg(hdev);
	if (ret)
		return ret;

	ret = hclge_tm_pg_shaper_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_shaper_cfg(hdev);
}
int hclge_tm_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_pg_dwrr_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_dwrr_cfg(hdev);
}
static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	/* Only being config on TC-Based scheduler mode */
	if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
		return 0;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		ret = hclge_tm_pg_schd_mode_cfg(hdev, i);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	if (vport->vport_id >= HNAE3_MAX_TC)
		return -EINVAL;

	ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
	if (ret)
		return ret;

	for (i = 0; i < kinfo->tc_info.num_tc; i++) {
		u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode;

		ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i,
						sch_mode);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u8 i, k;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		for (i = 0; i < hdev->tm_info.num_tc; i++) {
			ret = hclge_tm_pri_schd_mode_cfg(hdev, i);
			if (ret)
				return ret;

			for (k = 0; k < hdev->num_alloc_vport; k++) {
				ret = hclge_tm_qs_schd_mode_cfg(
					hdev, vport[k].qs_offset + i,
					HCLGE_SCH_MODE_DWRR);
				if (ret)
					return ret;
			}
		}
	} else {
		for (i = 0; i < hdev->num_alloc_vport; i++) {
			ret = hclge_tm_schd_mode_vnet_base_cfg(vport);
			if (ret)
				return ret;

			vport++;
		}
	}

	return 0;
}
static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_lvl2_schd_mode_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_lvl34_schd_mode_cfg(hdev);
}
int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
{
	int ret;

	/* Cfg tm mapping */
	ret = hclge_tm_map_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg tm shaper */
	ret = hclge_tm_shaper_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg dwrr */
	ret = hclge_tm_dwrr_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg schd mode for each level schd */
	return hclge_tm_schd_mode_hw(hdev);
}
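/* Note on ordering (as reflected above): mappings (queue -> qset ->
 * pri/pg) are programmed first, then shapers, then DWRR weights, and
 * finally the per-level scheduling modes.
 */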
static int hclge_pause_param_setup_hw(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	return hclge_pause_param_cfg(hdev, mac->mac_addr,
				     HCLGE_DEFAULT_PAUSE_TRANS_GAP,
				     HCLGE_DEFAULT_PAUSE_TRANS_TIME);
}
static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
{
	u8 enable_bitmap = 0;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
		enable_bitmap = HCLGE_TX_MAC_PAUSE_EN_MSK |
				HCLGE_RX_MAC_PAUSE_EN_MSK;

	return hclge_pfc_pause_en_cfg(hdev, enable_bitmap,
				      hdev->tm_info.pfc_en);
}
/* The queues used for back pressure are divided into several groups;
 * each group contains 32 queue sets, which can be represented by a u32 bitmap.
 */
static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
{
	u16 grp_id_shift = HCLGE_BP_GRP_ID_S;
	u16 grp_id_mask = HCLGE_BP_GRP_ID_M;
	u8 grp_num = HCLGE_BP_GRP_NUM;
	int i;

	if (hdev->num_tqps > HCLGE_TQP_MAX_SIZE_DEV_V2) {
		grp_num = HCLGE_BP_EXT_GRP_NUM;
		grp_id_mask = HCLGE_BP_EXT_GRP_ID_M;
		grp_id_shift = HCLGE_BP_EXT_GRP_ID_S;
	}

	for (i = 0; i < grp_num; i++) {
		u32 qs_bitmap = 0;
		int k, ret;

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			struct hclge_vport *vport = &hdev->vport[k];
			u16 qs_id = vport->qs_offset + tc;
			u8 grp, sub_grp;

			grp = hnae3_get_field(qs_id, grp_id_mask, grp_id_shift);
			sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
						  HCLGE_BP_SUB_GRP_ID_S);
			if (i == grp)
				qs_bitmap |= (1 << sub_grp);
		}

		ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap);
		if (ret)
			return ret;
	}

	return 0;
}
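/* Example (illustrative): with 32 qsets per group, qs_id = 37 lands in
 * group 37 / 32 = 1 with sub-group bit 37 % 32 = 5, so group 1's bitmap
 * gets BIT(5) set for that vport's qset of this TC.
 */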
static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
{
	bool tx_en, rx_en;

	switch (hdev->tm_info.fc_mode) {
	case HCLGE_FC_NONE:
		tx_en = false;
		rx_en = false;
		break;
	case HCLGE_FC_RX_PAUSE:
		tx_en = false;
		rx_en = true;
		break;
	case HCLGE_FC_TX_PAUSE:
		tx_en = true;
		rx_en = false;
		break;
	case HCLGE_FC_FULL:
		tx_en = true;
		rx_en = true;
		break;
	case HCLGE_FC_PFC:
		tx_en = false;
		rx_en = false;
		break;
	default:
		tx_en = true;
		rx_en = true;
	}

	return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
}
static int hclge_tm_bp_setup(struct hclge_dev *hdev)
{
	int ret;
	int i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		ret = hclge_bp_setup_hw(hdev, i);
		if (ret)
			return ret;
	}

	return 0;
}
int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init)
{
	int ret;

	ret = hclge_pause_param_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_mac_pause_setup_hw(hdev);
	if (ret)
		return ret;

	/* Only DCB-supported dev supports qset back pressure and pfc cmd */
	if (!hnae3_dev_dcb_supported(hdev))
		return 0;

	/* GE MAC does not support PFC, when driver is initializing and MAC
	 * is in GE Mode, ignore the error here, otherwise initialization
	 * will fail.
	 */
	ret = hclge_pfc_setup_hw(hdev);
	if (init && ret == -EOPNOTSUPP)
		dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n");
	else if (ret) {
		dev_err(&hdev->pdev->dev, "config pfc failed! ret = %d\n",
			ret);
		return ret;
	}

	return hclge_tm_bp_setup(hdev);
}
void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
{
	struct hclge_vport *vport = hdev->vport;
	struct hnae3_knic_private_info *kinfo;
	u32 i, k;

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		hdev->tm_info.prio_tc[i] = prio_tc[i];

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			kinfo = &vport[k].nic.kinfo;
			kinfo->tc_info.prio_tc[i] = prio_tc[i];
		}
	}
}
void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
{
	u8 bit_map = 0;
	u8 i;

	hdev->tm_info.num_tc = num_tc;

	for (i = 0; i < hdev->tm_info.num_tc; i++)
		bit_map |= BIT(i);

	if (!bit_map) {
		bit_map = 1;
		hdev->tm_info.num_tc = 1;
	}

	hdev->hw_tc_map = bit_map;

	hclge_tm_schd_info_init(hdev);
}
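/* Example (illustrative): num_tc = 4 yields bit_map = 0xF; a request for
 * zero TCs degenerates to one TC with bit_map = 0x1.
 */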
int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)
{
	int ret;

	if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
	    (hdev->tx_sch_mode != HCLGE_FLAG_VNET_BASE_SCH_MODE))
		return -ENOTSUPP;

	ret = hclge_tm_schd_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_pause_setup_hw(hdev, init);
	if (ret)
		return ret;

	return 0;
}
int hclge_tm_schd_init(struct hclge_dev *hdev)
{
	/* fc_mode is HCLGE_FC_FULL on reset */
	hdev->tm_info.fc_mode = HCLGE_FC_FULL;
	hdev->fc_mode_last_time = hdev->tm_info.fc_mode;

	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE &&
	    hdev->tm_info.num_pg != 1)
		return -EINVAL;

	hclge_tm_schd_info_init(hdev);

	return hclge_tm_init_hw(hdev, true);
}
int hclge_tm_vport_map_update(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;

	hclge_tm_vport_tc_info_update(vport);

	ret = hclge_vport_q_to_qs_map(hdev, vport);
	if (ret)
		return ret;

	if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en)
		return 0;

	return hclge_tm_bp_setup(hdev);
}
int hclge_tm_get_qset_num(struct hclge_dev *hdev, u16 *qset_num)
{
	struct hclge_tm_nodes_cmd *nodes;
	struct hclge_desc desc;
	int ret;

	if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) {
		/* Each PF has 8 qsets and each VF has 1 qset */
		*qset_num = HCLGE_TM_PF_MAX_QSET_NUM + pci_num_vf(hdev->pdev);
		return 0;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get qset num, ret = %d\n", ret);
		return ret;
	}

	nodes = (struct hclge_tm_nodes_cmd *)desc.data;
	*qset_num = le16_to_cpu(nodes->qset_num);
	return 0;
}
int hclge_tm_get_pri_num(struct hclge_dev *hdev, u8 *pri_num)
{
	struct hclge_tm_nodes_cmd *nodes;
	struct hclge_desc desc;
	int ret;

	if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) {
		*pri_num = HCLGE_TM_PF_MAX_PRI_NUM;
		return 0;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get pri num, ret = %d\n", ret);
		return ret;
	}

	nodes = (struct hclge_tm_nodes_cmd *)desc.data;
	*pri_num = nodes->pri_num;
	return 0;
}
int hclge_tm_get_qset_map_pri(struct hclge_dev *hdev, u16 qset_id, u8 *priority,
			      u8 *link_vld)
{
	struct hclge_qs_to_pri_link_cmd *map;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, true);
	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
	map->qs_id = cpu_to_le16(qset_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get qset map priority, ret = %d\n", ret);
		return ret;
	}

	*priority = map->priority;
	*link_vld = map->link_vld;
	return 0;
}
int hclge_tm_get_qset_sch_mode(struct hclge_dev *hdev, u16 qset_id, u8 *mode)
{
	struct hclge_qs_sch_mode_cfg_cmd *qs_sch_mode;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, true);
	qs_sch_mode = (struct hclge_qs_sch_mode_cfg_cmd *)desc.data;
	qs_sch_mode->qs_id = cpu_to_le16(qset_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get qset sch mode, ret = %d\n", ret);
		return ret;
	}

	*mode = qs_sch_mode->sch_mode;
	return 0;
}
int hclge_tm_get_qset_weight(struct hclge_dev *hdev, u16 qset_id, u8 *weight)
{
	struct hclge_qs_weight_cmd *qs_weight;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, true);
	qs_weight = (struct hclge_qs_weight_cmd *)desc.data;
	qs_weight->qs_id = cpu_to_le16(qset_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get qset weight, ret = %d\n", ret);
		return ret;
	}

	*weight = qs_weight->dwrr;
	return 0;
}
int hclge_tm_get_qset_shaper(struct hclge_dev *hdev, u16 qset_id,
			     struct hclge_tm_shaper_para *para)
{
	struct hclge_qs_shapping_cmd *shap_cfg_cmd;
	struct hclge_desc desc;
	u32 shapping_para;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG, true);
	shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
	shap_cfg_cmd->qs_id = cpu_to_le16(qset_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get qset %u shaper, ret = %d\n", qset_id,
			ret);
		return ret;
	}

	shapping_para = le32_to_cpu(shap_cfg_cmd->qs_shapping_para);
	para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
	para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
	para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
	para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
	para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
	para->flag = shap_cfg_cmd->flag;
	para->rate = le32_to_cpu(shap_cfg_cmd->qs_rate);
	return 0;
}
int hclge_tm_get_pri_sch_mode(struct hclge_dev *hdev, u8 pri_id, u8 *mode)
{
	struct hclge_pri_sch_mode_cfg_cmd *pri_sch_mode;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, true);
	pri_sch_mode = (struct hclge_pri_sch_mode_cfg_cmd *)desc.data;
	pri_sch_mode->pri_id = pri_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get priority sch mode, ret = %d\n", ret);
		return ret;
	}

	*mode = pri_sch_mode->sch_mode;
	return 0;
}
int hclge_tm_get_pri_weight(struct hclge_dev *hdev, u8 pri_id, u8 *weight)
{
	struct hclge_priority_weight_cmd *priority_weight;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, true);
	priority_weight = (struct hclge_priority_weight_cmd *)desc.data;
	priority_weight->pri_id = pri_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get priority weight, ret = %d\n", ret);
		return ret;
	}

	*weight = priority_weight->dwrr;
	return 0;
}
int hclge_tm_get_pri_shaper(struct hclge_dev *hdev, u8 pri_id,
			    enum hclge_opcode_type cmd,
			    struct hclge_tm_shaper_para *para)
{
	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
	struct hclge_desc desc;
	u32 shapping_para;
	int ret;

	if (cmd != HCLGE_OPC_TM_PRI_C_SHAPPING &&
	    cmd != HCLGE_OPC_TM_PRI_P_SHAPPING)
		return -EINVAL;

	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
	shap_cfg_cmd->pri_id = pri_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get priority shaper(%#x), ret = %d\n",
			cmd, ret);
		return ret;
	}

	shapping_para = le32_to_cpu(shap_cfg_cmd->pri_shapping_para);
	para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
	para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
	para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
	para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
	para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
	para->flag = shap_cfg_cmd->flag;
	para->rate = le32_to_cpu(shap_cfg_cmd->pri_rate);
	return 0;
}
int hclge_tm_get_q_to_qs_map(struct hclge_dev *hdev, u16 q_id, u16 *qset_id)
{
	struct hclge_nq_to_qs_link_cmd *map;
	struct hclge_desc desc;
	u16 qs_id_l;
	u16 qs_id_h;
	int ret;

	map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, true);
	map->nq_id = cpu_to_le16(q_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get queue to qset map, ret = %d\n", ret);
		return ret;
	}
	*qset_id = le16_to_cpu(map->qset_id);

	/* convert qset_id to the following format, drop the vld bit
	 *            | qs_id_h | vld | qs_id_l |
	 * qset_id:   | 15 ~ 11 |  10 |  9 ~ 0  |
	 *             \         \   /         /
	 *              \         \ /         /
	 * qset_id:   |  15  | 14 ~ 10 |  9 ~ 0  |
	 */
	qs_id_l = hnae3_get_field(*qset_id, HCLGE_TM_QS_ID_L_MSK,
				  HCLGE_TM_QS_ID_L_S);
	qs_id_h = hnae3_get_field(*qset_id, HCLGE_TM_QS_ID_H_EXT_MSK,
				  HCLGE_TM_QS_ID_H_EXT_S);
	*qset_id = 0;
	hnae3_set_field(*qset_id, HCLGE_TM_QS_ID_L_MSK, HCLGE_TM_QS_ID_L_S,
			qs_id_l);
	hnae3_set_field(*qset_id, HCLGE_TM_QS_ID_H_MSK, HCLGE_TM_QS_ID_H_S,
			qs_id_h);
	return 0;
}
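/* This is the inverse of the repacking done in hclge_tm_q_to_qs_map_cfg():
 * the vld bit is dropped and the high bits move back from 15~11 to 14~10.
 */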
int hclge_tm_get_q_to_tc(struct hclge_dev *hdev, u16 q_id, u8 *tc_id)
{
#define HCLGE_TM_TC_MASK		0x7

	struct hclge_tqp_tx_queue_tc_cmd *tc;
	struct hclge_desc desc;
	int ret;

	tc = (struct hclge_tqp_tx_queue_tc_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TQP_TX_QUEUE_TC, true);
	tc->queue_id = cpu_to_le16(q_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get queue to tc map, ret = %d\n", ret);
		return ret;
	}

	*tc_id = tc->tc_id & HCLGE_TM_TC_MASK;
	return 0;
}
int hclge_tm_get_pg_to_pri_map(struct hclge_dev *hdev, u8 pg_id,
			       u8 *pri_bit_map)
{
	struct hclge_pg_to_pri_link_cmd *map;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, true);
	map = (struct hclge_pg_to_pri_link_cmd *)desc.data;
	map->pg_id = pg_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get pg to pri map, ret = %d\n", ret);
		return ret;
	}

	*pri_bit_map = map->pri_bit_map;
	return 0;
}
int hclge_tm_get_pg_weight(struct hclge_dev *hdev, u8 pg_id, u8 *weight)
{
	struct hclge_pg_weight_cmd *pg_weight_cmd;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, true);
	pg_weight_cmd = (struct hclge_pg_weight_cmd *)desc.data;
	pg_weight_cmd->pg_id = pg_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get pg weight, ret = %d\n", ret);
		return ret;
	}

	*weight = pg_weight_cmd->dwrr;
	return 0;
}
int hclge_tm_get_pg_sch_mode(struct hclge_dev *hdev, u8 pg_id, u8 *mode)
{
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, true);
	desc.data[0] = cpu_to_le32(pg_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get pg sch mode, ret = %d\n", ret);
		return ret;
	}

	*mode = (u8)le32_to_cpu(desc.data[1]);
	return 0;
}
int hclge_tm_get_pg_shaper(struct hclge_dev *hdev, u8 pg_id,
			   enum hclge_opcode_type cmd,
			   struct hclge_tm_shaper_para *para)
{
	struct hclge_pg_shapping_cmd *shap_cfg_cmd;
	struct hclge_desc desc;
	u32 shapping_para;
	int ret;

	if (cmd != HCLGE_OPC_TM_PG_C_SHAPPING &&
	    cmd != HCLGE_OPC_TM_PG_P_SHAPPING)
		return -EINVAL;

	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
	shap_cfg_cmd->pg_id = pg_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get pg shaper(%#x), ret = %d\n",
			cmd, ret);
		return ret;
	}

	shapping_para = le32_to_cpu(shap_cfg_cmd->pg_shapping_para);
	para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
	para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
	para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
	para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
	para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
	para->flag = shap_cfg_cmd->flag;
	para->rate = le32_to_cpu(shap_cfg_cmd->pg_rate);
	return 0;
}
int hclge_tm_get_port_shaper(struct hclge_dev *hdev,
			     struct hclge_tm_shaper_para *para)
{
	struct hclge_port_shapping_cmd *port_shap_cfg_cmd;
	struct hclge_desc desc;
	u32 shapping_para;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get port shaper, ret = %d\n", ret);
		return ret;
	}

	port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
	shapping_para = le32_to_cpu(port_shap_cfg_cmd->port_shapping_para);
	para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
	para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
	para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
	para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
	para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
	para->flag = port_shap_cfg_cmd->flag;
	para->rate = le32_to_cpu(port_shap_cfg_cmd->port_rate);

	return 0;
}