/*
 * Copyright (c) 2016~2017 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/etherdevice.h>

#include "hclge_cmd.h"
#include "hclge_main.h"
#include "hclge_tm.h"

enum hclge_shaper_level {
	HCLGE_SHAPER_LVL_PRI	= 0,
	HCLGE_SHAPER_LVL_PG	= 1,
	HCLGE_SHAPER_LVL_PORT	= 2,
	HCLGE_SHAPER_LVL_QSET	= 3,
	HCLGE_SHAPER_LVL_CNT	= 4,
	HCLGE_SHAPER_LVL_VF	= 0,
	HCLGE_SHAPER_LVL_PF	= 1,
};

#define HCLGE_SHAPER_BS_U_DEF	1
#define HCLGE_SHAPER_BS_S_DEF	4

#define HCLGE_ETHER_MAX_RATE	100000

/* hclge_shaper_para_calc: calculate ir parameter for the shaper
 * @ir: Rate to be config, its unit is Mbps
 * @shaper_level: the shaper level. eg: port, pg, priority, queueset
 * @ir_b: IR_B parameter of IR shaper
 * @ir_u: IR_U parameter of IR shaper
 * @ir_s: IR_S parameter of IR shaper
 *
 * the formula:
 *
 *		IR_b * (2 ^ IR_u) * 8
 * IR(Mbps) = -------------------------  *  CLOCK(1000Mbps)
 *		Tick * (2 ^ IR_s)
 *
 * @return: 0: calculate successful, negative: fail
 */
static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
				  u8 *ir_b, u8 *ir_u, u8 *ir_s)
{
	const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
		6 * 256,	/* Priority level */
		6 * 32,		/* Priority group level */
		6 * 8,		/* Port level */
		6 * 256		/* Qset level */
	};
	u8 ir_u_calc = 0, ir_s_calc = 0;
	u32 ir_calc;
	u32 tick;

	/* Calc tick */
	if (shaper_level >= HCLGE_SHAPER_LVL_CNT)
		return -EINVAL;

	tick = tick_array[shaper_level];

	/**
	 * Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0
	 * the formula is changed to:
	 *		126 * 1 * 8
	 * ir_calc = ---------------- * 1000
	 *		tick * 1
	 */
	ir_calc = (1008000 + (tick >> 1) - 1) / tick;

	if (ir_calc == ir) {
		*ir_b = 126;
		*ir_u = 0;
		*ir_s = 0;

		return 0;
	} else if (ir_calc > ir) {
		/* Increasing the denominator to select ir_s value */
		while (ir_calc > ir) {
			ir_s_calc++;
			ir_calc = 1008000 / (tick * (1 << ir_s_calc));
		}

		if (ir_calc == ir)
			*ir_b = 126;
		else
			*ir_b = (ir * tick * (1 << ir_s_calc) + 4000) / 8000;
	} else {
		/* Increasing the numerator to select ir_u value */
		u32 numerator;

		while (ir_calc < ir) {
			ir_u_calc++;
			numerator = 1008000 * (1 << ir_u_calc);
			ir_calc = (numerator + (tick >> 1)) / tick;
		}

		if (ir_calc == ir) {
			*ir_b = 126;
		} else {
			u32 denominator = (8000 * (1 << --ir_u_calc));
			*ir_b = (ir * tick + (denominator >> 1)) / denominator;
		}
	}

	*ir_u = ir_u_calc;
	*ir_s = ir_s_calc;

	return 0;
}
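
/* Worked example (illustrative, not from the original source): for
 * ir = 100000 Mbps (HCLGE_ETHER_MAX_RATE) at HCLGE_SHAPER_LVL_PORT,
 * tick = 6 * 8 = 48. The first estimate ir_calc = 1008000 / 48 = 21000
 * is below ir, so ir_u_calc is raised until 1008000 * 2^3 / 48 = 168000
 * exceeds ir; ir_u_calc is then stepped back to 2, giving
 * ir_b = (100000 * 48 + 16000) / 32000 = 150. Checking with the formula
 * above: 150 * 2^2 * 8 / (48 * 2^0) * 1000 = 100000 Mbps.
 */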

static int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PAUSE_EN, false);

	desc.data[0] = cpu_to_le32((tx ? HCLGE_TX_MAC_PAUSE_EN_MSK : 0) |
		(rx ? HCLGE_RX_MAC_PAUSE_EN_MSK : 0));

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
				  u8 pfc_bitmap)
{
	struct hclge_desc desc;
	struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)&desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false);

	pfc->tx_rx_en_bitmap = tx_rx_bitmap;
	pfc->pri_en_bitmap = pfc_bitmap;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
{
	u8 tc;

	tc = hdev->tm_info.prio_tc[pri_id];
	if (tc >= hdev->tm_info.num_tc)
		return -EINVAL;

	/**
	 * the register for priority has four bytes, the first byte includes
	 * priority0 and priority1, the higher 4bit stands for priority1
	 * while the lower 4bit stands for priority0, as below:
	 * first byte:	| pri_1 | pri_0 |
	 * second byte:	| pri_3 | pri_2 |
	 * third byte:	| pri_5 | pri_4 |
	 * fourth byte:	| pri_7 | pri_6 |
	 */
	pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4);

	return 0;
}
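
/* Worked example (illustrative, not from the original source): for
 * pri_id = 5 and tc = 3, pri[5 >> 1] = pri[2] is OR-ed with
 * 3 << ((5 & 1) * 4) = 0x30, i.e. TC 3 lands in the high nibble of the
 * third byte, matching the | pri_5 | pri_4 | layout shown above.
 */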

static int hclge_up_to_tc_map(struct hclge_dev *hdev)
{
	struct hclge_desc desc;
	u8 *pri = (u8 *)desc.data;
	u8 pri_id;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);

	for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
		ret = hclge_fill_pri_array(hdev, pri, pri_id);
		if (ret)
			return ret;
	}

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
				      u8 pg_id, u8 pri_bit_map)
{
	struct hclge_pg_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, false);

	map = (struct hclge_pg_to_pri_link_cmd *)desc.data;

	map->pg_id = pg_id;
	map->pri_bit_map = pri_bit_map;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
				      u16 qs_id, u8 pri)
{
	struct hclge_qs_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, false);

	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;

	map->qs_id = cpu_to_le16(qs_id);
	map->priority = pri;
	map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev,
				    u16 q_id, u16 qs_id)
{
	struct hclge_nq_to_qs_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false);

	map = (struct hclge_nq_to_qs_link_cmd *)desc.data;

	map->nq_id = cpu_to_le16(q_id);
	map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_weight_cfg(struct hclge_dev *hdev, u8 pg_id,
				  u8 dwrr)
{
	struct hclge_pg_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false);

	weight = (struct hclge_pg_weight_cmd *)desc.data;

	weight->pg_id = pg_id;
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_weight_cfg(struct hclge_dev *hdev, u8 pri_id,
				   u8 dwrr)
{
	struct hclge_priority_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false);

	weight = (struct hclge_priority_weight_cmd *)desc.data;

	weight->pri_id = pri_id;
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id,
				  u8 dwrr)
{
	struct hclge_qs_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false);

	weight = (struct hclge_qs_weight_cmd *)desc.data;

	weight->qs_id = cpu_to_le16(qs_id);
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
				    enum hclge_shap_bucket bucket, u8 pg_id,
				    u8 ir_b, u8 ir_u, u8 ir_s, u8 bs_b, u8 bs_s)
{
	struct hclge_pg_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;
	u32 shapping_para = 0;

	opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
		HCLGE_OPC_TM_PG_C_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;

	shap_cfg_cmd->pg_id = pg_id;

	hclge_tm_set_field(shapping_para, IR_B, ir_b);
	hclge_tm_set_field(shapping_para, IR_U, ir_u);
	hclge_tm_set_field(shapping_para, IR_S, ir_s);
	hclge_tm_set_field(shapping_para, BS_B, bs_b);
	hclge_tm_set_field(shapping_para, BS_S, bs_s);

	shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
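
/* Note (added for clarity, not in the original source): the IR_B, IR_U,
 * IR_S, BS_B and BS_S values are all packed into the single 32-bit
 * shapping_para word via hclge_tm_set_field() before being sent to the
 * firmware; the port and priority shaper configuration below uses the
 * same packing.
 */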

static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
{
	struct hclge_port_shapping_cmd *shap_cfg_cmd;
	struct hclge_desc desc;
	u32 shapping_para = 0;
	u8 ir_u, ir_b, ir_s;
	int ret;

	ret = hclge_shaper_para_calc(HCLGE_ETHER_MAX_RATE,
				     HCLGE_SHAPER_LVL_PORT,
				     &ir_b, &ir_u, &ir_s);
	if (ret)
		return ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
	shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;

	hclge_tm_set_field(shapping_para, IR_B, ir_b);
	hclge_tm_set_field(shapping_para, IR_U, ir_u);
	hclge_tm_set_field(shapping_para, IR_S, ir_s);
	hclge_tm_set_field(shapping_para, BS_B, HCLGE_SHAPER_BS_U_DEF);
	hclge_tm_set_field(shapping_para, BS_S, HCLGE_SHAPER_BS_S_DEF);

	shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
				     enum hclge_shap_bucket bucket, u8 pri_id,
				     u8 ir_b, u8 ir_u, u8 ir_s,
				     u8 bs_b, u8 bs_s)
{
	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;
	u32 shapping_para = 0;

	opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
		HCLGE_OPC_TM_PRI_C_SHAPPING;

	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;

	shap_cfg_cmd->pri_id = pri_id;

	hclge_tm_set_field(shapping_para, IR_B, ir_b);
	hclge_tm_set_field(shapping_para, IR_U, ir_u);
	hclge_tm_set_field(shapping_para, IR_S, ir_s);
	hclge_tm_set_field(shapping_para, BS_B, bs_b);
	hclge_tm_set_field(shapping_para, BS_S, bs_s);

	shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_schd_mode_cfg(struct hclge_dev *hdev, u8 pg_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, false);

	if (hdev->tm_info.pg_info[pg_id].pg_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pg_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, false);

	if (hdev->tm_info.tc_info[pri_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pri_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false);

	if (mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(qs_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc)
{
	struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
				   false);

	bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;

	bp_to_qs_map_cmd->tc_id = tc;

	/* Qset and tc are mapped one to one */
	bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(1 << tc);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
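
/* Worked example (illustrative, not from the original source): for tc = 3
 * the command carries tc_id = 3 and qs_bit_map = 1 << 3 = 0x8, so back
 * pressure for TC 3 is bound to qset 3 only, per the one-to-one mapping.
 */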

static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u8 i;

	vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;
	kinfo->num_tc =
		min_t(u16, kinfo->num_tqps, hdev->tm_info.num_tc);
	kinfo->rss_size =
		min_t(u16, hdev->rss_size_max,
		      kinfo->num_tqps / kinfo->num_tc);
	vport->qs_offset = hdev->tm_info.num_tc * vport->vport_id;
	vport->dwrr = 100;	/* 100 percent as init */
	vport->alloc_rss_size = kinfo->rss_size;

	for (i = 0; i < kinfo->num_tc; i++) {
		if (hdev->hw_tc_map & BIT(i)) {
			kinfo->tc_info[i].enable = true;
			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
			kinfo->tc_info[i].tc = i;
		} else {
			/* Set to default queue if TC is disabled */
			kinfo->tc_info[i].enable = false;
			kinfo->tc_info[i].tqp_offset = 0;
			kinfo->tc_info[i].tqp_count = 1;
			kinfo->tc_info[i].tc = 0;
		}
	}

	memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc,
	       FIELD_SIZEOF(struct hnae3_knic_private_info, prio_tc));
}
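
/* Worked example (illustrative, values assumed): with num_tqps = 16,
 * tm_info.num_tc = 4 and rss_size_max >= 4, kinfo->num_tc = 4 and
 * kinfo->rss_size = 16 / 4 = 4, so each enabled TC i covers the tqps
 * [4 * i, 4 * i + 3].
 */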

static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u32 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		hclge_tm_vport_tc_info_update(vport);

		vport++;
	}
}

static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
{
	u8 i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		hdev->tm_info.tc_info[i].tc_id = i;
		hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
		hdev->tm_info.tc_info[i].pgid = 0;
		hdev->tm_info.tc_info[i].bw_limit =
			hdev->tm_info.pg_info[0].bw_limit;
	}

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
		hdev->tm_info.prio_tc[i] =
			(i >= hdev->tm_info.num_tc) ? 0 : i;

	/* DCB is enabled if we have more than 1 TC */
	if (hdev->tm_info.num_tc > 1)
		hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
	else
		hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
}
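
/* Worked example (illustrative, not from the original source): with
 * num_tc = 4 the default mapping is prio_tc = {0, 1, 2, 3, 0, 0, 0, 0},
 * i.e. user priorities beyond the TC count fall back to TC 0, and
 * HCLGE_FLAG_DCB_ENABLE is set because num_tc > 1.
 */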

static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
{
	u8 i;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		int k;

		hdev->tm_info.pg_dwrr[i] = i ? 0 : 100;

		hdev->tm_info.pg_info[i].pg_id = i;
		hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;

		hdev->tm_info.pg_info[i].bw_limit = HCLGE_ETHER_MAX_RATE;

		if (i != 0)
			continue;

		hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
		for (k = 0; k < hdev->tm_info.num_tc; k++)
			hdev->tm_info.pg_info[i].tc_dwrr[k] = 100;
	}
}

static void hclge_pfc_info_init(struct hclge_dev *hdev)
{
	if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE)) {
		if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
			dev_warn(&hdev->pdev->dev,
				 "DCB is disabled, but the last mode is FC_PFC\n");

		hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
	} else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
		/* fc_mode_last_time records the last fc_mode when
		 * DCB is enabled, so that fc_mode can be set to
		 * the correct value when DCB is disabled.
		 */
		hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
		hdev->tm_info.fc_mode = HCLGE_FC_PFC;
	}
}

static int hclge_tm_schd_info_init(struct hclge_dev *hdev)
{
	if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
	    (hdev->tm_info.num_pg != 1))
		return -EINVAL;

	hclge_tm_pg_info_init(hdev);

	hclge_tm_tc_info_init(hdev);

	hclge_tm_vport_info_update(hdev);

	hclge_pfc_info_init(hdev);

	return 0;
}

static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Cfg pg to pri mapping */
		ret = hclge_tm_pg_to_pri_map_cfg(
			hdev, i, hdev->tm_info.pg_info[i].tc_bit_map);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
{
	u8 ir_u, ir_b, ir_s;
	u32 i;
	int ret;

	/* Pg shaper is only configured in TC-based scheduler mode */
	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Calc shaper para */
		ret = hclge_shaper_para_calc(
					hdev->tm_info.pg_info[i].bw_limit,
					HCLGE_SHAPER_LVL_PG,
					&ir_b, &ir_u, &ir_s);
		if (ret)
			return ret;

		ret = hclge_tm_pg_shapping_cfg(hdev,
					       HCLGE_TM_SHAP_C_BUCKET, i,
					       0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
					       HCLGE_SHAPER_BS_S_DEF);
		if (ret)
			return ret;

		ret = hclge_tm_pg_shapping_cfg(hdev,
					       HCLGE_TM_SHAP_P_BUCKET, i,
					       ir_b, ir_u, ir_s,
					       HCLGE_SHAPER_BS_U_DEF,
					       HCLGE_SHAPER_BS_S_DEF);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	/* Pg dwrr is only configured in TC-based scheduler mode */
	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Cfg dwrr */
		ret = hclge_tm_pg_weight_cfg(hdev, i,
					     hdev->tm_info.pg_dwrr[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
				   struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hnae3_queue **tqp = kinfo->tqp;
	struct hnae3_tc_info *v_tc_info;
	u32 i, j;
	int ret;

	for (i = 0; i < kinfo->num_tc; i++) {
		v_tc_info = &kinfo->tc_info[i];
		for (j = 0; j < v_tc_info->tqp_count; j++) {
			struct hnae3_queue *q = tqp[v_tc_info->tqp_offset + j];

			ret = hclge_tm_q_to_qs_map_cfg(hdev,
						       hclge_get_queue_id(q),
						       vport->qs_offset + i);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i, k;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		/* Cfg qs -> pri mapping, one by one mapping */
		for (k = 0; k < hdev->num_alloc_vport; k++)
			for (i = 0; i < hdev->tm_info.num_tc; i++) {
				ret = hclge_tm_qs_to_pri_map_cfg(
					hdev, vport[k].qs_offset + i, i);
				if (ret)
					return ret;
			}
	} else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) {
		/* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */
		for (k = 0; k < hdev->num_alloc_vport; k++)
			for (i = 0; i < HNAE3_MAX_TC; i++) {
				ret = hclge_tm_qs_to_pri_map_cfg(
					hdev, vport[k].qs_offset + i, k);
				if (ret)
					return ret;
			}
	} else {
		return -EINVAL;
	}

	/* Cfg q -> qs mapping */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_vport_q_to_qs_map(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}
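
/* Worked example (illustrative, values assumed): with tm_info.num_tc = 4,
 * vport 1 has qs_offset = 4 * 1 = 4, so in TC-based scheduler mode its
 * qsets 4..7 are linked to priorities 0..3, and each queue of its TC i is
 * linked to qset 4 + i by the q -> qs loop above.
 */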

static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
{
	u8 ir_u, ir_b, ir_s;
	u32 i;
	int ret;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		ret = hclge_shaper_para_calc(
					hdev->tm_info.tc_info[i].bw_limit,
					HCLGE_SHAPER_LVL_PRI,
					&ir_b, &ir_u, &ir_s);
		if (ret)
			return ret;

		ret = hclge_tm_pri_shapping_cfg(
			hdev, HCLGE_TM_SHAP_C_BUCKET, i,
			0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
			HCLGE_SHAPER_BS_S_DEF);
		if (ret)
			return ret;

		ret = hclge_tm_pri_shapping_cfg(
			hdev, HCLGE_TM_SHAP_P_BUCKET, i,
			ir_b, ir_u, ir_s, HCLGE_SHAPER_BS_U_DEF,
			HCLGE_SHAPER_BS_S_DEF);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;
	u8 ir_u, ir_b, ir_s;
	int ret;

	ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
				     &ir_b, &ir_u, &ir_s);
	if (ret)
		return ret;

	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
					vport->vport_id,
					0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
					HCLGE_SHAPER_BS_S_DEF);
	if (ret)
		return ret;

	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
					vport->vport_id,
					ir_b, ir_u, ir_s,
					HCLGE_SHAPER_BS_U_DEF,
					HCLGE_SHAPER_BS_S_DEF);
	if (ret)
		return ret;

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	struct hnae3_tc_info *v_tc_info;
	u8 ir_u, ir_b, ir_s;
	u32 i;
	int ret;

	for (i = 0; i < kinfo->num_tc; i++) {
		v_tc_info = &kinfo->tc_info[i];
		ret = hclge_shaper_para_calc(
					hdev->tm_info.tc_info[i].bw_limit,
					HCLGE_SHAPER_LVL_QSET,
					&ir_b, &ir_u, &ir_s);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	/* Need config vport shaper */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_tm_pri_vnet_base_shaper_pri_cfg(vport);
		if (ret)
			return ret;

		ret = hclge_tm_pri_vnet_base_shaper_qs_cfg(vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		ret = hclge_tm_pri_tc_base_shaper_cfg(hdev);
		if (ret)
			return ret;
	} else {
		ret = hclge_tm_pri_vnet_base_shaper_cfg(hdev);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	struct hclge_pg_info *pg_info;
	u8 dwrr;
	int ret;
	u32 i, k;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		pg_info =
			&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
		dwrr = pg_info->tc_dwrr[i];

		ret = hclge_tm_pri_weight_cfg(hdev, i, dwrr);
		if (ret)
			return ret;

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			ret = hclge_tm_qs_weight_cfg(
				hdev, vport[k].qs_offset + i,
				vport[k].dwrr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	/* Vf dwrr */
	ret = hclge_tm_pri_weight_cfg(hdev, vport->vport_id, vport->dwrr);
	if (ret)
		return ret;

	/* Qset dwrr */
	for (i = 0; i < kinfo->num_tc; i++) {
		ret = hclge_tm_qs_weight_cfg(
			hdev, vport->qs_offset + i,
			hdev->tm_info.pg_info[0].tc_dwrr[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_dwrr_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_tm_pri_vnet_base_dwrr_pri_cfg(vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev);
		if (ret)
			return ret;
	} else {
		ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev);
		if (ret)
			return ret;
	}

	return 0;
}

int hclge_tm_map_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_up_to_tc_map(hdev);
	if (ret)
		return ret;

	ret = hclge_tm_pg_to_pri_map(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_q_qs_cfg(hdev);
}

static int hclge_tm_shaper_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_port_shaper_cfg(hdev);
	if (ret)
		return ret;

	ret = hclge_tm_pg_shaper_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_shaper_cfg(hdev);
}

int hclge_tm_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_pg_dwrr_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_dwrr_cfg(hdev);
}

static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev)
{
	int ret;
	u8 i;

	/* Only configured in TC-based scheduler mode */
	if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
		return 0;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		ret = hclge_tm_pg_schd_mode_cfg(hdev, i);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
	if (ret)
		return ret;

	for (i = 0; i < kinfo->num_tc; i++) {
		u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode;

		ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i,
						sch_mode);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u8 i, k;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		for (i = 0; i < hdev->tm_info.num_tc; i++) {
			ret = hclge_tm_pri_schd_mode_cfg(hdev, i);
			if (ret)
				return ret;

			for (k = 0; k < hdev->num_alloc_vport; k++) {
				ret = hclge_tm_qs_schd_mode_cfg(
					hdev, vport[k].qs_offset + i,
					HCLGE_SCH_MODE_DWRR);
				if (ret)
					return ret;
			}
		}
	} else {
		for (i = 0; i < hdev->num_alloc_vport; i++) {
			ret = hclge_tm_schd_mode_vnet_base_cfg(vport);
			if (ret)
				return ret;

			vport++;
		}
	}

	return 0;
}

int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_lvl2_schd_mode_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_lvl34_schd_mode_cfg(hdev);
}

static int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
{
	int ret;

	/* Cfg tm mapping */
	ret = hclge_tm_map_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg tm shaper */
	ret = hclge_tm_shaper_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg dwrr */
	ret = hclge_tm_dwrr_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg schd mode for each level schd */
	return hclge_tm_schd_mode_hw(hdev);
}

static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
{
	u8 enable_bitmap = 0;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
		enable_bitmap = HCLGE_TX_MAC_PAUSE_EN_MSK |
				HCLGE_RX_MAC_PAUSE_EN_MSK;

	return hclge_pfc_pause_en_cfg(hdev, enable_bitmap,
				      hdev->tm_info.hw_pfc_map);
}

static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
{
	bool tx_en, rx_en;

	switch (hdev->tm_info.fc_mode) {
	case HCLGE_FC_NONE:
		tx_en = false;
		rx_en = false;
		break;
	case HCLGE_FC_RX_PAUSE:
		tx_en = false;
		rx_en = true;
		break;
	case HCLGE_FC_TX_PAUSE:
		tx_en = true;
		rx_en = false;
		break;
	case HCLGE_FC_FULL:
		tx_en = true;
		rx_en = true;
		break;
	default:
		tx_en = true;
		rx_en = true;
	}

	return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
}

int hclge_pause_setup_hw(struct hclge_dev *hdev)
{
	int ret;
	u8 i;

	if (hdev->tm_info.fc_mode != HCLGE_FC_PFC)
		return hclge_mac_pause_setup_hw(hdev);

	/* Only DCB-supported dev supports qset back pressure and pfc cmd */
	if (!hnae3_dev_dcb_supported(hdev))
		return 0;

	/* When MAC is GE Mode, hdev does not support pfc setting */
	ret = hclge_pfc_setup_hw(hdev);
	if (ret)
		dev_warn(&hdev->pdev->dev, "set pfc pause failed:%d\n", ret);

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		ret = hclge_tm_qs_bp_cfg(hdev, i);
		if (ret)
			return ret;
	}

	return 0;
}
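
/* Summary (added for clarity, not in the original source): when fc_mode is
 * not HCLGE_FC_PFC only the MAC-level pause switch above is programmed.
 * In PFC mode on a DCB-capable device, PFC is enabled for the priorities
 * in tm_info.hw_pfc_map and every TC is bound to its qset for back
 * pressure via hclge_tm_qs_bp_cfg().
 */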

int hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
{
	struct hclge_vport *vport = hdev->vport;
	struct hnae3_knic_private_info *kinfo;
	u32 i, k;

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		if (prio_tc[i] >= hdev->tm_info.num_tc)
			return -EINVAL;
		hdev->tm_info.prio_tc[i] = prio_tc[i];

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			kinfo = &vport[k].nic.kinfo;
			kinfo->prio_tc[i] = prio_tc[i];
		}
	}

	return 0;
}

void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
{
	u8 i, bit_map = 0;

	hdev->tm_info.num_tc = num_tc;

	for (i = 0; i < hdev->tm_info.num_tc; i++)
		bit_map |= BIT(i);

	if (!bit_map) {
		bit_map = 1;
		hdev->tm_info.num_tc = 1;
	}

	hdev->hw_tc_map = bit_map;

	hclge_tm_schd_info_init(hdev);
}

int hclge_tm_init_hw(struct hclge_dev *hdev)
{
	int ret;

	if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
	    (hdev->tx_sch_mode != HCLGE_FLAG_VNET_BASE_SCH_MODE))
		return -EINVAL;

	ret = hclge_tm_schd_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_pause_setup_hw(hdev);
	if (ret)
		return ret;

	return 0;
}

int hclge_tm_schd_init(struct hclge_dev *hdev)
{
	int ret;

	/* fc_mode is HCLGE_FC_FULL on reset */
	hdev->tm_info.fc_mode = HCLGE_FC_FULL;
	hdev->fc_mode_last_time = hdev->tm_info.fc_mode;

	ret = hclge_tm_schd_info_init(hdev);
	if (ret)
		return ret;

	return hclge_tm_init_hw(hdev);
}