/*
 * Copyright (c) 2016~2017 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/etherdevice.h>

#include "hclge_cmd.h"
#include "hclge_main.h"
#include "hclge_tm.h"

enum hclge_shaper_level {
	HCLGE_SHAPER_LVL_PRI	= 0,
	HCLGE_SHAPER_LVL_PG	= 1,
	HCLGE_SHAPER_LVL_PORT	= 2,
	HCLGE_SHAPER_LVL_QSET	= 3,
	HCLGE_SHAPER_LVL_CNT	= 4,
	HCLGE_SHAPER_LVL_VF	= 0,
	HCLGE_SHAPER_LVL_PF	= 1,
};

#define HCLGE_TM_PFC_PKT_GET_CMD_NUM	3
#define HCLGE_TM_PFC_NUM_GET_PER_CMD	3

#define HCLGE_SHAPER_BS_U_DEF	5
#define HCLGE_SHAPER_BS_S_DEF	20

#define HCLGE_ETHER_MAX_RATE	100000

/* hclge_shaper_para_calc: calculate the IR parameters of the shaper
 * @ir: rate to be configured, in Mbps
 * @shaper_level: the shaper level, e.g. port, pg, priority, queue set
 * @ir_b: IR_B parameter of the IR shaper
 * @ir_u: IR_U parameter of the IR shaper
 * @ir_s: IR_S parameter of the IR shaper
 *
 * the formula:
 *
 *		IR_b * (2 ^ IR_u) * 8
 * IR(Mbps) = -------------------------  *  CLOCK(1000Mbps)
 *		Tick * (2 ^ IR_s)
 *
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
				  u8 *ir_b, u8 *ir_u, u8 *ir_s)
{
	const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
		6 * 256,	/* Priority level */
		6 * 32,		/* Priority group level */
		6 * 8,		/* Port level */
		6 * 256		/* Qset level */
	};
	u8 ir_u_calc = 0, ir_s_calc = 0;
	u32 ir_calc;
	u32 tick;

	if (shaper_level >= HCLGE_SHAPER_LVL_CNT)
		return -EINVAL;

	tick = tick_array[shaper_level];

	/* Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0;
	 * the formula then reduces to:
	 *
	 *		126 * 1 * 8
	 * ir_calc = ---------------- * 1000
	 *		 tick * 1
	 */
	ir_calc = (1008000 + (tick >> 1) - 1) / tick;

	if (ir_calc == ir) {
		*ir_b = 126;
		*ir_u = 0;
		*ir_s = 0;

		return 0;
	} else if (ir_calc > ir) {
		/* Increasing the denominator to select ir_s value */
		while (ir_calc > ir) {
			ir_s_calc++;
			ir_calc = 1008000 / (tick * (1 << ir_s_calc));
		}

		if (ir_calc == ir)
			*ir_b = 126;
		else
			*ir_b = (ir * tick * (1 << ir_s_calc) + 4000) / 8000;
	} else {
		/* Increasing the numerator to select ir_u value */
		u32 numerator;

		while (ir_calc < ir) {
			ir_u_calc++;
			numerator = 1008000 * (1 << ir_u_calc);
			ir_calc = (numerator + (tick >> 1)) / tick;
		}

		if (ir_calc == ir) {
			*ir_b = 126;
		} else {
			u32 denominator = (8000 * (1 << --ir_u_calc));
			*ir_b = (ir * tick + (denominator >> 1)) / denominator;
		}
	}

	*ir_u = ir_u_calc;
	*ir_s = ir_s_calc;

	return 0;
}

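/* Worked example for hclge_shaper_para_calc() (illustrative only, not part
 * of the original source): at priority level the tick is 6 * 256 = 1536,
 * so the base rate with ir_b = 126, ir_u = 0 and ir_s = 0 is
 * ir_calc = 1008000 / 1536 ~= 656 Mbps.  Requesting ir = 300 Mbps takes
 * the ir_s branch: ir_s = 1 gives 1008000 / 3072 = 328, ir_s = 2 gives
 * 1008000 / 6144 = 164 <= 300, so ir_b = (300 * 1536 * 4 + 4000) / 8000
 * = 230.  Plugging back into the formula: 230 * 8 * 1000 / (1536 * 4)
 * ~= 299 Mbps, just under the requested rate.
 */
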
static int hclge_pfc_stats_get(struct hclge_dev *hdev,
			       enum hclge_opcode_type opcode, u64 *stats)
{
	struct hclge_desc desc[HCLGE_TM_PFC_PKT_GET_CMD_NUM];
	int ret, i, j;

	if (!(opcode == HCLGE_OPC_QUERY_PFC_RX_PKT_CNT ||
	      opcode == HCLGE_OPC_QUERY_PFC_TX_PKT_CNT))
		return -EINVAL;

	for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], opcode, true);
		/* chain every descriptor but the last to the next one */
		if (i != (HCLGE_TM_PFC_PKT_GET_CMD_NUM - 1))
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_TM_PFC_PKT_GET_CMD_NUM);
	if (ret)
		return ret;

	for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) {
		struct hclge_pfc_stats_cmd *pfc_stats =
				(struct hclge_pfc_stats_cmd *)desc[i].data;

		for (j = 0; j < HCLGE_TM_PFC_NUM_GET_PER_CMD; j++) {
			u32 index = i * HCLGE_TM_PFC_NUM_GET_PER_CMD + j;

			if (index < HCLGE_MAX_TC_NUM)
				stats[index] =
					le64_to_cpu(pfc_stats->pkt_num[j]);
		}
	}

	return 0;
}

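/* Layout note for hclge_pfc_stats_get() (illustrative, assuming
 * HCLGE_MAX_TC_NUM is 8 as defined in hclge_main.h): the three chained
 * descriptors carry three 64-bit counters each, giving nine slots for the
 * eight per-TC counters, so the last slot (index 8) is discarded by the
 * HCLGE_MAX_TC_NUM bound above.
 */
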
int hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
	return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_RX_PKT_CNT, stats);
}

int hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
	return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_TX_PKT_CNT, stats);
}

int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PAUSE_EN, false);

	desc.data[0] = cpu_to_le32((tx ? HCLGE_TX_MAC_PAUSE_EN_MSK : 0) |
		(rx ? HCLGE_RX_MAC_PAUSE_EN_MSK : 0));

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
				  u8 pfc_bitmap)
{
	struct hclge_desc desc;
	struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)&desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false);

	pfc->tx_rx_en_bitmap = tx_rx_bitmap;
	pfc->pri_en_bitmap = pfc_bitmap;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
				 u8 pause_trans_gap, u16 pause_trans_time)
{
	struct hclge_cfg_pause_param_cmd *pause_param;
	struct hclge_desc desc;

	pause_param = (struct hclge_cfg_pause_param_cmd *)&desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false);

	ether_addr_copy(pause_param->mac_addr, addr);
	pause_param->pause_trans_gap = pause_trans_gap;
	pause_param->pause_trans_time = cpu_to_le16(pause_trans_time);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
{
	struct hclge_cfg_pause_param_cmd *pause_param;
	struct hclge_desc desc;
	u16 trans_time;
	u8 trans_gap;
	int ret;

	pause_param = (struct hclge_cfg_pause_param_cmd *)&desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	trans_gap = pause_param->pause_trans_gap;
	trans_time = le16_to_cpu(pause_param->pause_trans_time);

	return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, trans_time);
}

static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
{
	u8 tc;

	tc = hdev->tm_info.prio_tc[pri_id];
	if (tc >= hdev->tm_info.num_tc)
		return -EINVAL;

	/* The register for priority has four bytes: the first byte holds
	 * priority 0 and priority 1, where the higher 4 bits stand for
	 * priority 1 and the lower 4 bits for priority 0, as below:
	 * first byte:	| pri_1 | pri_0 |
	 * second byte:	| pri_3 | pri_2 |
	 * third byte:	| pri_5 | pri_4 |
	 * fourth byte:	| pri_7 | pri_6 |
	 */
	pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4);

	return 0;
}

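/* Illustrative example (not part of the original source): with pri_id = 5
 * mapped to tc = 3, pri[5 >> 1] = pri[2] is or-ed with 3 << ((5 & 1) * 4),
 * i.e. the high nibble of the third byte becomes 3, matching the | pri_5 |
 * slot in the layout above.
 */
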
static int hclge_up_to_tc_map(struct hclge_dev *hdev)
{
	struct hclge_desc desc;
	u8 *pri = (u8 *)desc.data;
	u8 pri_id;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);

	for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
		ret = hclge_fill_pri_array(hdev, pri, pri_id);
		if (ret)
			return ret;
	}

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
				      u8 pg_id, u8 pri_bit_map)
{
	struct hclge_pg_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, false);

	map = (struct hclge_pg_to_pri_link_cmd *)desc.data;

	map->pg_id = pg_id;
	map->pri_bit_map = pri_bit_map;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
				      u16 qs_id, u8 pri)
{
	struct hclge_qs_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, false);

	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;

	map->qs_id = cpu_to_le16(qs_id);
	map->priority = pri;
	map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev,
				    u16 q_id, u16 qs_id)
{
	struct hclge_nq_to_qs_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false);

	map = (struct hclge_nq_to_qs_link_cmd *)desc.data;

	map->nq_id = cpu_to_le16(q_id);
	map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_weight_cfg(struct hclge_dev *hdev, u8 pg_id,
				  u8 dwrr)
{
	struct hclge_pg_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false);

	weight = (struct hclge_pg_weight_cmd *)desc.data;

	weight->pg_id = pg_id;
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_weight_cfg(struct hclge_dev *hdev, u8 pri_id,
				   u8 dwrr)
{
	struct hclge_priority_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false);

	weight = (struct hclge_priority_weight_cmd *)desc.data;

	weight->pri_id = pri_id;
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id,
				  u8 dwrr)
{
	struct hclge_qs_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false);

	weight = (struct hclge_qs_weight_cmd *)desc.data;

	weight->qs_id = cpu_to_le16(qs_id);
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
				    enum hclge_shap_bucket bucket, u8 pg_id,
				    u8 ir_b, u8 ir_u, u8 ir_s, u8 bs_b, u8 bs_s)
{
	struct hclge_pg_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;
	u32 shapping_para = 0;

	opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
		 HCLGE_OPC_TM_PG_C_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;

	shap_cfg_cmd->pg_id = pg_id;

	hclge_tm_set_field(shapping_para, IR_B, ir_b);
	hclge_tm_set_field(shapping_para, IR_U, ir_u);
	hclge_tm_set_field(shapping_para, IR_S, ir_s);
	hclge_tm_set_field(shapping_para, BS_B, bs_b);
	hclge_tm_set_field(shapping_para, BS_S, bs_s);

	shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
{
	struct hclge_port_shapping_cmd *shap_cfg_cmd;
	struct hclge_desc desc;
	u32 shapping_para = 0;
	u8 ir_u, ir_b, ir_s;
	int ret;

	ret = hclge_shaper_para_calc(HCLGE_ETHER_MAX_RATE,
				     HCLGE_SHAPER_LVL_PORT,
				     &ir_b, &ir_u, &ir_s);
	if (ret)
		return ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
	shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;

	hclge_tm_set_field(shapping_para, IR_B, ir_b);
	hclge_tm_set_field(shapping_para, IR_U, ir_u);
	hclge_tm_set_field(shapping_para, IR_S, ir_s);
	hclge_tm_set_field(shapping_para, BS_B, HCLGE_SHAPER_BS_U_DEF);
	hclge_tm_set_field(shapping_para, BS_S, HCLGE_SHAPER_BS_S_DEF);

	shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
				     enum hclge_shap_bucket bucket, u8 pri_id,
				     u8 ir_b, u8 ir_u, u8 ir_s,
				     u8 bs_b, u8 bs_s)
{
	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;
	u32 shapping_para = 0;

	opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
		 HCLGE_OPC_TM_PRI_C_SHAPPING;

	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;

	shap_cfg_cmd->pri_id = pri_id;

	hclge_tm_set_field(shapping_para, IR_B, ir_b);
	hclge_tm_set_field(shapping_para, IR_U, ir_u);
	hclge_tm_set_field(shapping_para, IR_S, ir_s);
	hclge_tm_set_field(shapping_para, BS_B, bs_b);
	hclge_tm_set_field(shapping_para, BS_S, bs_s);

	shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_schd_mode_cfg(struct hclge_dev *hdev, u8 pg_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, false);

	if (hdev->tm_info.pg_info[pg_id].pg_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pg_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, false);

	if (hdev->tm_info.tc_info[pri_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pri_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false);

	if (mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(qs_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id,
			      u32 bit_map)
{
	struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
				   false);

	bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;

	bp_to_qs_map_cmd->tc_id = tc;
	bp_to_qs_map_cmd->qs_group_id = grp_id;
	bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(bit_map);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u16 i;

	vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;
	kinfo->num_tc =
		min_t(u16, kinfo->num_tqps, hdev->tm_info.num_tc);
	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max,
			kinfo->num_tqps / kinfo->num_tc);
	vport->qs_offset = hdev->tm_info.num_tc * vport->vport_id;
	vport->dwrr = 100; /* 100 percent as init */
	vport->alloc_rss_size = kinfo->rss_size;

	for (i = 0; i < kinfo->num_tc; i++) {
		if (hdev->hw_tc_map & BIT(i)) {
			kinfo->tc_info[i].enable = true;
			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
			kinfo->tc_info[i].tc = i;
		} else {
			/* Set to default queue if TC is disabled */
			kinfo->tc_info[i].enable = false;
			kinfo->tc_info[i].tqp_offset = 0;
			kinfo->tc_info[i].tqp_count = 1;
			kinfo->tc_info[i].tc = 0;
		}
	}

	memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc,
	       FIELD_SIZEOF(struct hnae3_knic_private_info, prio_tc));
}

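/* Illustrative example (assumed numbers, not part of the original source):
 * with num_tqps = 16, num_tc = 4 and rss_size_max >= 4, each enabled TC
 * gets rss_size = 4 queues, so TC i owns tqps [4 * i, 4 * i + 3] and the
 * queue sets of vport n start at qs_offset = 4 * n.
 */
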
static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u32 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		hclge_tm_vport_tc_info_update(vport);

		vport++;
	}
}

static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
{
	u8 i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		hdev->tm_info.tc_info[i].tc_id = i;
		hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
		hdev->tm_info.tc_info[i].pgid = 0;
		hdev->tm_info.tc_info[i].bw_limit =
			hdev->tm_info.pg_info[0].bw_limit;
	}

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
		hdev->tm_info.prio_tc[i] =
			(i >= hdev->tm_info.num_tc) ? 0 : i;
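
	/* Illustrative example (assumed num_tc = 4): priorities 0-3 map to
	 * TCs 0-3, while the out-of-range priorities 4-7 fall back to TC 0.
	 */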

	/* DCB is enabled if we have more than 1 TC */
	if (hdev->tm_info.num_tc > 1)
		hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
	else
		hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
}

static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
{
	u8 i;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		int k;

		hdev->tm_info.pg_dwrr[i] = i ? 0 : 100;

		hdev->tm_info.pg_info[i].pg_id = i;
		hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;

		hdev->tm_info.pg_info[i].bw_limit = HCLGE_ETHER_MAX_RATE;

		if (i != 0)
			continue;

		hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
		for (k = 0; k < hdev->tm_info.num_tc; k++)
			hdev->tm_info.pg_info[i].tc_dwrr[k] = 100;
	}
}

static void hclge_pfc_info_init(struct hclge_dev *hdev)
{
	if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE)) {
		if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
			dev_warn(&hdev->pdev->dev,
				 "DCB is disabled, but last mode is FC_PFC\n");

		hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
	} else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
		/* fc_mode_last_time records the last fc_mode when
		 * DCB is enabled, so that fc_mode can be set to
		 * the correct value when DCB is disabled.
		 */
		hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
		hdev->tm_info.fc_mode = HCLGE_FC_PFC;
	}
}

static int hclge_tm_schd_info_init(struct hclge_dev *hdev)
{
	if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
	    (hdev->tm_info.num_pg != 1))
		return -EINVAL;

	hclge_tm_pg_info_init(hdev);

	hclge_tm_tc_info_init(hdev);

	hclge_tm_vport_info_update(hdev);

	hclge_pfc_info_init(hdev);

	return 0;
}

static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		ret = hclge_tm_pg_to_pri_map_cfg(
			hdev, i, hdev->tm_info.pg_info[i].tc_bit_map);
		if (ret)
			return ret;
	}

	return 0;
}

*hdev
)
675 if (hdev
->tx_sch_mode
!= HCLGE_FLAG_TC_BASE_SCH_MODE
)
679 for (i
= 0; i
< hdev
->tm_info
.num_pg
; i
++) {
680 /* Calc shaper para */
681 ret
= hclge_shaper_para_calc(
682 hdev
->tm_info
.pg_info
[i
].bw_limit
,
684 &ir_b
, &ir_u
, &ir_s
);
688 ret
= hclge_tm_pg_shapping_cfg(hdev
,
689 HCLGE_TM_SHAP_C_BUCKET
, i
,
690 0, 0, 0, HCLGE_SHAPER_BS_U_DEF
,
691 HCLGE_SHAPER_BS_S_DEF
);
695 ret
= hclge_tm_pg_shapping_cfg(hdev
,
696 HCLGE_TM_SHAP_P_BUCKET
, i
,
698 HCLGE_SHAPER_BS_U_DEF
,
699 HCLGE_SHAPER_BS_S_DEF
);
static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		ret = hclge_tm_pg_weight_cfg(hdev, i,
					     hdev->tm_info.pg_dwrr[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
				   struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hnae3_queue **tqp = kinfo->tqp;
	struct hnae3_tc_info *v_tc_info;
	u32 i, j;
	int ret;

	for (i = 0; i < kinfo->num_tc; i++) {
		v_tc_info = &kinfo->tc_info[i];
		for (j = 0; j < v_tc_info->tqp_count; j++) {
			struct hnae3_queue *q = tqp[v_tc_info->tqp_offset + j];

			ret = hclge_tm_q_to_qs_map_cfg(hdev,
						       hclge_get_queue_id(q),
						       vport->qs_offset + i);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i, k;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		/* Cfg qs -> pri mapping, one by one mapping */
		for (k = 0; k < hdev->num_alloc_vport; k++)
			for (i = 0; i < hdev->tm_info.num_tc; i++) {
				ret = hclge_tm_qs_to_pri_map_cfg(
					hdev, vport[k].qs_offset + i, i);
				if (ret)
					return ret;
			}
	} else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) {
		/* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */
		for (k = 0; k < hdev->num_alloc_vport; k++)
			for (i = 0; i < HNAE3_MAX_TC; i++) {
				ret = hclge_tm_qs_to_pri_map_cfg(
					hdev, vport[k].qs_offset + i, k);
				if (ret)
					return ret;
			}
	} else {
		return -EINVAL;
	}

	/* Cfg q -> qs mapping */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_vport_q_to_qs_map(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
{
	u8 ir_u, ir_b, ir_s;
	u32 i;
	int ret;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		ret = hclge_shaper_para_calc(
					hdev->tm_info.tc_info[i].bw_limit,
					HCLGE_SHAPER_LVL_PRI,
					&ir_b, &ir_u, &ir_s);
		if (ret)
			return ret;

		ret = hclge_tm_pri_shapping_cfg(
			hdev, HCLGE_TM_SHAP_C_BUCKET, i,
			0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
			HCLGE_SHAPER_BS_S_DEF);
		if (ret)
			return ret;

		ret = hclge_tm_pri_shapping_cfg(
			hdev, HCLGE_TM_SHAP_P_BUCKET, i,
			ir_b, ir_u, ir_s, HCLGE_SHAPER_BS_U_DEF,
			HCLGE_SHAPER_BS_S_DEF);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;
	u8 ir_u, ir_b, ir_s;
	int ret;

	ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
				     &ir_b, &ir_u, &ir_s);
	if (ret)
		return ret;

	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
					vport->vport_id,
					0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
					HCLGE_SHAPER_BS_S_DEF);
	if (ret)
		return ret;

	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
					vport->vport_id,
					ir_b, ir_u, ir_s,
					HCLGE_SHAPER_BS_U_DEF,
					HCLGE_SHAPER_BS_S_DEF);
	if (ret)
		return ret;

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u8 ir_u, ir_b, ir_s;
	u32 i;
	int ret;

	for (i = 0; i < kinfo->num_tc; i++) {
		ret = hclge_shaper_para_calc(
					hdev->tm_info.tc_info[i].bw_limit,
					HCLGE_SHAPER_LVL_QSET,
					&ir_b, &ir_u, &ir_s);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	/* Need config vport shaper */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_tm_pri_vnet_base_shaper_pri_cfg(vport);
		if (ret)
			return ret;

		ret = hclge_tm_pri_vnet_base_shaper_qs_cfg(vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		ret = hclge_tm_pri_tc_base_shaper_cfg(hdev);
		if (ret)
			return ret;
	} else {
		ret = hclge_tm_pri_vnet_base_shaper_cfg(hdev);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	struct hclge_pg_info *pg_info;
	u8 dwrr;
	u32 i, k;
	int ret;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		pg_info =
			&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
		dwrr = pg_info->tc_dwrr[i];

		ret = hclge_tm_pri_weight_cfg(hdev, i, dwrr);
		if (ret)
			return ret;

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			ret = hclge_tm_qs_weight_cfg(
				hdev, vport[k].qs_offset + i,
				vport[k].dwrr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	ret = hclge_tm_pri_weight_cfg(hdev, vport->vport_id, vport->dwrr);
	if (ret)
		return ret;

	for (i = 0; i < kinfo->num_tc; i++) {
		ret = hclge_tm_qs_weight_cfg(
			hdev, vport->qs_offset + i,
			hdev->tm_info.pg_info[0].tc_dwrr[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_dwrr_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_tm_pri_vnet_base_dwrr_pri_cfg(vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev);
		if (ret)
			return ret;
	} else {
		ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev);
		if (ret)
			return ret;
	}

	return 0;
}

int hclge_tm_map_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_up_to_tc_map(hdev);
	if (ret)
		return ret;

	ret = hclge_tm_pg_to_pri_map(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_q_qs_cfg(hdev);
}

static int hclge_tm_shaper_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_port_shaper_cfg(hdev);
	if (ret)
		return ret;

	ret = hclge_tm_pg_shaper_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_shaper_cfg(hdev);
}

int hclge_tm_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_pg_dwrr_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_dwrr_cfg(hdev);
}

static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev)
{
	int ret;
	u8 i;

	/* Only configured in TC-based scheduler mode */
	if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
		return 0;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		ret = hclge_tm_pg_schd_mode_cfg(hdev, i);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
	if (ret)
		return ret;

	for (i = 0; i < kinfo->num_tc; i++) {
		u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode;

		ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i,
						sch_mode);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u8 i, k;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		for (i = 0; i < hdev->tm_info.num_tc; i++) {
			ret = hclge_tm_pri_schd_mode_cfg(hdev, i);
			if (ret)
				return ret;

			for (k = 0; k < hdev->num_alloc_vport; k++) {
				ret = hclge_tm_qs_schd_mode_cfg(
					hdev, vport[k].qs_offset + i,
					HCLGE_SCH_MODE_DWRR);
				if (ret)
					return ret;
			}
		}
	} else {
		for (i = 0; i < hdev->num_alloc_vport; i++) {
			ret = hclge_tm_schd_mode_vnet_base_cfg(vport);
			if (ret)
				return ret;

			vport++;
		}
	}

	return 0;
}

int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_lvl2_schd_mode_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_lvl34_schd_mode_cfg(hdev);
}

static int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
{
	int ret;

	/* Cfg tm mapping */
	ret = hclge_tm_map_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg tm shaper */
	ret = hclge_tm_shaper_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg dwrr */
	ret = hclge_tm_dwrr_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg schd mode for each level schd */
	return hclge_tm_schd_mode_hw(hdev);
}

static int hclge_pause_param_setup_hw(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	return hclge_pause_param_cfg(hdev, mac->mac_addr,
				     HCLGE_DEFAULT_PAUSE_TRANS_GAP,
				     HCLGE_DEFAULT_PAUSE_TRANS_TIME);
}

static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
{
	u8 enable_bitmap = 0;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
		enable_bitmap = HCLGE_TX_MAC_PAUSE_EN_MSK |
				HCLGE_RX_MAC_PAUSE_EN_MSK;

	return hclge_pfc_pause_en_cfg(hdev, enable_bitmap,
				      hdev->tm_info.hw_pfc_map);
}

/* Each TC can back-pressure up to 1024 queue sets, which are divided into
 * 32 groups of 32 queue sets each, so one group can be represented by a
 * u32 bitmap.
 */
static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
{
	struct hclge_vport *vport = hdev->vport;
	u32 i, k, qs_bitmap;
	int ret;

	for (i = 0; i < HCLGE_BP_GRP_NUM; i++) {
		qs_bitmap = 0;

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			u16 qs_id = vport[k].qs_offset + tc;
			u8 grp, sub_grp;

			grp = hnae3_get_field(qs_id, HCLGE_BP_GRP_ID_M,
					      HCLGE_BP_GRP_ID_S);
			sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
						  HCLGE_BP_SUB_GRP_ID_S);
			if (i == grp)
				qs_bitmap |= (1 << sub_grp);
		}

		ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap);
		if (ret)
			return ret;
	}

	return 0;
}

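/* Illustrative example for the group/sub-group split (assuming the group
 * field occupies bits 9:5 of the queue set id and the sub-group field
 * bits 4:0): qs_id = 138 gives grp = 138 >> 5 = 4 and
 * sub_grp = 138 & 0x1f = 10, so bit 10 is set in group 4's bitmap.
 */
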
static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
{
	bool tx_en, rx_en;

	switch (hdev->tm_info.fc_mode) {
	case HCLGE_FC_NONE:
		tx_en = false;
		rx_en = false;
		break;
	case HCLGE_FC_RX_PAUSE:
		tx_en = false;
		rx_en = true;
		break;
	case HCLGE_FC_TX_PAUSE:
		tx_en = true;
		rx_en = false;
		break;
	case HCLGE_FC_FULL:
		tx_en = true;
		rx_en = true;
		break;
	default:
		tx_en = true;
		rx_en = true;
	}

	return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
}

int hclge_pause_setup_hw(struct hclge_dev *hdev)
{
	int ret;
	u8 i;

	ret = hclge_pause_param_setup_hw(hdev);
	if (ret)
		return ret;

	if (hdev->tm_info.fc_mode != HCLGE_FC_PFC)
		return hclge_mac_pause_setup_hw(hdev);

	/* Only DCB-supported devices support qset back pressure and the
	 * pfc command.
	 */
	if (!hnae3_dev_dcb_supported(hdev))
		return 0;

	/* When the MAC is in GE mode, hdev does not support pfc setting,
	 * so only warn on failure here.
	 */
	ret = hclge_pfc_setup_hw(hdev);
	if (ret)
		dev_warn(&hdev->pdev->dev, "set pfc pause failed:%d\n", ret);

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		ret = hclge_bp_setup_hw(hdev, i);
		if (ret)
			return ret;
	}

	return 0;
}

int hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
{
	struct hclge_vport *vport = hdev->vport;
	struct hnae3_knic_private_info *kinfo;
	u32 i, k;

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		if (prio_tc[i] >= hdev->tm_info.num_tc)
			return -EINVAL;
		hdev->tm_info.prio_tc[i] = prio_tc[i];

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			kinfo = &vport[k].nic.kinfo;
			kinfo->prio_tc[i] = prio_tc[i];
		}
	}

	return 0;
}

void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
{
	u8 i, bit_map = 0;

	hdev->tm_info.num_tc = num_tc;

	for (i = 0; i < hdev->tm_info.num_tc; i++)
		bit_map |= BIT(i);

	if (!bit_map) {
		bit_map = 1;
		hdev->tm_info.num_tc = 1;
	}

	hdev->hw_tc_map = bit_map;

	hclge_tm_schd_info_init(hdev);
}

int hclge_tm_init_hw(struct hclge_dev *hdev)
{
	int ret;

	if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
	    (hdev->tx_sch_mode != HCLGE_FLAG_VNET_BASE_SCH_MODE))
		return -ENOTSUPP;

	ret = hclge_tm_schd_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_pause_setup_hw(hdev);
	if (ret)
		return ret;

	return 0;
}

int hclge_tm_schd_init(struct hclge_dev *hdev)
{
	int ret;

	/* fc_mode is HCLGE_FC_FULL on reset */
	hdev->tm_info.fc_mode = HCLGE_FC_FULL;
	hdev->fc_mode_last_time = hdev->tm_info.fc_mode;

	ret = hclge_tm_schd_info_init(hdev);
	if (ret)
		return ret;

	return hclge_tm_init_hw(hdev);
}