/*
 * Copyright (c) 2016~2017 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/etherdevice.h>

#include "hclge_cmd.h"
#include "hclge_main.h"
#include "hclge_tm.h"

enum hclge_shaper_level {
	HCLGE_SHAPER_LVL_PRI	= 0,
	HCLGE_SHAPER_LVL_PG	= 1,
	HCLGE_SHAPER_LVL_PORT	= 2,
	HCLGE_SHAPER_LVL_QSET	= 3,
	HCLGE_SHAPER_LVL_CNT	= 4,
	HCLGE_SHAPER_LVL_VF	= 0,
	HCLGE_SHAPER_LVL_PF	= 1,
};

#define HCLGE_SHAPER_BS_U_DEF	5
#define HCLGE_SHAPER_BS_S_DEF	20

#define HCLGE_ETHER_MAX_RATE	100000

/* hclge_shaper_para_calc: calculate the IR parameters for the shaper
 * @ir: rate to be configured, in Mbps
 * @shaper_level: the shaper level, e.g. port, pg, priority, queueset
 * @ir_b: IR_B parameter of IR shaper
 * @ir_u: IR_U parameter of IR shaper
 * @ir_s: IR_S parameter of IR shaper
 *
 * the formula:
 *
 *		IR_b * (2 ^ IR_u) * 8
 * IR(Mbps) = -------------------------  *  CLOCK(1000Mbps)
 *		Tick * (2 ^ IR_s)
 *
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
				  u8 *ir_b, u8 *ir_u, u8 *ir_s)
{
	const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
		6 * 256,	/* Priority level */
		6 * 32,		/* Priority group level */
		6 * 8,		/* Port level */
		6 * 256		/* Qset level */
	};
	u8 ir_u_calc = 0, ir_s_calc = 0;
	u32 ir_calc;
	u32 tick;

	/* Calc tick */
	if (shaper_level >= HCLGE_SHAPER_LVL_CNT)
		return -EINVAL;

	tick = tick_array[shaper_level];

	/**
	 * Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0,
	 * the formula is changed to:
	 *		126 * 1 * 8
	 * ir_calc = ---------------- * 1000
	 *		tick * 1
	 */
	ir_calc = (1008000 + (tick >> 1) - 1) / tick;

	if (ir_calc == ir) {
		*ir_b = 126;
		*ir_u = 0;
		*ir_s = 0;

		return 0;
	} else if (ir_calc > ir) {
		/* Increasing the denominator to select ir_s value */
		while (ir_calc > ir) {
			ir_s_calc++;
			ir_calc = 1008000 / (tick * (1 << ir_s_calc));
		}

		if (ir_calc == ir)
			*ir_b = 126;
		else
			*ir_b = (ir * tick * (1 << ir_s_calc) + 4000) / 8000;
	} else {
		/* Increasing the numerator to select ir_u value */
		u32 numerator;

		while (ir_calc < ir) {
			ir_u_calc++;
			numerator = 1008000 * (1 << ir_u_calc);
			ir_calc = (numerator + (tick >> 1)) / tick;
		}

		if (ir_calc == ir) {
			*ir_b = 126;
		} else {
			u32 denominator = (8000 * (1 << --ir_u_calc));
			*ir_b = (ir * tick + (denominator >> 1)) / denominator;
		}
	}

	*ir_u = ir_u_calc;
	*ir_s = ir_s_calc;

	return 0;
}
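
/* Worked example (illustration only, not part of the driver): programming a
 * 10000 Mbps shaper at priority level (tick = 6 * 256 = 1536). The first
 * estimate with ir_b = 126, ir_u = 0, ir_s = 0 gives ir_calc = 656 Mbps,
 * which is below the target, so the numerator is scaled up until
 * ir_u_calc = 4 (ir_calc = 10500). Since that overshoots, ir_u_calc is
 * stepped back to 3 and ir_b is recomputed with rounding:
 *	ir_b = (10000 * 1536 + 32000) / 64000 = 240
 * Plugging ir_b = 240, ir_u = 3, ir_s = 0 back into the formula:
 *	240 * (2 ^ 3) * 8 / 1536 * 1000 = 10000 Mbps, an exact match.
 */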

int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PAUSE_EN, false);

	desc.data[0] = cpu_to_le32((tx ? HCLGE_TX_MAC_PAUSE_EN_MSK : 0) |
				   (rx ? HCLGE_RX_MAC_PAUSE_EN_MSK : 0));

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
				  u8 pfc_bitmap)
{
	struct hclge_desc desc;
	struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)&desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false);

	pfc->tx_rx_en_bitmap = tx_rx_bitmap;
	pfc->pri_en_bitmap = pfc_bitmap;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
				 u8 pause_trans_gap, u16 pause_trans_time)
{
	struct hclge_cfg_pause_param_cmd *pause_param;
	struct hclge_desc desc;

	pause_param = (struct hclge_cfg_pause_param_cmd *)&desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false);

	ether_addr_copy(pause_param->mac_addr, addr);
	pause_param->pause_trans_gap = pause_trans_gap;
	pause_param->pause_trans_time = cpu_to_le16(pause_trans_time);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
{
	struct hclge_cfg_pause_param_cmd *pause_param;
	struct hclge_desc desc;
	u16 trans_time;
	u8 trans_gap;
	int ret;

	pause_param = (struct hclge_cfg_pause_param_cmd *)&desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	trans_gap = pause_param->pause_trans_gap;
	trans_time = le16_to_cpu(pause_param->pause_trans_time);

	return hclge_pause_param_cfg(hdev, mac_addr, trans_gap,
				     trans_time);
}

static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
{
	u8 tc;

	tc = hdev->tm_info.prio_tc[pri_id];

	if (tc >= hdev->tm_info.num_tc)
		return -EINVAL;

	/**
	 * The priority-to-TC register is four bytes wide and packs two
	 * priorities per byte: the upper 4 bits hold the odd priority and
	 * the lower 4 bits the even one, as below:
	 * first byte:	| pri_1 | pri_0 |
	 * second byte:	| pri_3 | pri_2 |
	 * third byte:	| pri_5 | pri_4 |
	 * fourth byte:	| pri_7 | pri_6 |
	 */
	pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4);

	return 0;
}
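
/* Worked example (illustration only): with prio_tc[3] = 2, pri_id = 3 lands
 * in byte pri[3 >> 1] = pri[1], and (3 & 1) * 4 = 4, so the TC value 2 is
 * written to the upper nibble: pri[1] |= 2 << 4. Priority 2 would go to the
 * lower nibble of the same byte.
 */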

static int hclge_up_to_tc_map(struct hclge_dev *hdev)
{
	struct hclge_desc desc;
	u8 *pri = (u8 *)desc.data;
	u8 pri_id;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);

	for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
		ret = hclge_fill_pri_array(hdev, pri, pri_id);
		if (ret)
			return ret;
	}

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
				      u8 pg_id, u8 pri_bit_map)
{
	struct hclge_pg_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, false);

	map = (struct hclge_pg_to_pri_link_cmd *)desc.data;

	map->pg_id = pg_id;
	map->pri_bit_map = pri_bit_map;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
				      u16 qs_id, u8 pri)
{
	struct hclge_qs_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, false);

	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;

	map->qs_id = cpu_to_le16(qs_id);
	map->priority = pri;
	map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev,
				    u8 q_id, u16 qs_id)
{
	struct hclge_nq_to_qs_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false);

	map = (struct hclge_nq_to_qs_link_cmd *)desc.data;

	map->nq_id = cpu_to_le16(q_id);
	map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_weight_cfg(struct hclge_dev *hdev, u8 pg_id,
				  u8 dwrr)
{
	struct hclge_pg_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false);

	weight = (struct hclge_pg_weight_cmd *)desc.data;

	weight->pg_id = pg_id;
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_weight_cfg(struct hclge_dev *hdev, u8 pri_id,
				   u8 dwrr)
{
	struct hclge_priority_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false);

	weight = (struct hclge_priority_weight_cmd *)desc.data;

	weight->pri_id = pri_id;
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id,
				  u8 dwrr)
{
	struct hclge_qs_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false);

	weight = (struct hclge_qs_weight_cmd *)desc.data;

	weight->qs_id = cpu_to_le16(qs_id);
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
				    enum hclge_shap_bucket bucket, u8 pg_id,
				    u8 ir_b, u8 ir_u, u8 ir_s, u8 bs_b, u8 bs_s)
{
	struct hclge_pg_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;
	u32 shapping_para = 0;

	opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
		 HCLGE_OPC_TM_PG_C_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;

	shap_cfg_cmd->pg_id = pg_id;

	hclge_tm_set_field(shapping_para, IR_B, ir_b);
	hclge_tm_set_field(shapping_para, IR_U, ir_u);
	hclge_tm_set_field(shapping_para, IR_S, ir_s);
	hclge_tm_set_field(shapping_para, BS_B, bs_b);
	hclge_tm_set_field(shapping_para, BS_S, bs_s);

	shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
{
	struct hclge_port_shapping_cmd *shap_cfg_cmd;
	struct hclge_desc desc;
	u32 shapping_para = 0;
	u8 ir_u, ir_b, ir_s;
	int ret;

	ret = hclge_shaper_para_calc(HCLGE_ETHER_MAX_RATE,
				     HCLGE_SHAPER_LVL_PORT,
				     &ir_b, &ir_u, &ir_s);
	if (ret)
		return ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
	shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;

	hclge_tm_set_field(shapping_para, IR_B, ir_b);
	hclge_tm_set_field(shapping_para, IR_U, ir_u);
	hclge_tm_set_field(shapping_para, IR_S, ir_s);
	hclge_tm_set_field(shapping_para, BS_B, HCLGE_SHAPER_BS_U_DEF);
	hclge_tm_set_field(shapping_para, BS_S, HCLGE_SHAPER_BS_S_DEF);

	shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
				     enum hclge_shap_bucket bucket, u8 pri_id,
				     u8 ir_b, u8 ir_u, u8 ir_s,
				     u8 bs_b, u8 bs_s)
{
	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;
	u32 shapping_para = 0;

	opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
		 HCLGE_OPC_TM_PRI_C_SHAPPING;

	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;

	shap_cfg_cmd->pri_id = pri_id;

	hclge_tm_set_field(shapping_para, IR_B, ir_b);
	hclge_tm_set_field(shapping_para, IR_U, ir_u);
	hclge_tm_set_field(shapping_para, IR_S, ir_s);
	hclge_tm_set_field(shapping_para, BS_B, bs_b);
	hclge_tm_set_field(shapping_para, BS_S, bs_s);

	shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_schd_mode_cfg(struct hclge_dev *hdev, u8 pg_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, false);

	if (hdev->tm_info.pg_info[pg_id].pg_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pg_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, false);

	if (hdev->tm_info.tc_info[pri_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pri_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false);

	if (mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(qs_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc)
{
	struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
				   false);

	bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;

	bp_to_qs_map_cmd->tc_id = tc;

	/* Qset and TC have a one-to-one mapping */
	bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(1 << tc);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
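
/* For instance (illustration only), back pressure for tc = 3 sets
 * qs_bit_map = 1 << 3 = 0x8: only the one qset paired with that TC is
 * subject to back pressure, per the one-to-one qset/TC mapping above.
 */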

static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u8 i;

	vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;
	kinfo->num_tc =
		min_t(u16, kinfo->num_tqps, hdev->tm_info.num_tc);
	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max,
			kinfo->num_tqps / kinfo->num_tc);
	vport->qs_offset = hdev->tm_info.num_tc * vport->vport_id;
	vport->dwrr = 100;  /* 100 percent as init */
	vport->alloc_rss_size = kinfo->rss_size;

	for (i = 0; i < kinfo->num_tc; i++) {
		if (hdev->hw_tc_map & BIT(i)) {
			kinfo->tc_info[i].enable = true;
			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
			kinfo->tc_info[i].tc = i;
		} else {
			/* Set to default queue if TC is disabled */
			kinfo->tc_info[i].enable = false;
			kinfo->tc_info[i].tqp_offset = 0;
			kinfo->tc_info[i].tqp_count = 1;
			kinfo->tc_info[i].tc = 0;
		}
	}

	memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc,
	       FIELD_SIZEOF(struct hnae3_knic_private_info, prio_tc));
}
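
/* Worked example (illustration only): a vport with num_tqps = 16 on a device
 * with num_tc = 4 and rss_size_max >= 4 ends up with rss_size = 16 / 4 = 4,
 * so enabled TCs 0..3 get tqp_offset 0, 4, 8, 12 with tqp_count = 4 each,
 * and vport 1 starts its qsets at qs_offset = 4 * 1 = 4.
 */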

static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u32 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		hclge_tm_vport_tc_info_update(vport);

		vport++;
	}
}

static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
{
	u8 i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		hdev->tm_info.tc_info[i].tc_id = i;
		hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
		hdev->tm_info.tc_info[i].pgid = 0;
		hdev->tm_info.tc_info[i].bw_limit =
			hdev->tm_info.pg_info[0].bw_limit;
	}

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
		hdev->tm_info.prio_tc[i] =
			(i >= hdev->tm_info.num_tc) ? 0 : i;

	/* DCB is enabled if we have more than 1 TC */
	if (hdev->tm_info.num_tc > 1)
		hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
	else
		hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
}
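
/* For instance (illustration only), with num_tc = 4 and the usual eight user
 * priorities the default mapping becomes prio_tc = {0, 1, 2, 3, 0, 0, 0, 0}:
 * priorities beyond the configured TC count all fall back to TC 0.
 */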

static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
{
	u8 i;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		int k;

		hdev->tm_info.pg_dwrr[i] = i ? 0 : 100;

		hdev->tm_info.pg_info[i].pg_id = i;
		hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;

		hdev->tm_info.pg_info[i].bw_limit = HCLGE_ETHER_MAX_RATE;

		if (i != 0)
			continue;

		hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
		for (k = 0; k < hdev->tm_info.num_tc; k++)
			hdev->tm_info.pg_info[i].tc_dwrr[k] = 100;
	}
}

static void hclge_pfc_info_init(struct hclge_dev *hdev)
{
	if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE)) {
		if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
			dev_warn(&hdev->pdev->dev,
				 "DCB is disabled, but last mode is FC_PFC\n");

		hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
	} else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
		/* fc_mode_last_time records the last fc_mode when
		 * DCB is enabled, so that fc_mode can be set to
		 * the correct value when DCB is disabled.
		 */
		hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
		hdev->tm_info.fc_mode = HCLGE_FC_PFC;
	}
}

static int hclge_tm_schd_info_init(struct hclge_dev *hdev)
{
	if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
	    (hdev->tm_info.num_pg != 1))
		return -EINVAL;

	hclge_tm_pg_info_init(hdev);

	hclge_tm_tc_info_init(hdev);

	hclge_tm_vport_info_update(hdev);

	hclge_pfc_info_init(hdev);

	return 0;
}

static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Cfg mapping */
		ret = hclge_tm_pg_to_pri_map_cfg(
			hdev, i, hdev->tm_info.pg_info[i].tc_bit_map);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
{
	u8 ir_u, ir_b, ir_s;
	int ret;
	u32 i;

	/* Cfg pg schd */
	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	/* Pg to pri */
	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Calc shaper para */
		ret = hclge_shaper_para_calc(
			hdev->tm_info.pg_info[i].bw_limit,
			HCLGE_SHAPER_LVL_PG,
			&ir_b, &ir_u, &ir_s);
		if (ret)
			return ret;

		ret = hclge_tm_pg_shapping_cfg(hdev,
					       HCLGE_TM_SHAP_C_BUCKET, i,
					       0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
					       HCLGE_SHAPER_BS_S_DEF);
		if (ret)
			return ret;

		ret = hclge_tm_pg_shapping_cfg(hdev,
					       HCLGE_TM_SHAP_P_BUCKET, i,
					       ir_b, ir_u, ir_s,
					       HCLGE_SHAPER_BS_U_DEF,
					       HCLGE_SHAPER_BS_S_DEF);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	/* cfg pg schd */
	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	/* pg to prio */
	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Cfg dwrr */
		ret = hclge_tm_pg_weight_cfg(hdev, i,
					     hdev->tm_info.pg_dwrr[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
				   struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hnae3_queue **tqp = kinfo->tqp;
	struct hnae3_tc_info *v_tc_info;
	u32 i, j;
	int ret;

	for (i = 0; i < kinfo->num_tc; i++) {
		v_tc_info = &kinfo->tc_info[i];
		for (j = 0; j < v_tc_info->tqp_count; j++) {
			struct hnae3_queue *q = tqp[v_tc_info->tqp_offset + j];

			ret = hclge_tm_q_to_qs_map_cfg(hdev,
						       hclge_get_queue_id(q),
						       vport->qs_offset + i);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i, k;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		/* Cfg qs -> pri mapping, one by one mapping */
		for (k = 0; k < hdev->num_alloc_vport; k++)
			for (i = 0; i < hdev->tm_info.num_tc; i++) {
				ret = hclge_tm_qs_to_pri_map_cfg(
					hdev, vport[k].qs_offset + i, i);
				if (ret)
					return ret;
			}
	} else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) {
		/* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */
		for (k = 0; k < hdev->num_alloc_vport; k++)
			for (i = 0; i < HNAE3_MAX_TC; i++) {
				ret = hclge_tm_qs_to_pri_map_cfg(
					hdev, vport[k].qs_offset + i, k);
				if (ret)
					return ret;
			}
	} else {
		return -EINVAL;
	}

	/* Cfg q -> qs mapping */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_vport_q_to_qs_map(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}
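
/* Worked example (illustration only): with num_tc = 4, vport k's qsets start
 * at qs_offset = 4k. In TC-based mode qset 4k + i is linked to priority i,
 * so all vports share the same per-TC priorities; in vnet-based mode every
 * qset of vport k is linked to priority k instead (8 qsets -> 1 priority).
 */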

static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
{
	u8 ir_u, ir_b, ir_s;
	int ret;
	u32 i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		ret = hclge_shaper_para_calc(
			hdev->tm_info.tc_info[i].bw_limit,
			HCLGE_SHAPER_LVL_PRI,
			&ir_b, &ir_u, &ir_s);
		if (ret)
			return ret;

		ret = hclge_tm_pri_shapping_cfg(
			hdev, HCLGE_TM_SHAP_C_BUCKET, i,
			0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
			HCLGE_SHAPER_BS_S_DEF);
		if (ret)
			return ret;

		ret = hclge_tm_pri_shapping_cfg(
			hdev, HCLGE_TM_SHAP_P_BUCKET, i,
			ir_b, ir_u, ir_s, HCLGE_SHAPER_BS_U_DEF,
			HCLGE_SHAPER_BS_S_DEF);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;
	u8 ir_u, ir_b, ir_s;
	int ret;

	ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
				     &ir_b, &ir_u, &ir_s);
	if (ret)
		return ret;

	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
					vport->vport_id,
					0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
					HCLGE_SHAPER_BS_S_DEF);
	if (ret)
		return ret;

	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
					vport->vport_id,
					ir_b, ir_u, ir_s,
					HCLGE_SHAPER_BS_U_DEF,
					HCLGE_SHAPER_BS_S_DEF);
	if (ret)
		return ret;

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u8 ir_u, ir_b, ir_s;
	u32 i;
	int ret;

	for (i = 0; i < kinfo->num_tc; i++) {
		ret = hclge_shaper_para_calc(
			hdev->tm_info.tc_info[i].bw_limit,
			HCLGE_SHAPER_LVL_QSET,
			&ir_b, &ir_u, &ir_s);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	/* Need to config vport shaper */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_tm_pri_vnet_base_shaper_pri_cfg(vport);
		if (ret)
			return ret;

		ret = hclge_tm_pri_vnet_base_shaper_qs_cfg(vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		ret = hclge_tm_pri_tc_base_shaper_cfg(hdev);
		if (ret)
			return ret;
	} else {
		ret = hclge_tm_pri_vnet_base_shaper_cfg(hdev);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	struct hclge_pg_info *pg_info;
	u8 dwrr;
	int ret;
	u32 i, k;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		pg_info =
			&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
		dwrr = pg_info->tc_dwrr[i];

		ret = hclge_tm_pri_weight_cfg(hdev, i, dwrr);
		if (ret)
			return ret;

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			ret = hclge_tm_qs_weight_cfg(
				hdev, vport[k].qs_offset + i,
				vport[k].dwrr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	/* Vf dwrr */
	ret = hclge_tm_pri_weight_cfg(hdev, vport->vport_id, vport->dwrr);
	if (ret)
		return ret;

	/* Qset dwrr */
	for (i = 0; i < kinfo->num_tc; i++) {
		ret = hclge_tm_qs_weight_cfg(
			hdev, vport->qs_offset + i,
			hdev->tm_info.pg_info[0].tc_dwrr[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_dwrr_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_tm_pri_vnet_base_dwrr_pri_cfg(vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev);
		if (ret)
			return ret;
	} else {
		ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev);
		if (ret)
			return ret;
	}

	return 0;
}

int hclge_tm_map_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_up_to_tc_map(hdev);
	if (ret)
		return ret;

	ret = hclge_tm_pg_to_pri_map(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_q_qs_cfg(hdev);
}

static int hclge_tm_shaper_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_port_shaper_cfg(hdev);
	if (ret)
		return ret;

	ret = hclge_tm_pg_shaper_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_shaper_cfg(hdev);
}

int hclge_tm_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_pg_dwrr_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_dwrr_cfg(hdev);
}

static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev)
{
	int ret;
	u8 i;

	/* Only configured in TC-based scheduler mode */
	if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
		return 0;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		ret = hclge_tm_pg_schd_mode_cfg(hdev, i);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
	if (ret)
		return ret;

	for (i = 0; i < kinfo->num_tc; i++) {
		u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode;

		ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i,
						sch_mode);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u8 i, k;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		for (i = 0; i < hdev->tm_info.num_tc; i++) {
			ret = hclge_tm_pri_schd_mode_cfg(hdev, i);
			if (ret)
				return ret;

			for (k = 0; k < hdev->num_alloc_vport; k++) {
				ret = hclge_tm_qs_schd_mode_cfg(
					hdev, vport[k].qs_offset + i,
					HCLGE_SCH_MODE_DWRR);
				if (ret)
					return ret;
			}
		}
	} else {
		for (i = 0; i < hdev->num_alloc_vport; i++) {
			ret = hclge_tm_schd_mode_vnet_base_cfg(vport);
			if (ret)
				return ret;

			vport++;
		}
	}

	return 0;
}

int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_lvl2_schd_mode_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_lvl34_schd_mode_cfg(hdev);
}

static int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
{
	int ret;

	/* Cfg tm mapping */
	ret = hclge_tm_map_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg tm shaper */
	ret = hclge_tm_shaper_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg dwrr */
	ret = hclge_tm_dwrr_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg schd mode for each level schd */
	return hclge_tm_schd_mode_hw(hdev);
}

static int hclge_pause_param_setup_hw(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	return hclge_pause_param_cfg(hdev, mac->mac_addr,
				     HCLGE_DEFAULT_PAUSE_TRANS_GAP,
				     HCLGE_DEFAULT_PAUSE_TRANS_TIME);
}

static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
{
	u8 enable_bitmap = 0;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
		enable_bitmap = HCLGE_TX_MAC_PAUSE_EN_MSK |
				HCLGE_RX_MAC_PAUSE_EN_MSK;

	return hclge_pfc_pause_en_cfg(hdev, enable_bitmap,
				      hdev->tm_info.hw_pfc_map);
}

static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
{
	bool tx_en, rx_en;

	switch (hdev->tm_info.fc_mode) {
	case HCLGE_FC_NONE:
		tx_en = false;
		rx_en = false;
		break;
	case HCLGE_FC_RX_PAUSE:
		tx_en = false;
		rx_en = true;
		break;
	case HCLGE_FC_TX_PAUSE:
		tx_en = true;
		rx_en = false;
		break;
	case HCLGE_FC_FULL:
		tx_en = true;
		rx_en = true;
		break;
	default:
		tx_en = true;
		rx_en = true;
	}

	return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
}
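
/* Flow control setup below proceeds in three steps: program the pause
 * parameters (MAC address, transmit gap and time), then either plain MAC
 * pause when the flow-control mode is not PFC, or, on DCB-capable hardware,
 * PFC plus per-TC qset back pressure.
 */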

int hclge_pause_setup_hw(struct hclge_dev *hdev)
{
	int ret;
	u8 i;

	ret = hclge_pause_param_setup_hw(hdev);
	if (ret)
		return ret;

	if (hdev->tm_info.fc_mode != HCLGE_FC_PFC)
		return hclge_mac_pause_setup_hw(hdev);

	/* Only DCB-supported dev supports qset back pressure and pfc cmd */
	if (!hnae3_dev_dcb_supported(hdev))
		return 0;

	/* When the MAC is in GE mode, the device does not support pfc setting */
	ret = hclge_pfc_setup_hw(hdev);
	if (ret)
		dev_warn(&hdev->pdev->dev, "set pfc pause failed:%d\n", ret);

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		ret = hclge_tm_qs_bp_cfg(hdev, i);
		if (ret)
			return ret;
	}

	return 0;
}

int hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
{
	struct hclge_vport *vport = hdev->vport;
	struct hnae3_knic_private_info *kinfo;
	u32 i, k;

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		if (prio_tc[i] >= hdev->tm_info.num_tc)
			return -EINVAL;
		hdev->tm_info.prio_tc[i] = prio_tc[i];

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			kinfo = &vport[k].nic.kinfo;
			kinfo->prio_tc[i] = prio_tc[i];
		}
	}
	return 0;
}

void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
{
	u8 i, bit_map = 0;

	hdev->tm_info.num_tc = num_tc;

	for (i = 0; i < hdev->tm_info.num_tc; i++)
		bit_map |= BIT(i);

	if (!bit_map) {
		bit_map = 1;
		hdev->tm_info.num_tc = 1;
	}

	hdev->hw_tc_map = bit_map;

	hclge_tm_schd_info_init(hdev);
}
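
/* For instance (illustration only), num_tc = 3 yields hw_tc_map = 0x7
 * (TCs 0..2 enabled); num_tc = 0 is normalized to a single TC with
 * hw_tc_map = 0x1 before the scheduler info is rebuilt.
 */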

int hclge_tm_init_hw(struct hclge_dev *hdev)
{
	int ret;

	if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
	    (hdev->tx_sch_mode != HCLGE_FLAG_VNET_BASE_SCH_MODE))
		return -ENOTSUPP;

	ret = hclge_tm_schd_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_pause_setup_hw(hdev);
	if (ret)
		return ret;

	return 0;
}

int hclge_tm_schd_init(struct hclge_dev *hdev)
{
	int ret;

	/* fc_mode is HCLGE_FC_FULL on reset */
	hdev->tm_info.fc_mode = HCLGE_FC_FULL;
	hdev->fc_mode_last_time = hdev->tm_info.fc_mode;

	ret = hclge_tm_schd_info_init(hdev);
	if (ret)
		return ret;

	return hclge_tm_init_hw(hdev);
}