]> git.proxmox.com Git - ceph.git/blob - ceph/src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_tm.c
import 15.2.0 Octopus source
[ceph.git] / ceph / src / spdk / dpdk / drivers / net / ipn3ke / ipn3ke_tm.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019 Intel Corporation
3 */
4
5 #include <stdint.h>
6 #include <stdlib.h>
7 #include <string.h>
8
9 #include <rte_bus_pci.h>
10 #include <rte_ethdev.h>
11 #include <rte_pci.h>
12 #include <rte_malloc.h>
13 #include <rte_tm_driver.h>
14
15 #include <rte_mbuf.h>
16 #include <rte_sched.h>
17 #include <rte_ethdev_driver.h>
18
19 #include <rte_io.h>
20 #include <rte_rawdev.h>
21 #include <rte_rawdev_pmd.h>
22 #include <rte_bus_ifpga.h>
23 #include <ifpga_logs.h>
24
25 #include "ipn3ke_rawdev_api.h"
26 #include "ipn3ke_flow.h"
27 #include "ipn3ke_logs.h"
28 #include "ipn3ke_ethdev.h"
29
30 #define BYTES_IN_MBPS (1000 * 1000 / 8)
31 #define SUBPORT_TC_PERIOD 10
32 #define PIPE_TC_PERIOD 40
33
/*
 * One row of the shaper rate translation table: a mantissa interval
 * [m1, m2], the exponent encoding (exp, with exp2 == 2^exp) and the
 * inclusive rate range [low, high] that the row covers.
 */
struct ipn3ke_tm_shaper_params_range_type {
	uint32_t m1;
	uint32_t m2;
	uint32_t exp;
	uint32_t exp2;
	uint32_t low;
	uint32_t high;
};
/*
 * Rate -> (mantissa, exponent) lookup table used by
 * ipn3ke_tm_shaper_parame_trans(). Rows are sorted by ascending rate
 * range; the last row's .high is the overall maximum supported rate.
 */
struct ipn3ke_tm_shaper_params_range_type ipn3ke_tm_shaper_params_rang[] = {
	{  0,    1,  0,     1,        0,         4},
	{  2,    3,  0,     1,        8,        12},
	{  4,    7,  0,     1,       16,        28},
	{  8,   15,  0,     1,       32,        60},
	{ 16,   31,  0,     1,       64,       124},
	{ 32,   63,  0,     1,      128,       252},
	{ 64,  127,  0,     1,      256,       508},
	{128,  255,  0,     1,      512,      1020},
	{256,  511,  0,     1,     1024,      2044},
	{512, 1023,  0,     1,     2048,      4092},
	{512, 1023,  1,     2,     4096,      8184},
	{512, 1023,  2,     4,     8192,     16368},
	{512, 1023,  3,     8,    16384,     32736},
	{512, 1023,  4,    16,    32768,     65472},
	{512, 1023,  5,    32,    65536,    130944},
	{512, 1023,  6,    64,   131072,    261888},
	{512, 1023,  7,   128,   262144,    523776},
	{512, 1023,  8,   256,   524288,   1047552},
	{512, 1023,  9,   512,  1048576,   2095104},
	{512, 1023, 10,  1024,  2097152,   4190208},
	{512, 1023, 11,  2048,  4194304,   8380416},
	{512, 1023, 12,  4096,  8388608,  16760832},
	{512, 1023, 13,  8192, 16777216,  33521664},
	{512, 1023, 14, 16384, 33554432,  67043328},
	{512, 1023, 15, 32768, 67108864, 134086656},
};

/* Number of rows in the translation table above. */
#define IPN3KE_TM_SHAPER_RANGE_NUM (sizeof(ipn3ke_tm_shaper_params_rang) / \
	sizeof(struct ipn3ke_tm_shaper_params_range_type))

/* Maximum committed rate: the upper bound of the last table row. */
#define IPN3KE_TM_SHAPER_COMMITTED_RATE_MAX \
	(ipn3ke_tm_shaper_params_rang[IPN3KE_TM_SHAPER_RANGE_NUM - 1].high)

/* Maximum peak rate: same bound as the committed rate. */
#define IPN3KE_TM_SHAPER_PEAK_RATE_MAX \
	(ipn3ke_tm_shaper_params_rang[IPN3KE_TM_SHAPER_RANGE_NUM - 1].high)
78
79 int
80 ipn3ke_hw_tm_init(struct ipn3ke_hw *hw)
81 {
82 #define SCRATCH_DATA 0xABCDEF
83 struct ipn3ke_tm_node *nodes;
84 struct ipn3ke_tm_tdrop_profile *tdrop_profile;
85 int node_num;
86 int i;
87
88 if (hw == NULL)
89 return -EINVAL;
90 #if IPN3KE_TM_SCRATCH_RW
91 uint32_t scratch_data;
92 IPN3KE_MASK_WRITE_REG(hw,
93 IPN3KE_TM_SCRATCH,
94 0,
95 SCRATCH_DATA,
96 0xFFFFFFFF);
97 scratch_data = IPN3KE_MASK_READ_REG(hw,
98 IPN3KE_TM_SCRATCH,
99 0,
100 0xFFFFFFFF);
101 if (scratch_data != SCRATCH_DATA)
102 return -EINVAL;
103 #endif
104 /* alloc memory for all hierarchy nodes */
105 node_num = hw->port_num +
106 IPN3KE_TM_VT_NODE_NUM +
107 IPN3KE_TM_COS_NODE_NUM;
108
109 nodes = rte_zmalloc("ipn3ke_tm_nodes",
110 sizeof(struct ipn3ke_tm_node) * node_num,
111 0);
112 if (!nodes)
113 return -ENOMEM;
114
115 /* alloc memory for Tail Drop Profile */
116 tdrop_profile = rte_zmalloc("ipn3ke_tm_tdrop_profile",
117 sizeof(struct ipn3ke_tm_tdrop_profile) *
118 IPN3KE_TM_TDROP_PROFILE_NUM,
119 0);
120 if (!tdrop_profile) {
121 rte_free(nodes);
122 return -ENOMEM;
123 }
124
125 hw->nodes = nodes;
126 hw->port_nodes = nodes;
127 hw->vt_nodes = hw->port_nodes + hw->port_num;
128 hw->cos_nodes = hw->vt_nodes + IPN3KE_TM_VT_NODE_NUM;
129 hw->tdrop_profile = tdrop_profile;
130 hw->tdrop_profile_num = IPN3KE_TM_TDROP_PROFILE_NUM;
131
132 for (i = 0, nodes = hw->port_nodes;
133 i < hw->port_num;
134 i++, nodes++) {
135 nodes->node_index = i;
136 nodes->level = IPN3KE_TM_NODE_LEVEL_PORT;
137 nodes->tm_id = RTE_TM_NODE_ID_NULL;
138 nodes->node_state = IPN3KE_TM_NODE_STATE_IDLE;
139 nodes->parent_node_id = RTE_TM_NODE_ID_NULL;
140 nodes->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
141 nodes->weight = 0;
142 nodes->parent_node = NULL;
143 nodes->shaper_profile.valid = 0;
144 nodes->tdrop_profile = NULL;
145 nodes->n_children = 0;
146 TAILQ_INIT(&nodes->children_node_list);
147 }
148
149 for (i = 0, nodes = hw->vt_nodes;
150 i < IPN3KE_TM_VT_NODE_NUM;
151 i++, nodes++) {
152 nodes->node_index = i;
153 nodes->level = IPN3KE_TM_NODE_LEVEL_VT;
154 nodes->tm_id = RTE_TM_NODE_ID_NULL;
155 nodes->node_state = IPN3KE_TM_NODE_STATE_IDLE;
156 nodes->parent_node_id = RTE_TM_NODE_ID_NULL;
157 nodes->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
158 nodes->weight = 0;
159 nodes->parent_node = NULL;
160 nodes->shaper_profile.valid = 0;
161 nodes->tdrop_profile = NULL;
162 nodes->n_children = 0;
163 TAILQ_INIT(&nodes->children_node_list);
164 }
165
166 for (i = 0, nodes = hw->cos_nodes;
167 i < IPN3KE_TM_COS_NODE_NUM;
168 i++, nodes++) {
169 nodes->node_index = i;
170 nodes->level = IPN3KE_TM_NODE_LEVEL_COS;
171 nodes->tm_id = RTE_TM_NODE_ID_NULL;
172 nodes->node_state = IPN3KE_TM_NODE_STATE_IDLE;
173 nodes->parent_node_id = RTE_TM_NODE_ID_NULL;
174 nodes->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
175 nodes->weight = 0;
176 nodes->parent_node = NULL;
177 nodes->shaper_profile.valid = 0;
178 nodes->tdrop_profile = NULL;
179 nodes->n_children = 0;
180 TAILQ_INIT(&nodes->children_node_list);
181 }
182
183 for (i = 0, tdrop_profile = hw->tdrop_profile;
184 i < IPN3KE_TM_TDROP_PROFILE_NUM;
185 i++, tdrop_profile++) {
186 tdrop_profile->tdrop_profile_id = i;
187 tdrop_profile->n_users = 0;
188 tdrop_profile->valid = 0;
189 }
190
191 return 0;
192 }
193
194 void
195 ipn3ke_tm_init(struct ipn3ke_rpst *rpst)
196 {
197 struct ipn3ke_tm_internals *tm;
198 struct ipn3ke_tm_node *port_node;
199
200 tm = &rpst->tm;
201
202 port_node = &rpst->hw->port_nodes[rpst->port_id];
203 tm->h.port_node = port_node;
204
205 tm->h.n_shaper_profiles = 0;
206 tm->h.n_tdrop_profiles = 0;
207 tm->h.n_vt_nodes = 0;
208 tm->h.n_cos_nodes = 0;
209
210 tm->h.port_commit_node = NULL;
211 TAILQ_INIT(&tm->h.vt_commit_node_list);
212 TAILQ_INIT(&tm->h.cos_commit_node_list);
213
214 tm->hierarchy_frozen = 0;
215 tm->tm_started = 1;
216 tm->tm_id = rpst->port_id;
217 }
218
219 static struct ipn3ke_tm_shaper_profile *
220 ipn3ke_hw_tm_shaper_profile_search(struct ipn3ke_hw *hw,
221 uint32_t shaper_profile_id, struct rte_tm_error *error)
222 {
223 struct ipn3ke_tm_shaper_profile *sp = NULL;
224 uint32_t level_of_node_id;
225 uint32_t node_index;
226
227 /* Shaper profile ID must not be NONE. */
228 if (shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE) {
229 rte_tm_error_set(error,
230 EINVAL,
231 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
232 NULL,
233 rte_strerror(EINVAL));
234
235 return NULL;
236 }
237
238 level_of_node_id = shaper_profile_id / IPN3KE_TM_NODE_LEVEL_MOD;
239 node_index = shaper_profile_id % IPN3KE_TM_NODE_LEVEL_MOD;
240
241 switch (level_of_node_id) {
242 case IPN3KE_TM_NODE_LEVEL_PORT:
243 if (node_index >= hw->port_num)
244 rte_tm_error_set(error,
245 EEXIST,
246 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
247 NULL,
248 rte_strerror(EEXIST));
249 else
250 sp = &hw->port_nodes[node_index].shaper_profile;
251
252 break;
253
254 case IPN3KE_TM_NODE_LEVEL_VT:
255 if (node_index >= IPN3KE_TM_VT_NODE_NUM)
256 rte_tm_error_set(error,
257 EEXIST,
258 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
259 NULL,
260 rte_strerror(EEXIST));
261 else
262 sp = &hw->vt_nodes[node_index].shaper_profile;
263
264 break;
265
266 case IPN3KE_TM_NODE_LEVEL_COS:
267 if (node_index >= IPN3KE_TM_COS_NODE_NUM)
268 rte_tm_error_set(error,
269 EEXIST,
270 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
271 NULL,
272 rte_strerror(EEXIST));
273 else
274 sp = &hw->cos_nodes[node_index].shaper_profile;
275
276 break;
277 default:
278 rte_tm_error_set(error,
279 EEXIST,
280 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
281 NULL,
282 rte_strerror(EEXIST));
283 }
284
285 return sp;
286 }
287
288 static struct ipn3ke_tm_tdrop_profile *
289 ipn3ke_hw_tm_tdrop_profile_search(struct ipn3ke_hw *hw,
290 uint32_t tdrop_profile_id)
291 {
292 struct ipn3ke_tm_tdrop_profile *tdrop_profile;
293
294 if (tdrop_profile_id >= hw->tdrop_profile_num)
295 return NULL;
296
297 tdrop_profile = &hw->tdrop_profile[tdrop_profile_id];
298 if (tdrop_profile->valid)
299 return tdrop_profile;
300
301 return NULL;
302 }
303
/*
 * Look up a TM node by its flat node id and validate its bookkeeping.
 *
 * node_id encodes <level, index> as level * IPN3KE_TM_NODE_LEVEL_MOD +
 * index. The node is returned only when its current state is set in
 * @state_mask and it is either IDLE or already bound to @tm_id;
 * otherwise NULL is returned. Internal consistency violations are
 * logged but do not by themselves make the lookup fail.
 */
static struct ipn3ke_tm_node *
ipn3ke_hw_tm_node_search(struct ipn3ke_hw *hw, uint32_t tm_id,
	uint32_t node_id, uint32_t state_mask)
{
	uint32_t level_of_node_id;
	uint32_t node_index;
	struct ipn3ke_tm_node *n;

	/* Split the flat id into hierarchy level and per-level index. */
	level_of_node_id = node_id / IPN3KE_TM_NODE_LEVEL_MOD;
	node_index = node_id % IPN3KE_TM_NODE_LEVEL_MOD;

	switch (level_of_node_id) {
	case IPN3KE_TM_NODE_LEVEL_PORT:
		if (node_index >= hw->port_num)
			return NULL;
		n = &hw->port_nodes[node_index];

		break;
	case IPN3KE_TM_NODE_LEVEL_VT:
		if (node_index >= IPN3KE_TM_VT_NODE_NUM)
			return NULL;
		n = &hw->vt_nodes[node_index];

		break;
	case IPN3KE_TM_NODE_LEVEL_COS:
		if (node_index >= IPN3KE_TM_COS_NODE_NUM)
			return NULL;
		n = &hw->cos_nodes[node_index];

		break;
	default:
		return NULL;
	}

	/* Check tm node status */
	if (n->node_state == IPN3KE_TM_NODE_STATE_IDLE) {
		/* An idle node must carry no binding, parent or children. */
		if (n->tm_id != RTE_TM_NODE_ID_NULL ||
		n->parent_node_id != RTE_TM_NODE_ID_NULL ||
		n->parent_node != NULL ||
		n->n_children > 0) {
			IPN3KE_AFU_PMD_ERR("tm node check error %d", 1);
		}
	} else if (n->node_state < IPN3KE_TM_NODE_STATE_MAX) {
		/* A non-idle node must be bound to a TM; any node below
		 * the port level must also have a parent.
		 */
		if (n->tm_id == RTE_TM_NODE_ID_NULL ||
		(level_of_node_id != IPN3KE_TM_NODE_LEVEL_PORT &&
		n->parent_node_id == RTE_TM_NODE_ID_NULL) ||
		(level_of_node_id != IPN3KE_TM_NODE_LEVEL_PORT &&
		n->parent_node == NULL)) {
			IPN3KE_AFU_PMD_ERR("tm node check error %d", 1);
		}
	} else {
		/* State value outside the known range. */
		IPN3KE_AFU_PMD_ERR("tm node check error %d", 1);
	}

	/* Accept the node only in one of the caller-requested states;
	 * a non-idle node must additionally belong to the caller's TM.
	 */
	if (IPN3KE_BIT_ISSET(state_mask, n->node_state)) {
		if (n->node_state == IPN3KE_TM_NODE_STATE_IDLE)
			return n;
		else if (n->tm_id == tm_id)
			return n;
		else
			return NULL;
	} else {
		return NULL;
	}
}
369
370 /* Traffic manager node type get */
371 static int
372 ipn3ke_pmd_tm_node_type_get(struct rte_eth_dev *dev,
373 uint32_t node_id, int *is_leaf, struct rte_tm_error *error)
374 {
375 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
376 struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
377 uint32_t tm_id;
378 struct ipn3ke_tm_node *node;
379 uint32_t state_mask;
380
381 if (is_leaf == NULL)
382 return -rte_tm_error_set(error,
383 EINVAL,
384 RTE_TM_ERROR_TYPE_UNSPECIFIED,
385 NULL,
386 rte_strerror(EINVAL));
387
388 tm_id = tm->tm_id;
389
390 state_mask = 0;
391 IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_COMMITTED);
392 node = ipn3ke_hw_tm_node_search(hw, tm_id, node_id, state_mask);
393 if (node_id == RTE_TM_NODE_ID_NULL ||
394 node == NULL)
395 return -rte_tm_error_set(error,
396 EINVAL,
397 RTE_TM_ERROR_TYPE_NODE_ID,
398 NULL,
399 rte_strerror(EINVAL));
400
401 *is_leaf = (node->level == IPN3KE_TM_NODE_LEVEL_COS) ? 1 : 0;
402
403 return 0;
404 }
405
/* WRED is advertised as unsupported in all capability replies. */
#define WRED_SUPPORTED 0

/* Statistics advertised for every node level. */
#define STATS_MASK_DEFAULT \
	(RTE_TM_STATS_N_PKTS | \
	RTE_TM_STATS_N_BYTES | \
	RTE_TM_STATS_N_PKTS_GREEN_DROPPED | \
	RTE_TM_STATS_N_BYTES_GREEN_DROPPED)

/* Leaf (queue) nodes additionally report the queued packet count. */
#define STATS_MASK_QUEUE \
	(STATS_MASK_DEFAULT | RTE_TM_STATS_N_PKTS_QUEUED)
416
417 /* Traffic manager capabilities get */
418 static int
419 ipn3ke_tm_capabilities_get(__rte_unused struct rte_eth_dev *dev,
420 struct rte_tm_capabilities *cap, struct rte_tm_error *error)
421 {
422 if (cap == NULL)
423 return -rte_tm_error_set(error,
424 EINVAL,
425 RTE_TM_ERROR_TYPE_CAPABILITIES,
426 NULL,
427 rte_strerror(EINVAL));
428
429 /* set all the parameters to 0 first. */
430 memset(cap, 0, sizeof(*cap));
431
432 cap->n_nodes_max = 1 + IPN3KE_TM_COS_NODE_NUM + IPN3KE_TM_VT_NODE_NUM;
433 cap->n_levels_max = IPN3KE_TM_NODE_LEVEL_MAX;
434
435 cap->non_leaf_nodes_identical = 0;
436 cap->leaf_nodes_identical = 1;
437
438 cap->shaper_n_max = 1 + IPN3KE_TM_VT_NODE_NUM;
439 cap->shaper_private_n_max = 1 + IPN3KE_TM_VT_NODE_NUM;
440 cap->shaper_private_dual_rate_n_max = 0;
441 cap->shaper_private_rate_min = 1;
442 cap->shaper_private_rate_max = 1 + IPN3KE_TM_VT_NODE_NUM;
443
444 cap->shaper_shared_n_max = 0;
445 cap->shaper_shared_n_nodes_per_shaper_max = 0;
446 cap->shaper_shared_n_shapers_per_node_max = 0;
447 cap->shaper_shared_dual_rate_n_max = 0;
448 cap->shaper_shared_rate_min = 0;
449 cap->shaper_shared_rate_max = 0;
450
451 cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
452 cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
453
454 cap->sched_n_children_max = IPN3KE_TM_COS_NODE_NUM;
455 cap->sched_sp_n_priorities_max = 3;
456 cap->sched_wfq_n_children_per_group_max = UINT32_MAX;
457 cap->sched_wfq_n_groups_max = 1;
458 cap->sched_wfq_weight_max = UINT32_MAX;
459
460 cap->cman_wred_packet_mode_supported = 0;
461 cap->cman_wred_byte_mode_supported = 0;
462 cap->cman_head_drop_supported = 0;
463 cap->cman_wred_context_n_max = 0;
464 cap->cman_wred_context_private_n_max = 0;
465 cap->cman_wred_context_shared_n_max = 0;
466 cap->cman_wred_context_shared_n_nodes_per_context_max = 0;
467 cap->cman_wred_context_shared_n_contexts_per_node_max = 0;
468
469 /**
470 * cap->mark_vlan_dei_supported = {0, 0, 0};
471 * cap->mark_ip_ecn_tcp_supported = {0, 0, 0};
472 * cap->mark_ip_ecn_sctp_supported = {0, 0, 0};
473 * cap->mark_ip_dscp_supported = {0, 0, 0};
474 */
475
476 cap->dynamic_update_mask = 0;
477
478 cap->stats_mask = 0;
479
480 return 0;
481 }
482
/* Traffic manager level capabilities get */
/*
 * rte_tm level_capabilities_get() callback for the 3-level hierarchy:
 * PORT (root) -> VT (non-leaf scheduler) -> COS (leaf queue).
 */
static int
ipn3ke_tm_level_capabilities_get(struct rte_eth_dev *dev,
	uint32_t level_id, struct rte_tm_level_capabilities *cap,
	struct rte_tm_error *error)
{
	struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);

	if (cap == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_CAPABILITIES,
			NULL,
			rte_strerror(EINVAL));

	if (level_id >= IPN3KE_TM_NODE_LEVEL_MAX)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_LEVEL_ID,
			NULL,
			rte_strerror(EINVAL));

	/* set all the parameters to 0 first. */
	memset(cap, 0, sizeof(*cap));

	switch (level_id) {
	case IPN3KE_TM_NODE_LEVEL_PORT:
		/* One node per FPGA port; children are VT nodes. */
		cap->n_nodes_max = hw->port_num;
		cap->n_nodes_nonleaf_max = IPN3KE_TM_VT_NODE_NUM;
		cap->n_nodes_leaf_max = 0;
		cap->non_leaf_nodes_identical = 0;
		cap->leaf_nodes_identical = 0;

		cap->nonleaf.shaper_private_supported = 0;
		cap->nonleaf.shaper_private_dual_rate_supported = 0;
		cap->nonleaf.shaper_private_rate_min = 1;
		cap->nonleaf.shaper_private_rate_max = UINT32_MAX;
		cap->nonleaf.shaper_shared_n_max = 0;

		/* Strict-priority only; WFQ not offered at this level. */
		cap->nonleaf.sched_n_children_max = IPN3KE_TM_VT_NODE_NUM;
		cap->nonleaf.sched_sp_n_priorities_max = 1;
		cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
		cap->nonleaf.sched_wfq_n_groups_max = 0;
		cap->nonleaf.sched_wfq_weight_max = 0;

		cap->nonleaf.stats_mask = STATS_MASK_DEFAULT;
		break;

	case IPN3KE_TM_NODE_LEVEL_VT:
		/* Intermediate schedulers; children are COS queues. */
		cap->n_nodes_max = IPN3KE_TM_VT_NODE_NUM;
		cap->n_nodes_nonleaf_max = IPN3KE_TM_COS_NODE_NUM;
		cap->n_nodes_leaf_max = 0;
		cap->non_leaf_nodes_identical = 0;
		cap->leaf_nodes_identical = 0;

		cap->nonleaf.shaper_private_supported = 0;
		cap->nonleaf.shaper_private_dual_rate_supported = 0;
		cap->nonleaf.shaper_private_rate_min = 1;
		cap->nonleaf.shaper_private_rate_max = UINT32_MAX;
		cap->nonleaf.shaper_shared_n_max = 0;

		cap->nonleaf.sched_n_children_max = IPN3KE_TM_COS_NODE_NUM;
		cap->nonleaf.sched_sp_n_priorities_max = 1;
		cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
		cap->nonleaf.sched_wfq_n_groups_max = 0;
		cap->nonleaf.sched_wfq_weight_max = 0;

		cap->nonleaf.stats_mask = STATS_MASK_DEFAULT;
		break;

	case IPN3KE_TM_NODE_LEVEL_COS:
		/* Leaf queues; no private shaper, WRED advertised via
		 * WRED_SUPPORTED (currently 0).
		 */
		cap->n_nodes_max = IPN3KE_TM_COS_NODE_NUM;
		cap->n_nodes_nonleaf_max = 0;
		cap->n_nodes_leaf_max = IPN3KE_TM_COS_NODE_NUM;
		cap->non_leaf_nodes_identical = 0;
		cap->leaf_nodes_identical = 0;

		cap->leaf.shaper_private_supported = 0;
		cap->leaf.shaper_private_dual_rate_supported = 0;
		cap->leaf.shaper_private_rate_min = 0;
		cap->leaf.shaper_private_rate_max = 0;
		cap->leaf.shaper_shared_n_max = 0;

		cap->leaf.cman_head_drop_supported = 0;
		cap->leaf.cman_wred_packet_mode_supported = WRED_SUPPORTED;
		cap->leaf.cman_wred_byte_mode_supported = 0;
		cap->leaf.cman_wred_context_private_supported = WRED_SUPPORTED;
		cap->leaf.cman_wred_context_shared_n_max = 0;

		cap->leaf.stats_mask = STATS_MASK_QUEUE;
		break;

	default:
		/* Unreachable: level_id was already range-checked above. */
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_LEVEL_ID,
			NULL,
			rte_strerror(EINVAL));
		break;
	}

	return 0;
}
586
/* Traffic manager node capabilities get */
/*
 * rte_tm node_capabilities_get() callback: report per-node limits.
 * The node must exist, be COMMITTED in this TM and belong to the
 * calling representor's port.
 */
static int
ipn3ke_tm_node_capabilities_get(struct rte_eth_dev *dev,
	uint32_t node_id, struct rte_tm_node_capabilities *cap,
	struct rte_tm_error *error)
{
	struct ipn3ke_rpst *representor = IPN3KE_DEV_PRIVATE_TO_RPST(dev);
	struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
	struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
	uint32_t tm_id;
	struct ipn3ke_tm_node *tm_node;
	uint32_t state_mask;

	if (cap == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_CAPABILITIES,
			NULL,
			rte_strerror(EINVAL));

	tm_id = tm->tm_id;

	/* Only COMMITTED nodes are visible to this query. */
	state_mask = 0;
	IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_COMMITTED);
	tm_node = ipn3ke_hw_tm_node_search(hw, tm_id, node_id, state_mask);
	if (tm_node == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* The node's TM id equals the owning port id (see ipn3ke_tm_init);
	 * reject nodes belonging to another representor.
	 */
	if (tm_node->tm_id != representor->port_id)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* set all the parameters to 0 first. */
	memset(cap, 0, sizeof(*cap));

	switch (tm_node->level) {
	case IPN3KE_TM_NODE_LEVEL_PORT:
		/* Root node: private shaper, VT children. */
		cap->shaper_private_supported = 1;
		cap->shaper_private_dual_rate_supported = 0;
		cap->shaper_private_rate_min = 1;
		cap->shaper_private_rate_max = UINT32_MAX;
		cap->shaper_shared_n_max = 0;

		cap->nonleaf.sched_n_children_max = IPN3KE_TM_VT_NODE_NUM;
		cap->nonleaf.sched_sp_n_priorities_max = 1;
		cap->nonleaf.sched_wfq_n_children_per_group_max =
			IPN3KE_TM_VT_NODE_NUM;
		cap->nonleaf.sched_wfq_n_groups_max = 1;
		cap->nonleaf.sched_wfq_weight_max = 1;

		cap->stats_mask = STATS_MASK_DEFAULT;
		break;

	case IPN3KE_TM_NODE_LEVEL_VT:
		/* Intermediate scheduler: private shaper, COS children. */
		cap->shaper_private_supported = 1;
		cap->shaper_private_dual_rate_supported = 0;
		cap->shaper_private_rate_min = 1;
		cap->shaper_private_rate_max = UINT32_MAX;
		cap->shaper_shared_n_max = 0;

		cap->nonleaf.sched_n_children_max = IPN3KE_TM_COS_NODE_NUM;
		cap->nonleaf.sched_sp_n_priorities_max = 1;
		cap->nonleaf.sched_wfq_n_children_per_group_max =
			IPN3KE_TM_COS_NODE_NUM;
		cap->nonleaf.sched_wfq_n_groups_max = 1;
		cap->nonleaf.sched_wfq_weight_max = 1;

		cap->stats_mask = STATS_MASK_DEFAULT;
		break;

	case IPN3KE_TM_NODE_LEVEL_COS:
		/* Leaf queue: no shaper; WRED per WRED_SUPPORTED (0). */
		cap->shaper_private_supported = 0;
		cap->shaper_private_dual_rate_supported = 0;
		cap->shaper_private_rate_min = 0;
		cap->shaper_private_rate_max = 0;
		cap->shaper_shared_n_max = 0;

		cap->leaf.cman_head_drop_supported = 0;
		cap->leaf.cman_wred_packet_mode_supported = WRED_SUPPORTED;
		cap->leaf.cman_wred_byte_mode_supported = 0;
		cap->leaf.cman_wred_context_private_supported = WRED_SUPPORTED;
		cap->leaf.cman_wred_context_shared_n_max = 0;

		cap->stats_mask = STATS_MASK_QUEUE;
		break;
	default:
		break;
	}

	return 0;
}
685
686 static int
687 ipn3ke_tm_shaper_parame_trans(struct rte_tm_shaper_params *profile,
688 struct ipn3ke_tm_shaper_profile *local_profile,
689 const struct ipn3ke_tm_shaper_params_range_type *ref_data)
690 {
691 uint32_t i;
692 const struct ipn3ke_tm_shaper_params_range_type *r;
693 uint64_t rate;
694
695 rate = profile->peak.rate;
696 for (i = 0, r = ref_data; i < IPN3KE_TM_SHAPER_RANGE_NUM; i++, r++) {
697 if (rate >= r->low &&
698 rate <= r->high) {
699 local_profile->m = (rate / 4) / r->exp2;
700 local_profile->e = r->exp;
701 local_profile->rate = rate;
702
703 return 0;
704 }
705 }
706
707 return -1;
708 }
709
710 static int
711 ipn3ke_tm_shaper_profile_add(struct rte_eth_dev *dev,
712 uint32_t shaper_profile_id, struct rte_tm_shaper_params *profile,
713 struct rte_tm_error *error)
714 {
715 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
716 struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
717 struct ipn3ke_tm_shaper_profile *sp;
718
719 /* Shaper profile must not exist. */
720 sp = ipn3ke_hw_tm_shaper_profile_search(hw, shaper_profile_id, error);
721 if (!sp || (sp && sp->valid))
722 return -rte_tm_error_set(error,
723 EEXIST,
724 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
725 NULL,
726 rte_strerror(EEXIST));
727
728 /* Profile must not be NULL. */
729 if (profile == NULL)
730 return -rte_tm_error_set(error,
731 EINVAL,
732 RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
733 NULL,
734 rte_strerror(EINVAL));
735
736 /* Peak rate: non-zero, 32-bit */
737 if (profile->peak.rate == 0 ||
738 profile->peak.rate > IPN3KE_TM_SHAPER_PEAK_RATE_MAX)
739 return -rte_tm_error_set(error,
740 EINVAL,
741 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE,
742 NULL,
743 rte_strerror(EINVAL));
744
745 /* Peak size: non-zero, 32-bit */
746 if (profile->peak.size != 0)
747 return -rte_tm_error_set(error,
748 EINVAL,
749 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE,
750 NULL,
751 rte_strerror(EINVAL));
752
753 /* Dual-rate profiles are not supported. */
754 if (profile->committed.rate > IPN3KE_TM_SHAPER_COMMITTED_RATE_MAX)
755 return -rte_tm_error_set(error,
756 EINVAL,
757 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE,
758 NULL,
759 rte_strerror(EINVAL));
760
761 /* Packet length adjust: 24 bytes */
762 if (profile->pkt_length_adjust != 0)
763 return -rte_tm_error_set(error,
764 EINVAL,
765 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN,
766 NULL,
767 rte_strerror(EINVAL));
768
769 if (ipn3ke_tm_shaper_parame_trans(profile,
770 sp,
771 ipn3ke_tm_shaper_params_rang)) {
772 return -rte_tm_error_set(error,
773 EINVAL,
774 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE,
775 NULL,
776 rte_strerror(EINVAL));
777 } else {
778 sp->valid = 1;
779 rte_memcpy(&sp->params, profile, sizeof(sp->params));
780 }
781
782 tm->h.n_shaper_profiles++;
783
784 return 0;
785 }
786
787 /* Traffic manager shaper profile delete */
788 static int
789 ipn3ke_tm_shaper_profile_delete(struct rte_eth_dev *dev,
790 uint32_t shaper_profile_id, struct rte_tm_error *error)
791 {
792 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
793 struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
794 struct ipn3ke_tm_shaper_profile *sp;
795
796 /* Check existing */
797 sp = ipn3ke_hw_tm_shaper_profile_search(hw, shaper_profile_id, error);
798 if (!sp || (sp && !sp->valid))
799 return -rte_tm_error_set(error,
800 EINVAL,
801 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
802 NULL,
803 rte_strerror(EINVAL));
804
805 sp->valid = 0;
806 tm->h.n_shaper_profiles--;
807
808 return 0;
809 }
810
/*
 * Validate the user-supplied tail-drop (WRED) parameters.
 *
 * Accepted profiles: byte mode only, GREEN color only, max_th == 0,
 * and min_th small enough to fit the two HW threshold fields.
 * Returns 0 on success or a negative rte_tm error.
 */
static int
ipn3ke_tm_tdrop_profile_check(__rte_unused struct rte_eth_dev *dev,
	uint32_t tdrop_profile_id, struct rte_tm_wred_params *profile,
	struct rte_tm_error *error)
{
	enum rte_color color;

	/* TDROP profile ID must not be NONE. */
	if (tdrop_profile_id == RTE_TM_WRED_PROFILE_ID_NONE)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Profile must not be NULL. */
	if (profile == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_WRED_PROFILE,
			NULL,
			rte_strerror(EINVAL));

	/* TDROP profile should be in byte mode (packet_mode must be 0). */
	if (profile->packet_mode != 0)
		return -rte_tm_error_set(error,
			ENOTSUP,
			RTE_TM_ERROR_TYPE_WRED_PROFILE,
			NULL,
			rte_strerror(ENOTSUP));

	/* Only the GREEN color is validated: the loop deliberately runs a
	 * single iteration. max_th must be 0 (unsupported), and min_th must
	 * fit in 2 * IPN3KE_TDROP_TH1_SHIFT bits — presumably the combined
	 * width of the th1/th2 HW fields (see ipn3ke_tm_tdrop_profile_add);
	 * TODO confirm against the FPGA register spec.
	 */
	for (color = RTE_COLOR_GREEN; color <= RTE_COLOR_GREEN; color++) {
		uint64_t min_th = profile->red_params[color].min_th;
		uint64_t max_th = profile->red_params[color].max_th;

		if (((min_th >> IPN3KE_TDROP_TH1_SHIFT) >>
			IPN3KE_TDROP_TH1_SHIFT) ||
			max_th != 0)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_WRED_PROFILE,
				NULL,
				rte_strerror(EINVAL));
	}

	return 0;
}
859
860 static int
861 ipn3ke_hw_tm_tdrop_wr(struct ipn3ke_hw *hw,
862 struct ipn3ke_tm_tdrop_profile *tp)
863 {
864 if (tp->valid) {
865 IPN3KE_MASK_WRITE_REG(hw,
866 IPN3KE_CCB_PROFILE_MS,
867 0,
868 tp->th2,
869 IPN3KE_CCB_PROFILE_MS_MASK);
870
871 IPN3KE_MASK_WRITE_REG(hw,
872 IPN3KE_CCB_PROFILE_P,
873 tp->tdrop_profile_id,
874 tp->th1,
875 IPN3KE_CCB_PROFILE_MASK);
876 } else {
877 IPN3KE_MASK_WRITE_REG(hw,
878 IPN3KE_CCB_PROFILE_MS,
879 0,
880 0,
881 IPN3KE_CCB_PROFILE_MS_MASK);
882
883 IPN3KE_MASK_WRITE_REG(hw,
884 IPN3KE_CCB_PROFILE_P,
885 tp->tdrop_profile_id,
886 0,
887 IPN3KE_CCB_PROFILE_MASK);
888 }
889
890 return 0;
891 }
892
893 /* Traffic manager TDROP profile add */
894 static int
895 ipn3ke_tm_tdrop_profile_add(struct rte_eth_dev *dev,
896 uint32_t tdrop_profile_id, struct rte_tm_wred_params *profile,
897 struct rte_tm_error *error)
898 {
899 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
900 struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
901 struct ipn3ke_tm_tdrop_profile *tp;
902 int status;
903 uint64_t min_th;
904 uint32_t th1, th2;
905
906 /* Check input params */
907 status = ipn3ke_tm_tdrop_profile_check(dev,
908 tdrop_profile_id,
909 profile,
910 error);
911 if (status)
912 return status;
913
914 /* Memory allocation */
915 tp = &hw->tdrop_profile[tdrop_profile_id];
916
917 /* Fill in */
918 tp->valid = 1;
919 min_th = profile->red_params[RTE_COLOR_GREEN].min_th;
920 th1 = (uint32_t)(min_th & IPN3KE_TDROP_TH1_MASK);
921 th2 = (uint32_t)((min_th >> IPN3KE_TDROP_TH1_SHIFT) &
922 IPN3KE_TDROP_TH2_MASK);
923 tp->th1 = th1;
924 tp->th2 = th2;
925 rte_memcpy(&tp->params, profile, sizeof(tp->params));
926
927 /* Add to list */
928 tm->h.n_tdrop_profiles++;
929
930 /* Write FPGA */
931 ipn3ke_hw_tm_tdrop_wr(hw, tp);
932
933 return 0;
934 }
935
936 /* Traffic manager TDROP profile delete */
937 static int
938 ipn3ke_tm_tdrop_profile_delete(struct rte_eth_dev *dev,
939 uint32_t tdrop_profile_id, struct rte_tm_error *error)
940 {
941 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
942 struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
943 struct ipn3ke_tm_tdrop_profile *tp;
944
945 /* Check existing */
946 tp = ipn3ke_hw_tm_tdrop_profile_search(hw, tdrop_profile_id);
947 if (tp == NULL)
948 return -rte_tm_error_set(error,
949 EINVAL,
950 RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
951 NULL,
952 rte_strerror(EINVAL));
953
954 /* Check unused */
955 if (tp->n_users)
956 return -rte_tm_error_set(error,
957 EBUSY,
958 RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
959 NULL,
960 rte_strerror(EBUSY));
961
962 /* Set free */
963 tp->valid = 0;
964 tm->h.n_tdrop_profiles--;
965
966 /* Write FPGA */
967 ipn3ke_hw_tm_tdrop_wr(hw, tp);
968
969 return 0;
970 }
971
/*
 * Validate the static parameters of a node_add() request.
 *
 * Checks the node id encoding (<level, index> packed with
 * IPN3KE_TM_NODE_LEVEL_MOD) against the declared level, the priority
 * and weight ranges, the parent level, and the rte_tm node params.
 * Returns 0 on success or a negative rte_tm error.
 */
static int
ipn3ke_tm_node_add_check_parameter(uint32_t tm_id,
	uint32_t node_id, uint32_t parent_node_id, uint32_t priority,
	uint32_t weight, uint32_t level_id, struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	uint32_t level_of_node_id;
	uint32_t node_index;
	uint32_t parent_level_id;

	if (node_id == RTE_TM_NODE_ID_NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* priority: must be 0, 1, 2, 3 */
	if (priority > IPN3KE_TM_NODE_PRIORITY_HIGHEST)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PRIORITY,
			NULL,
			rte_strerror(EINVAL));

	/* weight: must be 1 .. 255
	 * NOTE(review): the check only enforces the upper bound, so a
	 * weight of 0 is accepted despite the stated range — confirm
	 * whether 0 is meaningful to the scheduler.
	 */
	if (weight > IPN3KE_TM_NODE_WEIGHT_MAX)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));

	/* check node id and parent id*/
	/* The level encoded in node_id must match the declared level_id. */
	level_of_node_id = node_id / IPN3KE_TM_NODE_LEVEL_MOD;
	if (level_of_node_id != level_id)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));
	node_index = node_id % IPN3KE_TM_NODE_LEVEL_MOD;
	parent_level_id = parent_node_id / IPN3KE_TM_NODE_LEVEL_MOD;
	switch (level_id) {
	case IPN3KE_TM_NODE_LEVEL_PORT:
		/* A port node's index is its TM id; it has no parent. */
		if (node_index != tm_id)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_NODE_ID,
				NULL,
				rte_strerror(EINVAL));
		if (parent_node_id != RTE_TM_NODE_ID_NULL)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
				NULL,
				rte_strerror(EINVAL));
		break;

	case IPN3KE_TM_NODE_LEVEL_VT:
		/* VT nodes hang off a port-level parent. */
		if (node_index >= IPN3KE_TM_VT_NODE_NUM)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_NODE_ID,
				NULL,
				rte_strerror(EINVAL));
		if (parent_level_id != IPN3KE_TM_NODE_LEVEL_PORT)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
				NULL,
				rte_strerror(EINVAL));
		break;

	case IPN3KE_TM_NODE_LEVEL_COS:
		/* COS nodes hang off a VT-level parent. */
		if (node_index >= IPN3KE_TM_COS_NODE_NUM)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_NODE_ID,
				NULL,
				rte_strerror(EINVAL));
		if (parent_level_id != IPN3KE_TM_NODE_LEVEL_VT)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
				NULL,
				rte_strerror(EINVAL));
		break;
	default:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_LEVEL_ID,
			NULL,
			rte_strerror(EINVAL));
	}

	/* params: must not be NULL */
	if (params == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS,
			NULL,
			rte_strerror(EINVAL));
	/* No shared shapers */
	if (params->n_shared_shapers != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
			NULL,
			rte_strerror(EINVAL));
	return 0;
}
1084
1085 static int
1086 ipn3ke_tm_node_add_check_mount(uint32_t tm_id,
1087 uint32_t node_id, uint32_t parent_node_id, uint32_t level_id,
1088 struct rte_tm_error *error)
1089 {
1090 /*struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);*/
1091 uint32_t node_index;
1092 uint32_t parent_index;
1093 uint32_t parent_index1;
1094
1095 node_index = node_id % IPN3KE_TM_NODE_LEVEL_MOD;
1096 parent_index = parent_node_id % IPN3KE_TM_NODE_LEVEL_MOD;
1097 parent_index1 = node_index / IPN3KE_TM_NODE_MOUNT_MAX;
1098 switch (level_id) {
1099 case IPN3KE_TM_NODE_LEVEL_PORT:
1100 break;
1101
1102 case IPN3KE_TM_NODE_LEVEL_VT:
1103 if (parent_index != tm_id)
1104 return -rte_tm_error_set(error,
1105 EINVAL,
1106 RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
1107 NULL,
1108 rte_strerror(EINVAL));
1109 break;
1110
1111 case IPN3KE_TM_NODE_LEVEL_COS:
1112 if (parent_index != parent_index1)
1113 return -rte_tm_error_set(error,
1114 EINVAL,
1115 RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
1116 NULL,
1117 rte_strerror(EINVAL));
1118 break;
1119 default:
1120 return -rte_tm_error_set(error,
1121 EINVAL,
1122 RTE_TM_ERROR_TYPE_LEVEL_ID,
1123 NULL,
1124 rte_strerror(EINVAL));
1125 }
1126
1127 return 0;
1128 }
1129
/* Traffic manager node add (rte_tm_ops::node_add).
 *
 * Nodes are pre-allocated in the HW-backed node table; "adding" a node
 * means finding a free (IDLE or pending-delete) slot for node_id,
 * marking it CONFIGURED_ADD and queuing it on the per-level commit
 * list.  Nothing is written to hardware until hierarchy_commit.
 *
 * @param dev		ethdev owning the TM instance
 * @param node_id	node id (level encoded in the id)
 * @param parent_node_id parent node id, RTE_TM_NODE_ID_NULL for root
 * @param priority, weight	scheduling parameters, stored verbatim
 * @param level_id	IPN3KE_TM_NODE_LEVEL_{PORT,VT,COS}
 * @param params	generic TM node parameters (copied into the node)
 * @param error		filled on failure
 * @return 0 on success, negative errno via rte_tm_error_set on failure
 */
static int
ipn3ke_tm_node_add(struct rte_eth_dev *dev,
	uint32_t node_id, uint32_t parent_node_id, uint32_t priority,
	uint32_t weight, uint32_t level_id, struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
	struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
	uint32_t tm_id;
	struct ipn3ke_tm_node *n, *parent_node;
	uint32_t node_state, state_mask;
	int status;

	/* No topology changes once the hierarchy is frozen. */
	if (tm->hierarchy_frozen)
		return -rte_tm_error_set(error,
				EBUSY,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EBUSY));

	tm_id = tm->tm_id;

	/* Validate id ranges, priority/weight and params. */
	status = ipn3ke_tm_node_add_check_parameter(tm_id,
					node_id,
					parent_node_id,
					priority,
					weight,
					level_id,
					params,
					error);
	if (status)
		return status;

	/* Validate that the parent is the mount point this id maps to. */
	status = ipn3ke_tm_node_add_check_mount(tm_id,
					node_id,
					parent_node_id,
					level_id,
					error);
	if (status)
		return status;

	/* Shaper profiles are keyed by node id in this driver: any
	 * profile id other than NONE must equal node_id (NONE itself
	 * is accepted and means "no shaper").
	 */
	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE &&
		params->shaper_profile_id != node_id)
		return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
				NULL,
				rte_strerror(EINVAL));

	/* Look up the pre-allocated slot for node_id; it must be free
	 * (IDLE) or pending delete (CONFIGURED_DEL can be re-added).
	 */
	state_mask = 0;
	IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_IDLE);
	IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_CONFIGURED_DEL);
	n = ipn3ke_hw_tm_node_search(hw, tm_id, node_id, state_mask);
	if (!n)
		return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
	node_state = n->node_state;

	/* The parent, if any, must already exist (added or committed). */
	state_mask = 0;
	IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_CONFIGURED_ADD);
	IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_COMMITTED);
	if (parent_node_id != RTE_TM_NODE_ID_NULL) {
		parent_node = ipn3ke_hw_tm_node_search(hw,
					tm_id,
					parent_node_id,
					state_mask);
		if (!parent_node)
			return -rte_tm_error_set(error,
					EINVAL,
					RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
					NULL,
					rte_strerror(EINVAL));
	} else {
		parent_node = NULL;
	}

	switch (level_id) {
	case IPN3KE_TM_NODE_LEVEL_PORT:
		/* Single root: tracked directly, not on a commit list. */
		n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_ADD;
		n->tm_id = tm_id;
		tm->h.port_commit_node = n;
		break;

	case IPN3KE_TM_NODE_LEVEL_VT:
		/* A node coming from IDLE is queued for commit; a node
		 * pending delete is already on the commit list and only
		 * has its counters restored.
		 */
		if (node_state == IPN3KE_TM_NODE_STATE_IDLE) {
			TAILQ_INSERT_TAIL(&tm->h.vt_commit_node_list, n, node);
			if (parent_node)
				parent_node->n_children++;
			tm->h.n_vt_nodes++;
		} else if (node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) {
			if (parent_node)
				parent_node->n_children++;
			tm->h.n_vt_nodes++;
		}
		n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_ADD;
		n->parent_node_id = parent_node_id;
		n->tm_id = tm_id;
		n->parent_node = parent_node;

		break;

	case IPN3KE_TM_NODE_LEVEL_COS:
		/* Same logic as VT, on the COS commit list/counters. */
		if (node_state == IPN3KE_TM_NODE_STATE_IDLE) {
			TAILQ_INSERT_TAIL(&tm->h.cos_commit_node_list,
				n, node);
			if (parent_node)
				parent_node->n_children++;
			tm->h.n_cos_nodes++;
		} else if (node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) {
			if (parent_node)
				parent_node->n_children++;
			tm->h.n_cos_nodes++;
		}
		n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_ADD;
		n->parent_node_id = parent_node_id;
		n->tm_id = tm_id;
		n->parent_node = parent_node;

		break;
	default:
		return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_LEVEL_ID,
				NULL,
				rte_strerror(EINVAL));
	}

	/* Record scheduling parameters on the node. */
	n->priority = priority;
	n->weight = weight;

	/* Leaves configured for tail drop resolve their drop profile
	 * now.  The profile id travels in the WRED field of the generic
	 * params — this driver reuses the WRED profile API for tail
	 * drop (see ipn3ke_tm_tdrop_profile_add in the ops table).
	 */
	if (n->level == IPN3KE_TM_NODE_LEVEL_COS &&
		params->leaf.cman == RTE_TM_CMAN_TAIL_DROP)
		n->tdrop_profile = ipn3ke_hw_tm_tdrop_profile_search(hw,
			params->leaf.wred.wred_profile_id);

	rte_memcpy(&n->params, params, sizeof(n->params));

	return 0;
}
1278
1279 static int
1280 ipn3ke_tm_node_del_check_parameter(uint32_t tm_id,
1281 uint32_t node_id, struct rte_tm_error *error)
1282 {
1283 uint32_t level_of_node_id;
1284 uint32_t node_index;
1285
1286 if (node_id == RTE_TM_NODE_ID_NULL)
1287 return -rte_tm_error_set(error,
1288 EINVAL,
1289 RTE_TM_ERROR_TYPE_NODE_ID,
1290 NULL,
1291 rte_strerror(EINVAL));
1292
1293 /* check node id and parent id*/
1294 level_of_node_id = node_id / IPN3KE_TM_NODE_LEVEL_MOD;
1295 node_index = node_id % IPN3KE_TM_NODE_LEVEL_MOD;
1296 switch (level_of_node_id) {
1297 case IPN3KE_TM_NODE_LEVEL_PORT:
1298 if (node_index != tm_id)
1299 return -rte_tm_error_set(error,
1300 EINVAL,
1301 RTE_TM_ERROR_TYPE_NODE_ID,
1302 NULL,
1303 rte_strerror(EINVAL));
1304 break;
1305
1306 case IPN3KE_TM_NODE_LEVEL_VT:
1307 if (node_index >= IPN3KE_TM_VT_NODE_NUM)
1308 return -rte_tm_error_set(error,
1309 EINVAL,
1310 RTE_TM_ERROR_TYPE_NODE_ID,
1311 NULL,
1312 rte_strerror(EINVAL));
1313 break;
1314
1315 case IPN3KE_TM_NODE_LEVEL_COS:
1316 if (node_index >= IPN3KE_TM_COS_NODE_NUM)
1317 return -rte_tm_error_set(error,
1318 EINVAL,
1319 RTE_TM_ERROR_TYPE_NODE_ID,
1320 NULL,
1321 rte_strerror(EINVAL));
1322 break;
1323 default:
1324 return -rte_tm_error_set(error,
1325 EINVAL,
1326 RTE_TM_ERROR_TYPE_LEVEL_ID,
1327 NULL,
1328 rte_strerror(EINVAL));
1329 }
1330
1331 return 0;
1332 }
1333
/* Traffic manager node delete (rte_tm_ops::node_delete).
 *
 * Marks an existing (added or committed) node CONFIGURED_DEL and
 * queues it for the next hierarchy commit.  Nodes with children
 * cannot be deleted.  Nothing is written to hardware here.
 *
 * @param dev		ethdev owning the TM instance
 * @param node_id	node to delete (level encoded in the id)
 * @param error		filled on failure
 * @return 0 on success, negative errno via rte_tm_error_set on failure
 */
static int
ipn3ke_pmd_tm_node_delete(struct rte_eth_dev *dev,
	uint32_t node_id, struct rte_tm_error *error)
{
	struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
	struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
	struct ipn3ke_tm_node *n, *parent_node;
	uint32_t tm_id;
	int status;
	uint32_t level_of_node_id;
	uint32_t node_state;
	uint32_t state_mask;

	/* Check hierarchy changes are currently allowed */
	if (tm->hierarchy_frozen)
		return -rte_tm_error_set(error,
				EBUSY,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EBUSY));

	tm_id = tm->tm_id;

	/* Validate id range / level. */
	status = ipn3ke_tm_node_del_check_parameter(tm_id,
					node_id,
					error);
	if (status)
		return status;

	/* The node must currently exist: pending add or committed. */
	state_mask = 0;
	IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_CONFIGURED_ADD);
	IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_COMMITTED);
	n = ipn3ke_hw_tm_node_search(hw, tm_id, node_id, state_mask);
	if (n == NULL)
		return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_NODE_ID,
				NULL,
				rte_strerror(EINVAL));

	/* Refuse to delete an interior node that still has children. */
	if (n->n_children > 0)
		return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_NODE_ID,
				NULL,
				rte_strerror(EINVAL));

	node_state = n->node_state;

	level_of_node_id = node_id / IPN3KE_TM_NODE_LEVEL_MOD;

	/* Cross-check the recorded parent: it must still exist in an
	 * added/committed state and match the node's cached pointer.
	 */
	if (n->parent_node_id != RTE_TM_NODE_ID_NULL) {
		state_mask = 0;
		IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_CONFIGURED_ADD);
		IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_COMMITTED);
		parent_node = ipn3ke_hw_tm_node_search(hw,
					tm_id,
					n->parent_node_id,
					state_mask);
		if (!parent_node)
			return -rte_tm_error_set(error,
					EINVAL,
					RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
					NULL,
					rte_strerror(EINVAL));
		if (n->parent_node != parent_node)
			return -rte_tm_error_set(error,
					EINVAL,
					RTE_TM_ERROR_TYPE_NODE_ID,
					NULL,
					rte_strerror(EINVAL));
	} else {
		parent_node = NULL;
	}

	switch (level_of_node_id) {
	case IPN3KE_TM_NODE_LEVEL_PORT:
		/* Only the root of this TM instance may be deleted. */
		if (tm->h.port_node != n)
			return -rte_tm_error_set(error,
					EINVAL,
					RTE_TM_ERROR_TYPE_NODE_ID,
					NULL,
					rte_strerror(EINVAL));
		n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_DEL;
		tm->h.port_commit_node = n;

		break;

	case IPN3KE_TM_NODE_LEVEL_VT:
		/* A committed node must be unlinked from its parent and
		 * queued on the commit list; a node that was only pending
		 * add is already on the list and just loses its counters.
		 */
		if (node_state == IPN3KE_TM_NODE_STATE_COMMITTED) {
			if (parent_node)
				TAILQ_REMOVE(&parent_node->children_node_list,
					n, node);
			TAILQ_INSERT_TAIL(&tm->h.vt_commit_node_list, n, node);
			if (parent_node)
				parent_node->n_children--;
			tm->h.n_vt_nodes--;
		} else if (node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
			if (parent_node)
				parent_node->n_children--;
			tm->h.n_vt_nodes--;
		}
		n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_DEL;

		break;

	case IPN3KE_TM_NODE_LEVEL_COS:
		/* Same logic as VT, on the COS list/counters. */
		if (node_state == IPN3KE_TM_NODE_STATE_COMMITTED) {
			if (parent_node)
				TAILQ_REMOVE(&parent_node->children_node_list,
					n, node);
			TAILQ_INSERT_TAIL(&tm->h.cos_commit_node_list,
				n, node);
			if (parent_node)
				parent_node->n_children--;
			tm->h.n_cos_nodes--;
		} else if (node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
			if (parent_node)
				parent_node->n_children--;
			tm->h.n_cos_nodes--;
		}
		n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_DEL;

		break;
	default:
		return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_LEVEL_ID,
				NULL,
				rte_strerror(EINVAL));
	}

	return 0;
}
1471
/*
 * Validate every node on the commit lists before any hardware write.
 *
 * A pending ADD must be properly parented, belong to this TM, sit at
 * the expected level, and carry a valid shaper profile.  Returns
 * -EINVAL (via rte_tm_error_set) on the first violation, 0 when the
 * pending set is consistent.
 */
static int
ipn3ke_tm_hierarchy_commit_check(struct rte_eth_dev *dev,
	struct rte_tm_error *error)
{
	struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
	uint32_t tm_id;
	struct ipn3ke_tm_node_list *nl;
	struct ipn3ke_tm_node *n, *parent_node;

	tm_id = tm->tm_id;

	/* Pending COS (leaf) nodes. */
	nl = &tm->h.cos_commit_node_list;
	TAILQ_FOREACH(n, nl, node) {
		parent_node = n->parent_node;
		if (n->node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
			/* NOTE(review): the "(parent_node && ...)" guards
			 * below are redundant — parent_node == NULL already
			 * short-circuits the || chain.
			 */
			if (n->parent_node_id == RTE_TM_NODE_ID_NULL ||
				n->level != IPN3KE_TM_NODE_LEVEL_COS ||
				n->tm_id != tm_id ||
				parent_node == NULL ||
				(parent_node &&
					parent_node->node_state ==
					IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) ||
				(parent_node &&
					parent_node->node_state ==
					IPN3KE_TM_NODE_STATE_IDLE) ||
				n->shaper_profile.valid == 0) {
				return -rte_tm_error_set(error,
						EINVAL,
						RTE_TM_ERROR_TYPE_UNSPECIFIED,
						NULL,
						rte_strerror(EINVAL));
			}
		} else if (n->node_state ==
			IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) {
			/* NOTE(review): BOTH arms of this if/else return
			 * EINVAL, so any queued delete fails the commit
			 * check — even though ipn3ke_tm_hierarchy_hw_commit()
			 * knows how to process deletes.  This looks
			 * unintentional; confirm intent before changing.
			 */
			if (n->level != IPN3KE_TM_NODE_LEVEL_COS ||
				n->n_children != 0) {
				return -rte_tm_error_set(error,
						EINVAL,
						RTE_TM_ERROR_TYPE_UNSPECIFIED,
						NULL,
						rte_strerror(EINVAL));
			} else {
				return -rte_tm_error_set(error,
						EINVAL,
						RTE_TM_ERROR_TYPE_UNSPECIFIED,
						NULL,
						rte_strerror(EINVAL));
			}
		}
	}

	/* Pending VT nodes: same checks at the VT level. */
	nl = &tm->h.vt_commit_node_list;
	TAILQ_FOREACH(n, nl, node) {
		parent_node = n->parent_node;
		if (n->node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
			if (n->parent_node_id == RTE_TM_NODE_ID_NULL ||
				n->level != IPN3KE_TM_NODE_LEVEL_VT ||
				n->tm_id != tm_id ||
				parent_node == NULL ||
				(parent_node &&
					parent_node->node_state ==
					IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) ||
				(parent_node &&
					parent_node->node_state ==
					IPN3KE_TM_NODE_STATE_IDLE) ||
				n->shaper_profile.valid == 0) {
				return -rte_tm_error_set(error,
						EINVAL,
						RTE_TM_ERROR_TYPE_UNSPECIFIED,
						NULL,
						rte_strerror(EINVAL));
			}
		} else if (n->node_state ==
			IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) {
			/* NOTE(review): same both-arms-return-EINVAL pattern
			 * as the COS loop above — confirm intent.
			 */
			if (n->level != IPN3KE_TM_NODE_LEVEL_VT ||
				n->n_children != 0) {
				return -rte_tm_error_set(error,
						EINVAL,
						RTE_TM_ERROR_TYPE_UNSPECIFIED,
						NULL,
						rte_strerror(EINVAL));
			} else {
				return -rte_tm_error_set(error,
						EINVAL,
						RTE_TM_ERROR_TYPE_UNSPECIFIED,
						NULL,
						rte_strerror(EINVAL));
			}
		}
	}

	/* Pending port node: must be the parentless root of this TM
	 * with a valid shaper profile.
	 */
	n = tm->h.port_commit_node;
	if (n &&
		(n->parent_node_id != RTE_TM_NODE_ID_NULL ||
		n->level != IPN3KE_TM_NODE_LEVEL_PORT ||
		n->tm_id != tm_id ||
		n->parent_node != NULL ||
		n->shaper_profile.valid == 0)) {
		return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
	}

	return 0;
}
1579
/*
 * Program one TM node into the FPGA QoS block.
 *
 * Writes the node's priority (Type), scheduling weight (Sch_wt) and,
 * when a shaper profile is attached, the shaper weight (Shap_wt) to
 * the register bank for its level (L3 = port, L2 = VT, L1 = COS).
 * VT and COS nodes additionally program the child->parent mapping;
 * COS nodes also bind their tail-drop profile and their queue-to-port
 * assignment.
 *
 * Returns 0 on success, -1 for an unknown level.
 */
static int
ipn3ke_hw_tm_node_wr(struct ipn3ke_hw *hw,
	struct ipn3ke_tm_node *n)
{
	uint32_t level;

	level = n->level;

	switch (level) {
	case IPN3KE_TM_NODE_LEVEL_PORT:
		/**
		 * Configure Type
		 */
		IPN3KE_MASK_WRITE_REG(hw,
				IPN3KE_QOS_TYPE_L3_X,
				n->node_index,
				n->priority,
				IPN3KE_QOS_TYPE_MASK);

		/**
		 * Configure Sch_wt
		 */
		IPN3KE_MASK_WRITE_REG(hw,
				IPN3KE_QOS_SCH_WT_L3_X,
				n->node_index,
				n->weight,
				IPN3KE_QOS_SCH_WT_MASK);

		/**
		 * Configure Shap_wt
		 * Encoding: exponent in bits [10+], mantissa in the low
		 * bits (matches the (e, m) pair in the shaper profile).
		 */
		if (n->shaper_profile.valid)
			IPN3KE_MASK_WRITE_REG(hw,
					IPN3KE_QOS_SHAP_WT_L3_X,
					n->node_index,
					((n->shaper_profile.e << 10) |
						n->shaper_profile.m),
					IPN3KE_QOS_SHAP_WT_MASK);

		break;
	case IPN3KE_TM_NODE_LEVEL_VT:
		/**
		 * Configure Type
		 */
		IPN3KE_MASK_WRITE_REG(hw,
				IPN3KE_QOS_TYPE_L2_X,
				n->node_index,
				n->priority,
				IPN3KE_QOS_TYPE_MASK);

		/**
		 * Configure Sch_wt
		 */
		IPN3KE_MASK_WRITE_REG(hw,
				IPN3KE_QOS_SCH_WT_L2_X,
				n->node_index,
				n->weight,
				IPN3KE_QOS_SCH_WT_MASK);

		/**
		 * Configure Shap_wt
		 */
		if (n->shaper_profile.valid)
			IPN3KE_MASK_WRITE_REG(hw,
					IPN3KE_QOS_SHAP_WT_L2_X,
					n->node_index,
					((n->shaper_profile.e << 10) |
						n->shaper_profile.m),
					IPN3KE_QOS_SHAP_WT_MASK);

		/**
		 * Configure Map: VT -> parent port index.
		 * NOTE(review): n->parent_node is dereferenced without a
		 * NULL check — callers must only pass parented VT nodes
		 * (deletes reach here with parent_node already cleared?
		 * confirm against ipn3ke_tm_hierarchy_hw_commit()).
		 */
		IPN3KE_MASK_WRITE_REG(hw,
				IPN3KE_QOS_MAP_L2_X,
				n->node_index,
				n->parent_node->node_index,
				IPN3KE_QOS_MAP_L2_MASK);

		break;
	case IPN3KE_TM_NODE_LEVEL_COS:
		/**
		 * Configure Tail Drop mapping: bind the queue to its
		 * tail-drop profile slot.
		 */
		if (n->tdrop_profile && n->tdrop_profile->valid) {
			IPN3KE_MASK_WRITE_REG(hw,
					IPN3KE_CCB_QPROFILE_Q,
					n->node_index,
					n->tdrop_profile->tdrop_profile_id,
					IPN3KE_CCB_QPROFILE_MASK);
		}

		/**
		 * Configure Type
		 */
		IPN3KE_MASK_WRITE_REG(hw,
				IPN3KE_QOS_TYPE_L1_X,
				n->node_index,
				n->priority,
				IPN3KE_QOS_TYPE_MASK);

		/**
		 * Configure Sch_wt
		 */
		IPN3KE_MASK_WRITE_REG(hw,
				IPN3KE_QOS_SCH_WT_L1_X,
				n->node_index,
				n->weight,
				IPN3KE_QOS_SCH_WT_MASK);

		/**
		 * Configure Shap_wt
		 */
		if (n->shaper_profile.valid)
			IPN3KE_MASK_WRITE_REG(hw,
					IPN3KE_QOS_SHAP_WT_L1_X,
					n->node_index,
					((n->shaper_profile.e << 10) |
						n->shaper_profile.m),
					IPN3KE_QOS_SHAP_WT_MASK);

		/**
		 * Configure COS queue to port.
		 * Bit 0x80000000 of QM_UID_CONFIG_CTRL presumably flags a
		 * busy/in-progress operation — TODO confirm against the
		 * FPGA spec.  NOTE(review): these polls have no timeout;
		 * a wedged device would spin forever.
		 */
		while (IPN3KE_MASK_READ_REG(hw,
					IPN3KE_QM_UID_CONFIG_CTRL,
					0,
					0x80000000))
			;

		/* Data word: valid bit (1 << 8) plus the grandparent
		 * (port) index the queue is assigned to.
		 */
		IPN3KE_MASK_WRITE_REG(hw,
				IPN3KE_QM_UID_CONFIG_DATA,
				0,
				(1 << 8 | n->parent_node->parent_node->node_index),
				0x1FF);

		/* Kick the config for this queue index. */
		IPN3KE_MASK_WRITE_REG(hw,
				IPN3KE_QM_UID_CONFIG_CTRL,
				0,
				n->node_index,
				0xFFFFF);

		/* Wait for the operation to complete. */
		while (IPN3KE_MASK_READ_REG(hw,
					IPN3KE_QM_UID_CONFIG_CTRL,
					0,
					0x80000000))
			;

		/**
		 * Configure Map: COS -> parent VT index.
		 */
		IPN3KE_MASK_WRITE_REG(hw,
				IPN3KE_QOS_MAP_L1_X,
				n->node_index,
				n->parent_node->node_index,
				IPN3KE_QOS_MAP_L1_MASK);

		break;
	default:
		return -1;
	}

	return 0;
}
1744
1745 static int
1746 ipn3ke_tm_hierarchy_hw_commit(struct rte_eth_dev *dev,
1747 struct rte_tm_error *error)
1748 {
1749 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
1750 struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
1751 struct ipn3ke_tm_node_list *nl;
1752 struct ipn3ke_tm_node *n, *nn, *parent_node;
1753
1754 n = tm->h.port_commit_node;
1755 if (n) {
1756 if (n->node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
1757 tm->h.port_commit_node = NULL;
1758
1759 n->node_state = IPN3KE_TM_NODE_STATE_COMMITTED;
1760 } else if (n->node_state ==
1761 IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) {
1762 tm->h.port_commit_node = NULL;
1763
1764 n->node_state = IPN3KE_TM_NODE_STATE_IDLE;
1765 n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
1766 n->weight = 0;
1767 n->tm_id = RTE_TM_NODE_ID_NULL;
1768 } else {
1769 return -rte_tm_error_set(error,
1770 EINVAL,
1771 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1772 NULL,
1773 rte_strerror(EINVAL));
1774 }
1775 ipn3ke_hw_tm_node_wr(hw, n);
1776 }
1777
1778 nl = &tm->h.vt_commit_node_list;
1779 for (n = TAILQ_FIRST(nl); n != NULL; n = nn) {
1780 nn = TAILQ_NEXT(n, node);
1781 if (n->node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
1782 n->node_state = IPN3KE_TM_NODE_STATE_COMMITTED;
1783 parent_node = n->parent_node;
1784 TAILQ_REMOVE(nl, n, node);
1785 TAILQ_INSERT_TAIL(&parent_node->children_node_list,
1786 n, node);
1787 } else if (n->node_state ==
1788 IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) {
1789 parent_node = n->parent_node;
1790 TAILQ_REMOVE(nl, n, node);
1791
1792 n->node_state = IPN3KE_TM_NODE_STATE_IDLE;
1793 n->parent_node_id = RTE_TM_NODE_ID_NULL;
1794 n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
1795 n->weight = 0;
1796 n->tm_id = RTE_TM_NODE_ID_NULL;
1797 n->parent_node = NULL;
1798 } else {
1799 return -rte_tm_error_set(error,
1800 EINVAL,
1801 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1802 NULL,
1803 rte_strerror(EINVAL));
1804 }
1805 ipn3ke_hw_tm_node_wr(hw, n);
1806 }
1807
1808 nl = &tm->h.cos_commit_node_list;
1809 for (n = TAILQ_FIRST(nl); n != NULL; n = nn) {
1810 nn = TAILQ_NEXT(n, node);
1811 if (n->node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
1812 n->node_state = IPN3KE_TM_NODE_STATE_COMMITTED;
1813 parent_node = n->parent_node;
1814 TAILQ_REMOVE(nl, n, node);
1815 TAILQ_INSERT_TAIL(&parent_node->children_node_list,
1816 n, node);
1817 } else if (n->node_state ==
1818 IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) {
1819 n->node_state = IPN3KE_TM_NODE_STATE_IDLE;
1820 parent_node = n->parent_node;
1821 TAILQ_REMOVE(nl, n, node);
1822
1823 n->node_state = IPN3KE_TM_NODE_STATE_IDLE;
1824 n->parent_node_id = RTE_TM_NODE_ID_NULL;
1825 n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
1826 n->weight = 0;
1827 n->tm_id = RTE_TM_NODE_ID_NULL;
1828 n->parent_node = NULL;
1829
1830 if (n->tdrop_profile)
1831 n->tdrop_profile->n_users--;
1832 } else {
1833 return -rte_tm_error_set(error,
1834 EINVAL,
1835 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1836 NULL,
1837 rte_strerror(EINVAL));
1838 }
1839 ipn3ke_hw_tm_node_wr(hw, n);
1840 }
1841
1842 return 0;
1843 }
1844
/*
 * Discard all pending (uncommitted) hierarchy changes: reset every
 * node on the port/VT/COS commit lists back to IDLE and empty the
 * lists.  Invoked on commit failure when clear_on_fail is set.
 *
 * Always returns 0.
 */
static int
ipn3ke_tm_hierarchy_commit_clear(struct rte_eth_dev *dev)
{
	struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
	struct ipn3ke_tm_node_list *nl;
	struct ipn3ke_tm_node *n;
	struct ipn3ke_tm_node *nn;

	/* Port: drop the single pending node, if any. */
	n = tm->h.port_commit_node;
	if (n) {
		n->node_state = IPN3KE_TM_NODE_STATE_IDLE;
		n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
		n->weight = 0;
		n->tm_id = RTE_TM_NODE_ID_NULL;
		n->n_children = 0;

		tm->h.port_commit_node = NULL;
	}

	/* VT: reset and unlink every queued node (safe iteration). */
	nl = &tm->h.vt_commit_node_list;
	for (n = TAILQ_FIRST(nl); n != NULL; n = nn) {
		nn = TAILQ_NEXT(n, node);

		n->node_state = IPN3KE_TM_NODE_STATE_IDLE;
		n->parent_node_id = RTE_TM_NODE_ID_NULL;
		n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
		n->weight = 0;
		n->tm_id = RTE_TM_NODE_ID_NULL;
		n->parent_node = NULL;
		n->n_children = 0;
		/* NOTE(review): this decrement assumes every queued node
		 * was a pending add (which incremented the counter).  A
		 * queued delete already decremented n_vt_nodes at delete
		 * time, so it would be decremented twice here — confirm.
		 */
		tm->h.n_vt_nodes--;

		TAILQ_REMOVE(nl, n, node);
	}

	/* COS: same reset/unlink pass (same counter caveat as VT). */
	nl = &tm->h.cos_commit_node_list;
	for (n = TAILQ_FIRST(nl); n != NULL; n = nn) {
		nn = TAILQ_NEXT(n, node);

		n->node_state = IPN3KE_TM_NODE_STATE_IDLE;
		n->parent_node_id = RTE_TM_NODE_ID_NULL;
		n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
		n->weight = 0;
		n->tm_id = RTE_TM_NODE_ID_NULL;
		n->parent_node = NULL;
		tm->h.n_cos_nodes--;

		TAILQ_REMOVE(nl, n, node);
	}

	return 0;
}
1897
1898 static void
1899 ipn3ke_tm_show(struct rte_eth_dev *dev)
1900 {
1901 struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
1902 uint32_t tm_id;
1903 struct ipn3ke_tm_node_list *vt_nl, *cos_nl;
1904 struct ipn3ke_tm_node *port_n, *vt_n, *cos_n;
1905 const char *str_state[IPN3KE_TM_NODE_STATE_MAX] = {"Idle",
1906 "CfgAdd",
1907 "CfgDel",
1908 "Committed"};
1909
1910 tm_id = tm->tm_id;
1911
1912 IPN3KE_AFU_PMD_DEBUG("***HQoS Tree(%d)***\n", tm_id);
1913
1914 port_n = tm->h.port_node;
1915 IPN3KE_AFU_PMD_DEBUG("Port: (%d|%s)\n", port_n->node_index,
1916 str_state[port_n->node_state]);
1917
1918 vt_nl = &tm->h.port_node->children_node_list;
1919 TAILQ_FOREACH(vt_n, vt_nl, node) {
1920 cos_nl = &vt_n->children_node_list;
1921 IPN3KE_AFU_PMD_DEBUG(" VT%d: ", vt_n->node_index);
1922 TAILQ_FOREACH(cos_n, cos_nl, node) {
1923 if (cos_n->parent_node_id !=
1924 (vt_n->node_index + IPN3KE_TM_NODE_LEVEL_MOD))
1925 IPN3KE_AFU_PMD_ERR("(%d|%s), ",
1926 cos_n->node_index,
1927 str_state[cos_n->node_state]);
1928 }
1929 IPN3KE_AFU_PMD_DEBUG("\n");
1930 }
1931 }
1932
1933 static void
1934 ipn3ke_tm_show_commmit(struct rte_eth_dev *dev)
1935 {
1936 struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
1937 uint32_t tm_id;
1938 struct ipn3ke_tm_node_list *nl;
1939 struct ipn3ke_tm_node *n;
1940 const char *str_state[IPN3KE_TM_NODE_STATE_MAX] = {"Idle",
1941 "CfgAdd",
1942 "CfgDel",
1943 "Committed"};
1944
1945 tm_id = tm->tm_id;
1946
1947 IPN3KE_AFU_PMD_DEBUG("***Commit Tree(%d)***\n", tm_id);
1948 n = tm->h.port_commit_node;
1949 IPN3KE_AFU_PMD_DEBUG("Port: ");
1950 if (n)
1951 IPN3KE_AFU_PMD_DEBUG("(%d|%s)",
1952 n->node_index,
1953 str_state[n->node_state]);
1954 IPN3KE_AFU_PMD_DEBUG("\n");
1955
1956 nl = &tm->h.vt_commit_node_list;
1957 IPN3KE_AFU_PMD_DEBUG("VT : ");
1958 TAILQ_FOREACH(n, nl, node) {
1959 IPN3KE_AFU_PMD_DEBUG("(%d|%s), ",
1960 n->node_index,
1961 str_state[n->node_state]);
1962 }
1963 IPN3KE_AFU_PMD_DEBUG("\n");
1964
1965 nl = &tm->h.cos_commit_node_list;
1966 IPN3KE_AFU_PMD_DEBUG("COS : ");
1967 TAILQ_FOREACH(n, nl, node) {
1968 IPN3KE_AFU_PMD_DEBUG("(%d|%s), ",
1969 n->node_index,
1970 str_state[n->node_state]);
1971 }
1972 IPN3KE_AFU_PMD_DEBUG("\n");
1973 }
1974
1975 /* Traffic manager hierarchy commit */
1976 static int
1977 ipn3ke_tm_hierarchy_commit(struct rte_eth_dev *dev,
1978 int clear_on_fail, struct rte_tm_error *error)
1979 {
1980 struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
1981 int status;
1982
1983 /* Checks */
1984 if (tm->hierarchy_frozen)
1985 return -rte_tm_error_set(error,
1986 EBUSY,
1987 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1988 NULL,
1989 rte_strerror(EBUSY));
1990
1991 ipn3ke_tm_show_commmit(dev);
1992
1993 status = ipn3ke_tm_hierarchy_commit_check(dev, error);
1994 if (status) {
1995 if (clear_on_fail)
1996 ipn3ke_tm_hierarchy_commit_clear(dev);
1997 return status;
1998 }
1999
2000 ipn3ke_tm_hierarchy_hw_commit(dev, error);
2001 ipn3ke_tm_show(dev);
2002
2003 return 0;
2004 }
2005
/* Generic ethdev TM callback table, exposed through
 * ipn3ke_tm_ops_get() when the QoS accelerator is present.
 * Callbacks left NULL are features this driver does not implement.
 * Note the "WRED profile" hooks are wired to tail-drop profile
 * handlers — this hardware reuses the WRED profile API for tail drop.
 */
const struct rte_tm_ops ipn3ke_tm_ops = {
	/* Node/level introspection. */
	.node_type_get = ipn3ke_pmd_tm_node_type_get,
	.capabilities_get = ipn3ke_tm_capabilities_get,
	.level_capabilities_get = ipn3ke_tm_level_capabilities_get,
	.node_capabilities_get = ipn3ke_tm_node_capabilities_get,

	/* Congestion management profiles (tail drop via WRED API). */
	.wred_profile_add = ipn3ke_tm_tdrop_profile_add,
	.wred_profile_delete = ipn3ke_tm_tdrop_profile_delete,
	.shared_wred_context_add_update = NULL,
	.shared_wred_context_delete = NULL,

	/* Shaping profiles (no shared shapers). */
	.shaper_profile_add = ipn3ke_tm_shaper_profile_add,
	.shaper_profile_delete = ipn3ke_tm_shaper_profile_delete,
	.shared_shaper_add_update = NULL,
	.shared_shaper_delete = NULL,

	/* Hierarchy construction and commit. */
	.node_add = ipn3ke_tm_node_add,
	.node_delete = ipn3ke_pmd_tm_node_delete,
	.node_suspend = NULL,
	.node_resume = NULL,
	.hierarchy_commit = ipn3ke_tm_hierarchy_commit,

	/* Runtime node updates: not supported. */
	.node_parent_update = NULL,
	.node_shaper_update = NULL,
	.node_shared_shaper_update = NULL,
	.node_stats_update = NULL,
	.node_wfq_weight_mode_update = NULL,
	.node_cman_update = NULL,
	.node_wred_context_update = NULL,
	.node_shared_wred_context_update = NULL,

	.node_stats_read = NULL,
};
2039
2040 int
2041 ipn3ke_tm_ops_get(struct rte_eth_dev *ethdev,
2042 void *arg)
2043 {
2044 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev);
2045 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
2046 struct rte_eth_dev *i40e_pf_eth;
2047 const struct rte_tm_ops *ops;
2048
2049 if (!arg)
2050 return -EINVAL;
2051
2052 if (hw->acc_tm) {
2053 *(const void **)arg = &ipn3ke_tm_ops;
2054 } else if (rpst->i40e_pf_eth) {
2055 i40e_pf_eth = rpst->i40e_pf_eth;
2056 if (i40e_pf_eth->dev_ops->tm_ops_get == NULL ||
2057 i40e_pf_eth->dev_ops->tm_ops_get(i40e_pf_eth,
2058 &ops) != 0 ||
2059 ops == NULL) {
2060 return -EINVAL;
2061 }
2062 *(const void **)arg = ops;
2063 } else {
2064 return -EINVAL;
2065 }
2066
2067 return 0;
2068 }
2069