1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2017 Intel Corporation
5 #include <rte_event_ring.h>
/* NOTE(review): fragmentary view of enum xstats_type -- only a few
 * enumerators are visible in this extract; the full set is defined in the
 * missing lines. Do not assume this is complete.
 */
17 /* device instance specific */
26 poll_return
, /* for zero-count and used also for port bucket loop */
29 /* qid port mapping specific */
31 pkts
, /* note: qid-to-port pkts */
/* Getter callback type for one statistic: returns the raw 64-bit value.
 * obj_idx identifies the port or queue; extra_arg carries an optional
 * sub-index (used below as a dequeue-burst bucket, iq index, or port id).
 */
34 typedef uint64_t (*xstats_fn
)(const struct sw_evdev
*dev
,
35 uint16_t obj_idx
, /* port or queue id */
36 enum xstats_type stat
, int extra_arg
);
/* One entry in the flat xstats table built by sw_xstats_init().
 * NOTE(review): this extract omits some members -- sw_xstats_update() and
 * the initializers below also reference .fn, .obj_idx and .extra_arg, which
 * must be declared in the missing lines (40/41/44).
 */
38 struct sw_xstats_entry
{
39 struct rte_event_dev_xstats_name name
; /* name exported to the application */
42 enum xstats_type stat
; /* selects which counter the getter reads */
43 enum rte_event_dev_xstats_mode mode
; /* device / port / queue scope */
45 uint8_t reset_allowed
; /* when set, this value can be reset */
46 uint64_t reset_value
; /* an offset to be taken away to emulate resets */
/* xstats_fn for device-scope statistics: maps an xstats_type to the matching
 * counter in the sw_evdev instance. obj_idx and extra_arg are unused at
 * device scope (marked __rte_unused).
 * NOTE(review): the switch header, default case and closing brace are not
 * visible in this extract.
 */
50 get_dev_stat(const struct sw_evdev
*sw
, uint16_t obj_idx __rte_unused
,
51 enum xstats_type type
, int extra_arg __rte_unused
)
54 case rx
: return sw
->stats
.rx_pkts
;
55 case tx
: return sw
->stats
.tx_pkts
;
56 case dropped
: return sw
->stats
.rx_dropped
;
/* scheduler activity counters */
57 case calls
: return sw
->sched_called
;
58 case no_iq_enq
: return sw
->sched_no_iq_enqueues
;
59 case no_cq_enq
: return sw
->sched_no_cq_enqueues
;
/* xstats_fn for per-port statistics: obj_idx selects the port; extra_arg is
 * unused. Ring occupancy stats are read live from the port's rx/cq worker
 * rings via the rte_event_ring API.
 */
65 get_port_stat(const struct sw_evdev
*sw
, uint16_t obj_idx
,
66 enum xstats_type type
, int extra_arg __rte_unused
)
68 const struct sw_port
*p
= &sw
->ports
[obj_idx
];
71 case rx
: return p
->stats
.rx_pkts
;
72 case tx
: return p
->stats
.tx_pkts
;
73 case dropped
: return p
->stats
.rx_dropped
;
74 case inflight
: return p
->inflights
;
75 case pkt_cycles
: return p
->avg_pkt_ticks
;
76 case calls
: return p
->total_polls
;
77 case credits
: return p
->inflight_credits
;
78 case poll_return
: return p
->zero_polls
; /* polls that returned 0 events */
/* live ring occupancy / free-space queries */
79 case rx_used
: return rte_event_ring_count(p
->rx_worker_ring
);
80 case rx_free
: return rte_event_ring_free_count(p
->rx_worker_ring
);
81 case tx_used
: return rte_event_ring_count(p
->cq_worker_ring
);
82 case tx_free
: return rte_event_ring_free_count(p
->cq_worker_ring
);
/* xstats_fn for the per-port dequeue-burst-size histogram: obj_idx selects
 * the port and extra_arg is the bucket index into poll_buckets[].
 */
88 get_port_bucket_stat(const struct sw_evdev
*sw
, uint16_t obj_idx
,
89 enum xstats_type type
, int extra_arg
)
91 const struct sw_port
*p
= &sw
->ports
[obj_idx
];
94 case poll_return
: return p
->poll_buckets
[extra_arg
];
/* xstats_fn for per-queue (qid) statistics: obj_idx selects the qid.
 * The inflight count is computed on demand by summing the pinned-event
 * count (pcount) of every flow id tracked by the qid.
 * NOTE(review): the case label and declaration of `infl` for the summing
 * loop are in lines missing from this extract.
 */
100 get_qid_stat(const struct sw_evdev
*sw
, uint16_t obj_idx
,
101 enum xstats_type type
, int extra_arg __rte_unused
)
103 const struct sw_qid
*qid
= &sw
->qids
[obj_idx
];
106 case rx
: return qid
->stats
.rx_pkts
;
107 case tx
: return qid
->stats
.tx_pkts
;
108 case dropped
: return qid
->stats
.rx_dropped
;
/* sum per-flow pinned-event counts to get the qid's inflight total */
113 for (i
= 0; i
< RTE_DIM(qid
->fids
); i
++)
114 infl
+= qid
->fids
[i
].pcount
;
/* xstats_fn for per-qid internal-queue (iq) depth: obj_idx selects the qid
 * and extra_arg selects which of the qid's priority iqs to count.
 */
123 get_qid_iq_stat(const struct sw_evdev
*sw
, uint16_t obj_idx
,
124 enum xstats_type type
, int extra_arg
)
126 const struct sw_qid
*qid
= &sw
->qids
[obj_idx
];
127 const int iq_idx
= extra_arg
;
130 case iq_used
: return iq_count(&qid
->iq
[iq_idx
]);
/* xstats_fn for qid-to-port mapping stats: obj_idx selects the qid and
 * extra_arg selects the port. Counts flows currently pinned to the port
 * (by scanning fids[] for cq == port) and reads the qid-to-port packet
 * counter from to_port[].
 * NOTE(review): the switch header, the `pinned` case label and the
 * accumulator for the pinned-flow count are in lines missing here.
 */
136 get_qid_port_stat(const struct sw_evdev
*sw
, uint16_t obj_idx
,
137 enum xstats_type type
, int extra_arg
)
139 const struct sw_qid
*qid
= &sw
->qids
[obj_idx
];
140 uint16_t port
= extra_arg
;
/* count flow-ids whose consumer queue is this port */
147 for (i
= 0; i
< RTE_DIM(qid
->fids
); i
++)
148 if (qid
->fids
[i
].cq
== port
)
154 return qid
->to_port
[port
];
/* Build the flat xstats table for this sw eventdev instance: defines the
 * name/type/reset-allowed parallel arrays for each stat set, computes the
 * total entry count, allocates sw->xstats, then fills it in mode order
 * (device, then per-port, then per-queue) while recording the per-mode
 * counts and per-object offsets used later by get_names/get/reset.
 */
160 sw_xstats_init(struct sw_evdev
*sw
)
163 * define the stats names and types. Used to build up the device
165 * There are multiple set of stats:
168 * - per-port-dequeue-burst-sizes
173 * For each of these sets, we have three parallel arrays, one for the
174 * names, the other for the stat type parameter to be passed in the fn
175 * call to get that stat. The third array allows resetting or not.
176 * All these arrays must be kept in sync
178 static const char * const dev_stats
[] = { "rx", "tx", "drop",
179 "sched_calls", "sched_no_iq_enq", "sched_no_cq_enq",
181 static const enum xstats_type dev_types
[] = { rx
, tx
, dropped
,
182 calls
, no_iq_enq
, no_cq_enq
,
184 /* all device stats are allowed to be reset */
186 static const char * const port_stats
[] = {"rx", "tx", "drop",
187 "inflight", "avg_pkt_cycles", "credits",
188 "rx_ring_used", "rx_ring_free",
189 "cq_ring_used", "cq_ring_free",
190 "dequeue_calls", "dequeues_returning_0",
192 static const enum xstats_type port_types
[] = { rx
, tx
, dropped
,
193 inflight
, pkt_cycles
, credits
,
194 rx_used
, rx_free
, tx_used
, tx_free
,
197 static const uint8_t port_reset_allowed
[] = {1, 1, 1,
203 static const char * const port_bucket_stats
[] = {
204 "dequeues_returning" };
205 static const enum xstats_type port_bucket_types
[] = { poll_return
};
206 /* all bucket dequeues are allowed to be reset, handled in loop below */
208 static const char * const qid_stats
[] = {"rx", "tx", "drop",
211 static const enum xstats_type qid_types
[] = { rx
, tx
, dropped
,
214 static const uint8_t qid_reset_allowed
[] = {1, 1, 1,
218 static const char * const qid_iq_stats
[] = { "used" };
219 static const enum xstats_type qid_iq_types
[] = { iq_used
};
222 static const char * const qid_port_stats
[] = { "pinned_flows",
225 static const enum xstats_type qid_port_types
[] = { pinned
, pkts
};
226 static const uint8_t qid_port_reset_allowed
[] = {0, 1};
228 /* ---- end of stat definitions ---- */
230 /* check sizes, since a missed comma can lead to strings being
231 * joined by the compiler.
233 RTE_BUILD_BUG_ON(RTE_DIM(dev_stats
) != RTE_DIM(dev_types
));
234 RTE_BUILD_BUG_ON(RTE_DIM(port_stats
) != RTE_DIM(port_types
));
235 RTE_BUILD_BUG_ON(RTE_DIM(qid_stats
) != RTE_DIM(qid_types
));
236 RTE_BUILD_BUG_ON(RTE_DIM(qid_iq_stats
) != RTE_DIM(qid_iq_types
));
237 RTE_BUILD_BUG_ON(RTE_DIM(qid_port_stats
) != RTE_DIM(qid_port_types
));
238 RTE_BUILD_BUG_ON(RTE_DIM(port_bucket_stats
) !=
239 RTE_DIM(port_bucket_types
));
241 RTE_BUILD_BUG_ON(RTE_DIM(port_stats
) != RTE_DIM(port_reset_allowed
));
242 RTE_BUILD_BUG_ON(RTE_DIM(qid_stats
) != RTE_DIM(qid_reset_allowed
));
/* upper bound on per-port burst buckets; actual per-port count below is
 * derived from each port's cq ring capacity
 */
245 const uint32_t cons_bkt_shift
=
246 (MAX_SW_CONS_Q_DEPTH
>> SW_DEQ_STAT_BUCKET_SHIFT
);
/* worst-case total number of xstats entries across all modes */
247 const unsigned int count
= RTE_DIM(dev_stats
) +
248 sw
->port_count
* RTE_DIM(port_stats
) +
249 sw
->port_count
* RTE_DIM(port_bucket_stats
) *
250 (cons_bkt_shift
+ 1) +
251 sw
->qid_count
* RTE_DIM(qid_stats
) +
252 sw
->qid_count
* SW_IQS_MAX
* RTE_DIM(qid_iq_stats
) +
253 sw
->qid_count
* sw
->port_count
*
254 RTE_DIM(qid_port_stats
);
255 unsigned int i
, port
, qid
, iq
, bkt
, stat
= 0;
257 sw
->xstats
= rte_zmalloc_socket(NULL
, sizeof(sw
->xstats
[0]) * count
, 0,
258 sw
->data
->socket_id
);
259 if (sw
->xstats
== NULL
)
/* shorthand for the name buffer of the entry currently being filled */
262 #define sname sw->xstats[stat].name.name
/* device-scope entries first; `stat` is the running table index */
263 for (i
= 0; i
< RTE_DIM(dev_stats
); i
++, stat
++) {
264 sw
->xstats
[stat
] = (struct sw_xstats_entry
){
266 .stat
= dev_types
[i
],
267 .mode
= RTE_EVENT_DEV_XSTATS_DEVICE
,
270 snprintf(sname
, sizeof(sname
), "dev_%s", dev_stats
[i
]);
272 sw
->xstats_count_mode_dev
= stat
;
/* per-port entries, including the dequeue-burst-size buckets */
274 for (port
= 0; port
< sw
->port_count
; port
++) {
275 sw
->xstats_offset_for_port
[port
] = stat
;
277 uint32_t count_offset
= stat
;
279 for (i
= 0; i
< RTE_DIM(port_stats
); i
++, stat
++) {
280 sw
->xstats
[stat
] = (struct sw_xstats_entry
){
283 .stat
= port_types
[i
],
284 .mode
= RTE_EVENT_DEV_XSTATS_PORT
,
285 .reset_allowed
= port_reset_allowed
[i
],
287 snprintf(sname
, sizeof(sname
), "port_%u_%s",
288 port
, port_stats
[i
]);
/* one bucket entry per SW_DEQ_STAT_BUCKET_SHIFT-sized slice of this
 * port's cq ring capacity, plus one
 */
291 for (bkt
= 0; bkt
< (rte_event_ring_get_capacity(
292 sw
->ports
[port
].cq_worker_ring
) >>
293 SW_DEQ_STAT_BUCKET_SHIFT
) + 1; bkt
++) {
294 for (i
= 0; i
< RTE_DIM(port_bucket_stats
); i
++) {
295 sw
->xstats
[stat
] = (struct sw_xstats_entry
){
296 .fn
= get_port_bucket_stat
,
298 .stat
= port_bucket_types
[i
],
299 .mode
= RTE_EVENT_DEV_XSTATS_PORT
,
303 snprintf(sname
, sizeof(sname
),
305 port
, port_bucket_stats
[i
],
306 (bkt
<< SW_DEQ_STAT_BUCKET_SHIFT
) + 1,
307 (bkt
+ 1) << SW_DEQ_STAT_BUCKET_SHIFT
);
312 sw
->xstats_count_per_port
[port
] = stat
- count_offset
;
315 sw
->xstats_count_mode_port
= stat
- sw
->xstats_count_mode_dev
;
/* per-queue entries: qid stats, per-iq depth, and qid-to-port stats */
317 for (qid
= 0; qid
< sw
->qid_count
; qid
++) {
318 uint32_t count_offset
= stat
;
319 sw
->xstats_offset_for_qid
[qid
] = stat
;
321 for (i
= 0; i
< RTE_DIM(qid_stats
); i
++, stat
++) {
322 sw
->xstats
[stat
] = (struct sw_xstats_entry
){
325 .stat
= qid_types
[i
],
326 .mode
= RTE_EVENT_DEV_XSTATS_QUEUE
,
327 .reset_allowed
= qid_reset_allowed
[i
],
329 snprintf(sname
, sizeof(sname
), "qid_%u_%s",
332 for (iq
= 0; iq
< SW_IQS_MAX
; iq
++)
333 for (i
= 0; i
< RTE_DIM(qid_iq_stats
); i
++, stat
++) {
334 sw
->xstats
[stat
] = (struct sw_xstats_entry
){
335 .fn
= get_qid_iq_stat
,
337 .stat
= qid_iq_types
[i
],
338 .mode
= RTE_EVENT_DEV_XSTATS_QUEUE
,
342 snprintf(sname
, sizeof(sname
),
348 for (port
= 0; port
< sw
->port_count
; port
++)
349 for (i
= 0; i
< RTE_DIM(qid_port_stats
); i
++, stat
++) {
350 sw
->xstats
[stat
] = (struct sw_xstats_entry
){
351 .fn
= get_qid_port_stat
,
353 .stat
= qid_port_types
[i
],
354 .mode
= RTE_EVENT_DEV_XSTATS_QUEUE
,
357 qid_port_reset_allowed
[i
],
359 snprintf(sname
, sizeof(sname
),
365 sw
->xstats_count_per_qid
[qid
] = stat
- count_offset
;
368 sw
->xstats_count_mode_queue
= stat
-
369 (sw
->xstats_count_mode_dev
+ sw
->xstats_count_mode_port
);
/* final total; entries [0, stat) of the allocation are valid */
372 sw
->xstats_count
= stat
;
/* Release the xstats table allocated by sw_xstats_init() and zero the
 * count so later lookups see an empty table.
 */
378 sw_xstats_uninit(struct sw_evdev
*sw
)
380 rte_free(sw
->xstats
);
381 sw
->xstats_count
= 0;
/* eventdev xstats_get_names() callback: reports the stat names (and their
 * global ids) for the requested mode and object. If the caller's buffers
 * are absent or too small, returns the required count instead of filling
 * anything in. Ids are assigned as start_offset + position, matching the
 * flat table layout built in sw_xstats_init().
 * NOTE(review): break statements, error returns for out-of-range
 * queue_port_id, and the loop's continue paths are in lines missing from
 * this extract.
 */
386 sw_xstats_get_names(const struct rte_eventdev
*dev
,
387 enum rte_event_dev_xstats_mode mode
, uint8_t queue_port_id
,
388 struct rte_event_dev_xstats_name
*xstats_names
,
389 unsigned int *ids
, unsigned int size
)
391 const struct sw_evdev
*sw
= sw_pmd_priv_const(dev
);
393 unsigned int xidx
= 0;
395 RTE_SET_USED(queue_port_id
);
397 uint32_t xstats_mode_count
= 0;
398 uint32_t start_offset
= 0;
/* pick the count/offset for the requested scope */
401 case RTE_EVENT_DEV_XSTATS_DEVICE
:
402 xstats_mode_count
= sw
->xstats_count_mode_dev
;
404 case RTE_EVENT_DEV_XSTATS_PORT
:
405 if (queue_port_id
>= (signed int)sw
->port_count
)
407 xstats_mode_count
= sw
->xstats_count_per_port
[queue_port_id
];
408 start_offset
= sw
->xstats_offset_for_port
[queue_port_id
];
410 case RTE_EVENT_DEV_XSTATS_QUEUE
:
411 if (queue_port_id
>= (signed int)sw
->qid_count
)
413 xstats_mode_count
= sw
->xstats_count_per_qid
[queue_port_id
];
414 start_offset
= sw
->xstats_offset_for_qid
[queue_port_id
];
417 SW_LOG_ERR("Invalid mode received in sw_xstats_get_names()\n");
/* not enough room (or no output buffers): report required count only */
421 if (xstats_mode_count
> size
|| !ids
|| !xstats_names
)
422 return xstats_mode_count
;
/* walk the whole table, emitting entries matching mode and object */
424 for (i
= 0; i
< sw
->xstats_count
&& xidx
< size
; i
++) {
425 if (sw
->xstats
[i
].mode
!= mode
)
428 if (mode
!= RTE_EVENT_DEV_XSTATS_DEVICE
&&
429 queue_port_id
!= sw
->xstats
[i
].obj_idx
)
432 xstats_names
[xidx
] = sw
->xstats
[i
].name
;
434 ids
[xidx
] = start_offset
+ xidx
;
/* Shared worker for both xstats_get() and the reset paths. Looks up each
 * requested id, calls the entry's getter, and (per the visible comment)
 * either behaves like xstats_get() -- honouring n via ret_if_n_lt_nstats --
 * or ignores n for single-stat reset behaviour. When reset is set, the
 * current value is latched into reset_value so future reads are offset.
 * NOTE(review): the subtraction of reset_value when filling values[], the
 * break/continue statements and the final return are in missing lines.
 */
441 sw_xstats_update(struct sw_evdev
*sw
, enum rte_event_dev_xstats_mode mode
,
442 uint8_t queue_port_id
, const unsigned int ids
[],
443 uint64_t values
[], unsigned int n
, const uint32_t reset
,
444 const uint32_t ret_if_n_lt_nstats
)
447 unsigned int xidx
= 0;
449 RTE_SET_USED(queue_port_id
);
451 uint32_t xstats_mode_count
= 0;
454 case RTE_EVENT_DEV_XSTATS_DEVICE
:
455 xstats_mode_count
= sw
->xstats_count_mode_dev
;
457 case RTE_EVENT_DEV_XSTATS_PORT
:
458 if (queue_port_id
>= (signed int)sw
->port_count
)
460 xstats_mode_count
= sw
->xstats_count_per_port
[queue_port_id
];
462 case RTE_EVENT_DEV_XSTATS_QUEUE
:
463 if (queue_port_id
>= (signed int)sw
->qid_count
)
465 xstats_mode_count
= sw
->xstats_count_per_qid
[queue_port_id
];
468 SW_LOG_ERR("Invalid mode received in sw_xstats_get()\n");
472 /* this function can check num stats and return them (xstats_get() style
473 * behaviour) or ignore n for reset() of a single stat style behaviour.
475 if (ret_if_n_lt_nstats
&& xstats_mode_count
> n
)
476 return xstats_mode_count
;
478 for (i
= 0; i
< n
&& xidx
< xstats_mode_count
; i
++) {
479 struct sw_xstats_entry
*xs
= &sw
->xstats
[ids
[i
]];
/* skip ids that are out of range or belong to a different scope */
480 if (ids
[i
] > sw
->xstats_count
|| xs
->mode
!= mode
)
483 if (mode
!= RTE_EVENT_DEV_XSTATS_DEVICE
&&
484 queue_port_id
!= xs
->obj_idx
)
/* read the live counter via the entry's getter callback */
487 uint64_t val
= xs
->fn(sw
, xs
->obj_idx
, xs
->stat
, xs
->extra_arg
)
/* latch current value so subsequent reads appear reset */
493 if (xs
->reset_allowed
&& reset
)
494 xs
->reset_value
= val
;
/* eventdev xstats_get() callback: thin wrapper over sw_xstats_update()
 * with reset disabled.
 */
505 sw_xstats_get(const struct rte_eventdev
*dev
,
506 enum rte_event_dev_xstats_mode mode
, uint8_t queue_port_id
,
507 const unsigned int ids
[], uint64_t values
[], unsigned int n
)
509 struct sw_evdev
*sw
= sw_pmd_priv(dev
);
510 const uint32_t reset
= 0;
511 const uint32_t ret_n_lt_stats
= 0;
512 return sw_xstats_update(sw
, mode
, queue_port_id
, ids
, values
, n
,
513 reset
, ret_n_lt_stats
);
/* eventdev xstats_get_by_name() callback: linear search of the xstats
 * table by name; on a match, returns the live value via the entry's getter
 * (the reset_value subtraction and the optional *id store are in lines
 * missing from this extract).
 */
517 sw_xstats_get_by_name(const struct rte_eventdev
*dev
,
518 const char *name
, unsigned int *id
)
520 const struct sw_evdev
*sw
= sw_pmd_priv_const(dev
);
523 for (i
= 0; i
< sw
->xstats_count
; i
++) {
524 struct sw_xstats_entry
*xs
= &sw
->xstats
[i
];
525 if (strncmp(xs
->name
.name
, name
,
526 RTE_EVENT_DEV_XSTATS_NAME_SIZE
) == 0){
529 return xs
->fn(sw
, xs
->obj_idx
, xs
->stat
, xs
->extra_arg
)
/* Reset a contiguous range [start, start+num) of xstats entries by
 * latching each resettable entry's current value into reset_value.
 * Entries with reset_allowed == 0 are skipped.
 */
539 sw_xstats_reset_range(struct sw_evdev
*sw
, uint32_t start
, uint32_t num
)
542 for (i
= start
; i
< start
+ num
; i
++) {
543 struct sw_xstats_entry
*xs
= &sw
->xstats
[i
];
544 if (!xs
->reset_allowed
)
547 uint64_t val
= xs
->fn(sw
, xs
->obj_idx
, xs
->stat
, xs
->extra_arg
)
549 xs
->reset_value
= val
;
/* Reset stats for one queue. With an explicit id list, resets exactly
 * those ids via sw_xstats_update(reset=1) and returns -EINVAL unless all
 * nb_ids were matched; otherwise (the ids == NULL branch, whose guard is
 * in a missing line) resets the queue's whole range.
 */
554 sw_xstats_reset_queue(struct sw_evdev
*sw
, uint8_t queue_id
,
555 const uint32_t ids
[], uint32_t nb_ids
)
557 const uint32_t reset
= 1;
558 const uint32_t ret_n_lt_stats
= 0;
560 uint32_t nb_reset
= sw_xstats_update(sw
,
561 RTE_EVENT_DEV_XSTATS_QUEUE
,
562 queue_id
, ids
, NULL
, nb_ids
,
563 reset
, ret_n_lt_stats
);
564 return nb_reset
== nb_ids
? 0 : -EINVAL
;
/* no id list: reset this queue's full contiguous range */
568 sw_xstats_reset_range(sw
, sw
->xstats_offset_for_qid
[queue_id
],
569 sw
->xstats_count_per_qid
[queue_id
]);
/* Reset stats for one port; mirrors sw_xstats_reset_queue() with port
 * scope. With an id list, resets those ids and returns -EINVAL unless all
 * matched; otherwise resets the port's whole offset/count range.
 */
575 sw_xstats_reset_port(struct sw_evdev
*sw
, uint8_t port_id
,
576 const uint32_t ids
[], uint32_t nb_ids
)
578 const uint32_t reset
= 1;
579 const uint32_t ret_n_lt_stats
= 0;
580 int offset
= sw
->xstats_offset_for_port
[port_id
];
581 int nb_stat
= sw
->xstats_count_per_port
[port_id
];
584 uint32_t nb_reset
= sw_xstats_update(sw
,
585 RTE_EVENT_DEV_XSTATS_PORT
, port_id
,
587 reset
, ret_n_lt_stats
);
588 return nb_reset
== nb_ids
? 0 : -EINVAL
;
/* no id list: reset this port's full contiguous range */
591 sw_xstats_reset_range(sw
, offset
, nb_stat
);
/* Reset device-scope stats. With an id list, each id is validated against
 * the device-mode count and reset individually; otherwise every
 * device-scope entry is reset.
 */
596 sw_xstats_reset_dev(struct sw_evdev
*sw
, const uint32_t ids
[], uint32_t nb_ids
)
600 for (i
= 0; i
< nb_ids
; i
++) {
601 uint32_t id
= ids
[i
];
602 if (id
>= sw
->xstats_count_mode_dev
)
604 sw_xstats_reset_range(sw
, id
, 1);
/* no id list: reset all device-scope entries */
607 for (i
= 0; i
< sw
->xstats_count_mode_dev
; i
++)
608 sw_xstats_reset_range(sw
, i
, 1);
/* eventdev xstats_reset() callback: dispatches on mode, treating
 * queue_port_id == -1 as "all ports" / "all queues" per the visible
 * comment.
 * NOTE(review): this function continues past the end of this extract
 * (break statements, error handling for `err`, the default case and the
 * return are not visible).
 */
615 sw_xstats_reset(struct rte_eventdev
*dev
,
616 enum rte_event_dev_xstats_mode mode
,
617 int16_t queue_port_id
,
618 const uint32_t ids
[],
621 struct sw_evdev
*sw
= sw_pmd_priv(dev
);
624 /* handle -1 for queue_port_id here, looping over all ports/queues */
626 case RTE_EVENT_DEV_XSTATS_DEVICE
:
627 sw_xstats_reset_dev(sw
, ids
, nb_ids
);
629 case RTE_EVENT_DEV_XSTATS_PORT
:
630 if (queue_port_id
== -1) {
631 for (i
= 0; i
< sw
->port_count
; i
++) {
632 err
= sw_xstats_reset_port(sw
, i
, ids
, nb_ids
);
636 } else if (queue_port_id
< (int16_t)sw
->port_count
)
637 sw_xstats_reset_port(sw
, queue_port_id
, ids
, nb_ids
);
639 case RTE_EVENT_DEV_XSTATS_QUEUE
:
640 if (queue_port_id
== -1) {
641 for (i
= 0; i
< sw
->qid_count
; i
++) {
642 err
= sw_xstats_reset_queue(sw
, i
, ids
, nb_ids
);
646 } else if (queue_port_id
< (int16_t)sw
->qid_count
)
647 sw_xstats_reset_queue(sw
, queue_port_id
, ids
, nb_ids
);