1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Ericsson AB
10 #include <rte_debug.h>
/* The high bits in the xstats id is used to store an additional
 * parameter (beyond the queue or port id already in the xstats
 * interface).
 */
#define DSW_XSTATS_ID_PARAM_BITS (8)
#define DSW_XSTATS_ID_STAT_BITS					\
	(sizeof(unsigned int)*CHAR_BIT - DSW_XSTATS_ID_PARAM_BITS)
#define DSW_XSTATS_ID_STAT_MASK ((1U << DSW_XSTATS_ID_STAT_BITS) - 1)

/* Extract the extra parameter (e.g., a queue id) from the high bits. */
#define DSW_XSTATS_ID_GET_PARAM(id)		\
	((id)>>DSW_XSTATS_ID_STAT_BITS)

/* Extract the statistic index from the low bits. */
#define DSW_XSTATS_ID_GET_STAT(id)		\
	((id) & DSW_XSTATS_ID_STAT_MASK)

/* Combine a statistic index and a parameter value into a single xstats
 * id. The cast to unsigned avoids undefined behavior when the
 * int-promoted parameter would be shifted into the sign bit, and the
 * parentheses around 'id' guard against precedence surprises when the
 * macro argument is an expression.
 */
#define DSW_XSTATS_ID_CREATE(id, param_value)				\
	(((unsigned int)(param_value) << DSW_XSTATS_ID_STAT_BITS) | (id))
/* Signature for functions producing the value of a device-level
 * statistic. (NOTE(review): the 'typedef' keyword was dropped by the
 * extraction; restored here — the name is used as a type at the
 * get_value_fn field below.)
 */
typedef
uint64_t (*dsw_xstats_dev_get_value_fn)(struct dsw_evdev *dsw);
33 struct dsw_xstat_dev
{
35 dsw_xstats_dev_get_value_fn get_value_fn
;
/* Signature for functions producing the value of a per-port (and,
 * when per_queue is set, per-queue) statistic.
 */
typedef
uint64_t (*dsw_xstats_port_get_value_fn)(struct dsw_evdev *dsw,
					 uint8_t port_id, uint8_t queue_id);
42 struct dsw_xstats_port
{
44 dsw_xstats_port_get_value_fn get_value_fn
;
49 dsw_xstats_dev_credits_on_loan(struct dsw_evdev
*dsw
)
51 return rte_atomic32_read(&dsw
->credits_on_loan
);
54 static struct dsw_xstat_dev dsw_dev_xstats
[] = {
55 { "dev_credits_on_loan", dsw_xstats_dev_credits_on_loan
}
/* Generates a trivial accessor function returning the per-port
 * counter '_variable'; the queue id parameter exists only to match
 * dsw_xstats_port_get_value_fn and is ignored. (The return-type and
 * port_id-parameter lines were dropped by the extraction; restored to
 * match the hand-written accessors below.)
 */
#define DSW_GEN_PORT_ACCESS_FN(_variable)				\
	static uint64_t							\
	dsw_xstats_port_get_ ## _variable(struct dsw_evdev *dsw,	\
					  uint8_t port_id,		\
					  uint8_t queue_id __rte_unused) \
	{								\
		return dsw->ports[port_id]._variable;			\
	}
/* Per-port counters for each of the three enqueue operation types. */
DSW_GEN_PORT_ACCESS_FN(new_enqueued)
DSW_GEN_PORT_ACCESS_FN(forward_enqueued)
DSW_GEN_PORT_ACCESS_FN(release_enqueued)
72 dsw_xstats_port_get_queue_enqueued(struct dsw_evdev
*dsw
, uint8_t port_id
,
75 return dsw
->ports
[port_id
].queue_enqueued
[queue_id
];
/* Total number of events dequeued on the port. */
DSW_GEN_PORT_ACCESS_FN(dequeued)
81 dsw_xstats_port_get_queue_dequeued(struct dsw_evdev
*dsw
, uint8_t port_id
,
84 return dsw
->ports
[port_id
].queue_dequeued
[queue_id
];
/* Number of flow migrations this port has performed. */
DSW_GEN_PORT_ACCESS_FN(migrations)
90 dsw_xstats_port_get_migration_latency(struct dsw_evdev
*dsw
, uint8_t port_id
,
91 uint8_t queue_id __rte_unused
)
93 uint64_t total_latency
= dsw
->ports
[port_id
].migration_latency
;
94 uint64_t num_migrations
= dsw
->ports
[port_id
].migrations
;
96 return num_migrations
> 0 ? total_latency
/ num_migrations
: 0;
100 dsw_xstats_port_get_event_proc_latency(struct dsw_evdev
*dsw
, uint8_t port_id
,
101 uint8_t queue_id __rte_unused
)
103 uint64_t total_busy_cycles
=
104 dsw
->ports
[port_id
].total_busy_cycles
;
106 dsw
->ports
[port_id
].dequeued
;
108 return dequeued
> 0 ? total_busy_cycles
/ dequeued
: 0;
/* Number of credits currently held by the port. */
DSW_GEN_PORT_ACCESS_FN(inflight_credits)
114 dsw_xstats_port_get_load(struct dsw_evdev
*dsw
, uint8_t port_id
,
115 uint8_t queue_id __rte_unused
)
119 load
= rte_atomic16_read(&dsw
->ports
[port_id
].load
);
121 return DSW_LOAD_TO_PERCENT(load
);
/* Timestamp of the port's last background processing run. */
DSW_GEN_PORT_ACCESS_FN(last_bg)
126 static struct dsw_xstats_port dsw_port_xstats
[] = {
127 { "port_%u_new_enqueued", dsw_xstats_port_get_new_enqueued
,
129 { "port_%u_forward_enqueued", dsw_xstats_port_get_forward_enqueued
,
131 { "port_%u_release_enqueued", dsw_xstats_port_get_release_enqueued
,
133 { "port_%u_queue_%u_enqueued", dsw_xstats_port_get_queue_enqueued
,
135 { "port_%u_dequeued", dsw_xstats_port_get_dequeued
,
137 { "port_%u_queue_%u_dequeued", dsw_xstats_port_get_queue_dequeued
,
139 { "port_%u_migrations", dsw_xstats_port_get_migrations
,
141 { "port_%u_migration_latency", dsw_xstats_port_get_migration_latency
,
143 { "port_%u_event_proc_latency", dsw_xstats_port_get_event_proc_latency
,
145 { "port_%u_inflight_credits", dsw_xstats_port_get_inflight_credits
,
147 { "port_%u_load", dsw_xstats_port_get_load
,
149 { "port_%u_last_bg", dsw_xstats_port_get_last_bg
,
154 dsw_xstats_dev_get_names(struct rte_event_dev_xstats_name
*xstats_names
,
155 unsigned int *ids
, unsigned int size
)
159 for (i
= 0; i
< RTE_DIM(dsw_dev_xstats
) && i
< size
; i
++) {
161 strcpy(xstats_names
[i
].name
, dsw_dev_xstats
[i
].name
);
168 dsw_xstats_port_get_names(struct dsw_evdev
*dsw
, uint8_t port_id
,
169 struct rte_event_dev_xstats_name
*xstats_names
,
170 unsigned int *ids
, unsigned int size
)
172 uint8_t queue_id
= 0;
174 unsigned int stat_idx
;
176 for (id_idx
= 0, stat_idx
= 0;
177 id_idx
< size
&& stat_idx
< RTE_DIM(dsw_port_xstats
);
179 struct dsw_xstats_port
*xstat
= &dsw_port_xstats
[stat_idx
];
181 if (xstat
->per_queue
) {
182 ids
[id_idx
] = DSW_XSTATS_ID_CREATE(stat_idx
, queue_id
);
183 snprintf(xstats_names
[id_idx
].name
,
184 RTE_EVENT_DEV_XSTATS_NAME_SIZE
,
185 dsw_port_xstats
[stat_idx
].name_fmt
, port_id
,
189 ids
[id_idx
] = stat_idx
;
190 snprintf(xstats_names
[id_idx
].name
,
191 RTE_EVENT_DEV_XSTATS_NAME_SIZE
,
192 dsw_port_xstats
[stat_idx
].name_fmt
, port_id
);
195 if (!(xstat
->per_queue
&& queue_id
< dsw
->num_queues
)) {
204 dsw_xstats_get_names(const struct rte_eventdev
*dev
,
205 enum rte_event_dev_xstats_mode mode
,
206 uint8_t queue_port_id
,
207 struct rte_event_dev_xstats_name
*xstats_names
,
208 unsigned int *ids
, unsigned int size
)
210 struct dsw_evdev
*dsw
= dsw_pmd_priv(dev
);
213 case RTE_EVENT_DEV_XSTATS_DEVICE
:
214 return dsw_xstats_dev_get_names(xstats_names
, ids
, size
);
215 case RTE_EVENT_DEV_XSTATS_PORT
:
216 return dsw_xstats_port_get_names(dsw
, queue_port_id
,
217 xstats_names
, ids
, size
);
218 case RTE_EVENT_DEV_XSTATS_QUEUE
:
227 dsw_xstats_dev_get(const struct rte_eventdev
*dev
,
228 const unsigned int ids
[], uint64_t values
[], unsigned int n
)
230 struct dsw_evdev
*dsw
= dsw_pmd_priv(dev
);
233 for (i
= 0; i
< n
; i
++) {
234 unsigned int id
= ids
[i
];
235 struct dsw_xstat_dev
*xstat
= &dsw_dev_xstats
[id
];
236 values
[i
] = xstat
->get_value_fn(dsw
);
242 dsw_xstats_port_get(const struct rte_eventdev
*dev
, uint8_t port_id
,
243 const unsigned int ids
[], uint64_t values
[], unsigned int n
)
245 struct dsw_evdev
*dsw
= dsw_pmd_priv(dev
);
248 for (i
= 0; i
< n
; i
++) {
249 unsigned int id
= ids
[i
];
250 unsigned int stat_idx
= DSW_XSTATS_ID_GET_STAT(id
);
251 struct dsw_xstats_port
*xstat
= &dsw_port_xstats
[stat_idx
];
252 uint8_t queue_id
= 0;
254 if (xstat
->per_queue
)
255 queue_id
= DSW_XSTATS_ID_GET_PARAM(id
);
257 values
[i
] = xstat
->get_value_fn(dsw
, port_id
, queue_id
);
263 dsw_xstats_get(const struct rte_eventdev
*dev
,
264 enum rte_event_dev_xstats_mode mode
, uint8_t queue_port_id
,
265 const unsigned int ids
[], uint64_t values
[], unsigned int n
)
268 case RTE_EVENT_DEV_XSTATS_DEVICE
:
269 return dsw_xstats_dev_get(dev
, ids
, values
, n
);
270 case RTE_EVENT_DEV_XSTATS_PORT
:
271 return dsw_xstats_port_get(dev
, queue_port_id
, ids
, values
, n
);
272 case RTE_EVENT_DEV_XSTATS_QUEUE
:
281 uint64_t dsw_xstats_get_by_name(const struct rte_eventdev
*dev
,
282 const char *name
, unsigned int *id
)