/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <math.h>
#include <stddef.h>
#include <stdio.h>
#include <sys/types.h>

#include <rte_mbuf.h>
#include <rte_log.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_metrics.h>
#include <rte_memzone.h>
#include <rte_lcore.h>

#include "rte_latencystats.h"
/** Nano seconds per second */
#define NS_PER_SEC 1E9

/** Clock cycles per nano second */
static uint64_t
latencystat_cycles_per_ns(void)
{
	return rte_get_timer_hz() / NS_PER_SEC;
}
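/*
 * Illustrative arithmetic (assumed clock rates, not measured values): with a
 * 2.5 GHz timer, rte_get_timer_hz() / NS_PER_SEC is 2.5 and truncates to
 * 2 cycles per ns on return, so a 1000 ns sample interval maps to roughly
 * 2000 cycles. A timer below 1 GHz would truncate to 0; the code assumes a
 * clock of at least 1 GHz.
 */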
/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_LATENCY_STATS RTE_LOGTYPE_USER1

static const char *MZ_RTE_LATENCY_STATS = "rte_latencystats";
static int latency_stats_index;
static uint64_t samp_intvl;
static uint64_t timer_tsc;
static uint64_t prev_tsc;
struct rte_latency_stats {
	float min_latency; /**< Minimum latency (CPU cycles; exported in ns) */
	float avg_latency; /**< Average latency (CPU cycles; exported in ns) */
	float max_latency; /**< Maximum latency (CPU cycles; exported in ns) */
	float jitter; /**< Latency variation (CPU cycles; exported in ns) */
};
static struct rte_latency_stats *glob_stats;
struct rxtx_cbs {
	struct rte_eth_rxtx_callback *cb;
};
static struct rxtx_cbs rx_cbs[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];
static struct rxtx_cbs tx_cbs[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];
struct latency_stats_nameoff {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};
static const struct latency_stats_nameoff lat_stats_strings[] = {
	{"min_latency_ns", offsetof(struct rte_latency_stats, min_latency)},
	{"avg_latency_ns", offsetof(struct rte_latency_stats, avg_latency)},
	{"max_latency_ns", offsetof(struct rte_latency_stats, max_latency)},
	{"jitter_ns", offsetof(struct rte_latency_stats, jitter)},
};

#define NUM_LATENCY_STATS (sizeof(lat_stats_strings) / \
			sizeof(lat_stats_strings[0]))
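/*
 * The name/offset table lets the export paths below walk all four counters
 * generically: RTE_PTR_ADD(glob_stats, offset) yields a float * for each
 * field, so adding a metric only needs a struct member plus a table entry.
 * A minimal sketch of the access pattern (hypothetical code):
 *
 *	float *f = RTE_PTR_ADD(glob_stats, lat_stats_strings[0].offset);
 *	// *f aliases glob_stats->min_latency
 */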
int32_t
rte_latencystats_update(void)
{
	unsigned int i;
	float *stats_ptr = NULL;
	uint64_t values[NUM_LATENCY_STATS] = {0};
	int ret;

	for (i = 0; i < NUM_LATENCY_STATS; i++) {
		stats_ptr = RTE_PTR_ADD(glob_stats,
				lat_stats_strings[i].offset);
		values[i] = (uint64_t)floor((*stats_ptr)/
				latencystat_cycles_per_ns());
	}

	ret = rte_metrics_update_values(RTE_METRICS_GLOBAL,
					latency_stats_index,
					values, NUM_LATENCY_STATS);
	if (ret < 0)
		RTE_LOG(INFO, LATENCY_STATS, "Failed to push the stats\n");

	return ret;
}
static void
rte_latencystats_fill_values(struct rte_metric_value *values)
{
	unsigned int i;
	float *stats_ptr = NULL;

	for (i = 0; i < NUM_LATENCY_STATS; i++) {
		stats_ptr = RTE_PTR_ADD(glob_stats,
				lat_stats_strings[i].offset);
		values[i].key = i;
		values[i].value = (uint64_t)floor((*stats_ptr)/
				latencystat_cycles_per_ns());
	}
}
static uint16_t
add_time_stamps(uint8_t pid __rte_unused,
		uint16_t qid __rte_unused,
		struct rte_mbuf **pkts,
		uint16_t nb_pkts,
		uint16_t max_pkts __rte_unused,
		void *user_cb __rte_unused)
{
	unsigned int i;
	uint64_t diff_tsc, now;

	/*
	 * For every sample interval,
	 * a time stamp is marked on one received packet.
	 */
	now = rte_rdtsc();
	for (i = 0; i < nb_pkts; i++) {
		diff_tsc = now - prev_tsc;
		timer_tsc += diff_tsc;
		if (timer_tsc >= samp_intvl) {
			pkts[i]->timestamp = now;
			timer_tsc = 0;
		}
		prev_tsc = now;
		now = rte_rdtsc();
	}

	return nb_pkts;
}
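/*
 * Note on the sampling design above: at most one packet per sample interval
 * carries a time stamp, so calc_latency() below measures a sample of the
 * traffic rather than every packet. This keeps the per-packet Rx overhead
 * to a TSC read plus a comparison.
 */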
static uint16_t
calc_latency(uint8_t pid __rte_unused,
		uint16_t qid __rte_unused,
		struct rte_mbuf **pkts,
		uint16_t nb_pkts,
		void *_ __rte_unused)
{
	unsigned int i, cnt = 0;
	uint64_t now;
	float latency[nb_pkts];
	static float prev_latency;
	/*
	 * Alpha represents the degree of weighting decrease in EWMA,
	 * a constant smoothing factor between 0 and 1. The value
	 * is used below for measuring the average latency.
	 */
	const float alpha = 0.2;

	now = rte_rdtsc();
	for (i = 0; i < nb_pkts; i++) {
		if (pkts[i]->timestamp)
			latency[cnt++] = now - pkts[i]->timestamp;
	}

	for (i = 0; i < cnt; i++) {
		/*
		 * The jitter is calculated as the statistical mean of
		 * interpacket delay variation. The "jitter estimate" is
		 * computed by taking the absolute values of the ipdv
		 * sequence and applying an exponential filter with
		 * parameter 1/16 to generate the estimate, i.e.
		 * J = J + (|D(i-1,i)| - J)/16, where J is the jitter and
		 * D(i-1,i) is the difference in latency of two consecutive
		 * packets.
		 * Reference: RFC 5481 sec 4.1, RFC 3393 sec 4.5,
		 * RFC 1889 sec.
		 */
		glob_stats->jitter += (fabsf(prev_latency - latency[i])
					- glob_stats->jitter)/16;
		if (glob_stats->min_latency == 0)
			glob_stats->min_latency = latency[i];
		else if (latency[i] < glob_stats->min_latency)
			glob_stats->min_latency = latency[i];
		else if (latency[i] > glob_stats->max_latency)
			glob_stats->max_latency = latency[i];
		/*
		 * The average latency is measured using an exponential
		 * moving average, i.e. an EWMA:
		 * https://en.wikipedia.org/wiki/Moving_average
		 */
		glob_stats->avg_latency +=
			alpha * (latency[i] - glob_stats->avg_latency);
		prev_latency = latency[i];
	}

	return nb_pkts;
}
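/*
 * Worked example of the two filters above (illustrative numbers only): with
 * alpha = 0.2 and avg_latency = 1000 cycles, a sample of 2000 cycles moves
 * the average to 1000 + 0.2 * (2000 - 1000) = 1200 cycles. Starting from
 * jitter = 0, consecutive latencies of 1000 and 1160 cycles give a jitter
 * estimate of 0 + (|1000 - 1160| - 0)/16 = 10 cycles.
 */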
int
rte_latencystats_init(uint64_t app_samp_intvl,
		rte_latency_stats_flow_type_fn user_cb)
{
	unsigned int i;
	uint8_t pid;
	uint16_t qid;
	struct rxtx_cbs *cbs = NULL;
	const uint8_t nb_ports = rte_eth_dev_count();
	const char *ptr_strings[NUM_LATENCY_STATS] = {0};
	const struct rte_memzone *mz = NULL;
	const unsigned int flags = 0;

	if (rte_memzone_lookup(MZ_RTE_LATENCY_STATS))
		return -EEXIST;

	/** Allocate stats in shared memory for multi-process support */
	mz = rte_memzone_reserve(MZ_RTE_LATENCY_STATS, sizeof(*glob_stats),
					rte_socket_id(), flags);
	if (mz == NULL) {
		RTE_LOG(ERR, LATENCY_STATS, "Cannot reserve memory: %s:%d\n",
			__func__, __LINE__);
		return -ENOMEM;
	}

	glob_stats = mz->addr;
	samp_intvl = app_samp_intvl * latencystat_cycles_per_ns();

	/** Register latency stats with the stats library */
	for (i = 0; i < NUM_LATENCY_STATS; i++)
		ptr_strings[i] = lat_stats_strings[i].name;

	latency_stats_index = rte_metrics_reg_names(ptr_strings,
						NUM_LATENCY_STATS);
	if (latency_stats_index < 0) {
		RTE_LOG(DEBUG, LATENCY_STATS,
			"Failed to register latency stats names\n");
		return -1;
	}

	/** Register Rx/Tx callbacks */
	for (pid = 0; pid < nb_ports; pid++) {
		struct rte_eth_dev_info dev_info;
		rte_eth_dev_info_get(pid, &dev_info);
		for (qid = 0; qid < dev_info.nb_rx_queues; qid++) {
			cbs = &rx_cbs[pid][qid];
			cbs->cb = rte_eth_add_first_rx_callback(pid, qid,
					add_time_stamps, user_cb);
			if (!cbs->cb)
				RTE_LOG(INFO, LATENCY_STATS, "Failed to "
					"register Rx callback for pid=%d, "
					"qid=%d\n", pid, qid);
		}
		for (qid = 0; qid < dev_info.nb_tx_queues; qid++) {
			cbs = &tx_cbs[pid][qid];
			cbs->cb = rte_eth_add_tx_callback(pid, qid,
					calc_latency, user_cb);
			if (!cbs->cb)
				RTE_LOG(INFO, LATENCY_STATS, "Failed to "
					"register Tx callback for pid=%d, "
					"qid=%d\n", pid, qid);
		}
	}

	return 0;
}
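/*
 * Usage sketch (hypothetical application code): once ports are configured
 * and started, the application enables the library with a sample interval
 * in nano seconds, e.g.
 *
 *	if (rte_latencystats_init(1000, NULL) != 0)
 *		rte_exit(EXIT_FAILURE, "latencystats init failed\n");
 *
 * Passing NULL for the flow-type callback is acceptable here: user_cb is
 * only forwarded to the Rx/Tx callbacks, which currently ignore it.
 */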
int
rte_latencystats_uninit(void)
{
	uint8_t pid;
	uint16_t qid;
	int ret = 0;
	struct rxtx_cbs *cbs = NULL;
	const uint8_t nb_ports = rte_eth_dev_count();

	/** Deregister Rx/Tx callbacks */
	for (pid = 0; pid < nb_ports; pid++) {
		struct rte_eth_dev_info dev_info;
		rte_eth_dev_info_get(pid, &dev_info);
		for (qid = 0; qid < dev_info.nb_rx_queues; qid++) {
			cbs = &rx_cbs[pid][qid];
			ret = rte_eth_remove_rx_callback(pid, qid, cbs->cb);
			if (ret)
				RTE_LOG(INFO, LATENCY_STATS, "failed to "
					"remove Rx callback for pid=%d, "
					"qid=%d\n", pid, qid);
		}
		for (qid = 0; qid < dev_info.nb_tx_queues; qid++) {
			cbs = &tx_cbs[pid][qid];
			ret = rte_eth_remove_tx_callback(pid, qid, cbs->cb);
			if (ret)
				RTE_LOG(INFO, LATENCY_STATS, "failed to "
					"remove Tx callback for pid=%d, "
					"qid=%d\n", pid, qid);
		}
	}

	return 0;
}
int
rte_latencystats_get_names(struct rte_metric_name *names, uint16_t size)
{
	unsigned int i;

	if (names == NULL || size < NUM_LATENCY_STATS)
		return NUM_LATENCY_STATS;

	for (i = 0; i < NUM_LATENCY_STATS; i++)
		snprintf(names[i].name, sizeof(names[i].name),
				"%s", lat_stats_strings[i].name);

	return NUM_LATENCY_STATS;
}
int
rte_latencystats_get(struct rte_metric_value *values, uint16_t size)
{
	if (size < NUM_LATENCY_STATS || values == NULL)
		return NUM_LATENCY_STATS;

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		const struct rte_memzone *mz;

		mz = rte_memzone_lookup(MZ_RTE_LATENCY_STATS);
		if (mz == NULL) {
			RTE_LOG(ERR, LATENCY_STATS,
				"Latency stats memzone not found\n");
			return -ENOMEM;
		}
		glob_stats = mz->addr;
	}

	/* Retrieve latency stats */
	rte_latencystats_fill_values(values);

	return NUM_LATENCY_STATS;
}
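/*
 * Usage sketch (hypothetical application code): both getters return the
 * required entry count when called with NULL or an undersized buffer, so
 * callers can size their arrays first:
 *
 *	int n = rte_latencystats_get_names(NULL, 0);
 *	struct rte_metric_name *names = calloc(n, sizeof(*names));
 *	struct rte_metric_value *vals = calloc(n, sizeof(*vals));
 *	rte_latencystats_get_names(names, n);
 *	rte_latencystats_get(vals, n);
 */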