]> git.proxmox.com Git - ceph.git/blame - ceph/src/seastar/dpdk/examples/performance-thread/common/lthread_diag.c
import 15.2.0 Octopus source
[ceph.git] / ceph / src / seastar / dpdk / examples / performance-thread / common / lthread_diag.c
CommitLineData
9f95a23c
TL
1/* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2015 Intel Corporation
7c673cae
FG
3 */
4
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#include <rte_log.h>
#include <rte_common.h>

#include "lthread_diag.h"
#include "lthread_queue.h"
#include "lthread_pool.h"
#include "lthread_objcache.h"
#include "lthread_sched.h"
#include "lthread_diag_api.h"
14
15
/*
 * Reference value reported and returned by the default diagnostic
 * callback for object-creation events; incremented on every callback
 * invocation (see _lthread_diag_default_cb below).
 */
static uint64_t dummy_ref;
18
/*
 * printf format strings shared by the stats display functions below.
 *
 * The 64-bit count fields use PRIu64 rather than %lu: DIAG_COUNT()
 * values are uint64_t, and "long" is only 32 bits on ILP32 and Windows
 * LLP64 platforms, so %lu would be a mismatched printf argument
 * (undefined behavior) there.
 */
#define DIAG_SCHED_STATS_FORMAT \
"core %d\n%33s %12s %12s %12s %12s\n"

#define DIAG_CACHE_STATS_FORMAT \
"%20s %12" PRIu64 " %12" PRIu64 " %12" PRIu64 " %12" PRIu64 " %12" PRIu64 "\n"

#define DIAG_QUEUE_STATS_FORMAT \
"%20s %12" PRIu64 " %12" PRIu64 " %12" PRIu64 "\n"
27
28
/*
 * texts used in diagnostic events,
 * corresponding diagnostic mask bit positions are given as comment
 *
 * NOTE: entry order must stay in sync with the LT_DIAG_* event values
 * declared in lthread_diag_api.h — the event value indexes this table.
 * Names are space-padded so event log columns line up.
 */
const char *diag_event_text[] = {
	"LTHREAD_CREATE     ",	/* 00 */
	"LTHREAD_EXIT       ",	/* 01 */
	"LTHREAD_JOIN       ",	/* 02 */
	"LTHREAD_CANCEL     ",	/* 03 */
	"LTHREAD_DETACH     ",	/* 04 */
	"LTHREAD_FREE       ",	/* 05 */
	"LTHREAD_SUSPENDED  ",	/* 06 */
	"LTHREAD_YIELD      ",	/* 07 */
	"LTHREAD_RESCHEDULED",	/* 08 */
	"LTHREAD_SLEEP      ",	/* 09 */
	"LTHREAD_RESUMED    ",	/* 10 */
	"LTHREAD_AFFINITY   ",	/* 11 */
	"LTHREAD_TMR_START  ",	/* 12 */
	"LTHREAD_TMR_DELETE ",	/* 13 */
	"LTHREAD_TMR_EXPIRED",	/* 14 */
	"COND_CREATE        ",	/* 15 */
	"COND_DESTROY       ",	/* 16 */
	"COND_WAIT          ",	/* 17 */
	"COND_SIGNAL        ",	/* 18 */
	"COND_BROADCAST     ",	/* 19 */
	"MUTEX_CREATE       ",	/* 20 */
	"MUTEX_DESTROY      ",	/* 21 */
	"MUTEX_LOCK         ",	/* 22 */
	"MUTEX_TRYLOCK      ",	/* 23 */
	"MUTEX_BLOCKED      ",	/* 24 */
	"MUTEX_UNLOCKED     ",	/* 25 */
	"SCHED_CREATE       ",	/* 26 */
	"SCHED_SHUTDOWN     "	/* 27 */
};
63
64
/*
 * set diagnostic mask
 *
 * When LTHREAD_DIAG is compiled out this is a no-op that only logs a
 * hint, so callers need not be conditionally compiled themselves.
 */
void lthread_diagnostic_set_mask(DIAG_USED uint64_t mask)
{
#if LTHREAD_DIAG
	diag_mask = mask;
#else
	RTE_LOG(INFO, LTHREAD,
		"LTHREAD_DIAG is not set, see lthread_diag_api.h\n");
#endif
}
77
78
/*
 * Check consistency of the scheduler stats
 * Only sensible run after the schedulers are stopped
 * Count the number of objects lying in caches and queues
 * and available in the qnode pool.
 * This should be equal to the total capacity of all
 * qnode pools.
 *
 * Logs CRIT on mismatch (a leaked or double-freed qnode), INFO when
 * the books balance. Compiles to an empty function when LTHREAD_DIAG
 * is off.
 */
void
_sched_stats_consistency_check(void);
void
_sched_stats_consistency_check(void)
{
#if LTHREAD_DIAG
	int i;
	struct lthread_sched *sched;
	uint64_t count = 0;	/* qnodes accounted for across all schedulers */
	uint64_t capacity = 0;	/* total qnodes ever created by the pools */

	for (i = 0; i < LTHREAD_MAX_LCORES; i++) {
		sched = schedcore[i];
		/* lcore never ran a scheduler - nothing to count */
		if (sched == NULL)
			continue;

		/* each of these queues consumes a stub node */
		/* (the 8 presumably covers ready, pready and the six
		 * object caches' internal queues - TODO confirm) */
		count += 8;
		count += DIAG_COUNT(sched->ready, size);
		count += DIAG_COUNT(sched->pready, size);
		count += DIAG_COUNT(sched->lthread_cache, available);
		count += DIAG_COUNT(sched->stack_cache, available);
		count += DIAG_COUNT(sched->tls_cache, available);
		count += DIAG_COUNT(sched->per_lthread_cache, available);
		count += DIAG_COUNT(sched->cond_cache, available);
		count += DIAG_COUNT(sched->mutex_cache, available);

		/* the node pool does not consume a stub node */
		if (sched->qnode_pool->fast_alloc != NULL)
			count++;
		count += DIAG_COUNT(sched->qnode_pool, available);

		capacity += DIAG_COUNT(sched->qnode_pool, capacity);
	}
	if (count != capacity) {
		RTE_LOG(CRIT, LTHREAD,
			"Scheduler caches are inconsistent\n");
	} else {
		RTE_LOG(INFO, LTHREAD,
			"Scheduler caches are ok\n");
	}
#endif
}
130
131
#if LTHREAD_DIAG
/*
 * Display node pool stats
 *
 * Prints one DIAG_CACHE_STATS_FORMAT line for the given qnode pool
 * and flushes stdout so output interleaves sanely with other logging.
 */
static inline void
_qnode_pool_display(DIAG_USED struct qnode_pool *p)
{
	uint64_t reads     = DIAG_COUNT(p, rd);
	uint64_t writes    = DIAG_COUNT(p, wr);
	uint64_t avail     = DIAG_COUNT(p, available);
	uint64_t prealloc  = DIAG_COUNT(p, prealloc);
	uint64_t capacity  = DIAG_COUNT(p, capacity);

	printf(DIAG_CACHE_STATS_FORMAT,
	       p->name, reads, writes, avail, prealloc, capacity);
	fflush(stdout);
}
#endif
150
151
#if LTHREAD_DIAG
/*
 * Display queue stats
 *
 * Per-queue counters are only printed when DISPLAY_OBJCACHE_QUEUES is
 * set; otherwise a one-line "disabled" note naming the queue is
 * emitted instead.
 */
static inline void
_lthread_queue_display(DIAG_USED struct lthread_queue *q)
{
#if DISPLAY_OBJCACHE_QUEUES
	printf(DIAG_QUEUE_STATS_FORMAT,
		q->name,
		DIAG_COUNT(q, rd),
		DIAG_COUNT(q, wr),
		DIAG_COUNT(q, size));
	fflush(stdout);
#else
	printf("%s: queue stats disabled\n",
		q->name);

#endif
}
#endif
173
#if LTHREAD_DIAG
/*
 * Display objcache stats
 *
 * Prints one DIAG_CACHE_STATS_FORMAT line for the cache, then the
 * stats of its backing queue, and flushes stdout.
 */
static inline void
_objcache_display(DIAG_USED struct lthread_objcache *c)
{
	uint64_t reads     = DIAG_COUNT(c, rd);
	uint64_t writes    = DIAG_COUNT(c, wr);
	uint64_t avail     = DIAG_COUNT(c, available);
	uint64_t prealloc  = DIAG_COUNT(c, prealloc);
	uint64_t capacity  = DIAG_COUNT(c, capacity);

	printf(DIAG_CACHE_STATS_FORMAT,
	       c->name, reads, writes, avail, prealloc, capacity);
	_lthread_queue_display(c->q);
	fflush(stdout);
}
#endif
193
194/*
195 * Display sched stats
196 */
197void
198lthread_sched_stats_display(void)
199{
200#if LTHREAD_DIAG
201 int i;
202 struct lthread_sched *sched;
203
204 for (i = 0; i < LTHREAD_MAX_LCORES; i++) {
205 sched = schedcore[i];
206 if (sched != NULL) {
207 printf(DIAG_SCHED_STATS_FORMAT,
208 sched->lcore_id,
209 "rd",
210 "wr",
211 "present",
212 "nb preallocs",
213 "capacity");
214 _lthread_queue_display(sched->ready);
215 _lthread_queue_display(sched->pready);
216 _qnode_pool_display(sched->qnode_pool);
217 _objcache_display(sched->lthread_cache);
218 _objcache_display(sched->stack_cache);
219 _objcache_display(sched->tls_cache);
220 _objcache_display(sched->per_lthread_cache);
221 _objcache_display(sched->cond_cache);
222 _objcache_display(sched->mutex_cache);
223 fflush(stdout);
224 }
225 }
226 _sched_stats_consistency_check();
227#else
228 RTE_LOG(INFO, LTHREAD,
229 "lthread diagnostics disabled\n"
230 "hint - set LTHREAD_DIAG in lthread_diag_api.h\n");
231#endif
232}
233
234/*
235 * Defafult diagnostic callback
236 */
237static uint64_t
238_lthread_diag_default_cb(uint64_t time, struct lthread *lt, int diag_event,
239 uint64_t diag_ref, const char *text, uint64_t p1, uint64_t p2)
240{
241 uint64_t _p2;
242 int lcore = (int) rte_lcore_id();
243
244 switch (diag_event) {
245 case LT_DIAG_LTHREAD_CREATE:
246 case LT_DIAG_MUTEX_CREATE:
247 case LT_DIAG_COND_CREATE:
248 _p2 = dummy_ref;
249 break;
250 default:
251 _p2 = p2;
252 break;
253 }
254
255 printf("%"PRIu64" %d %8.8lx %8.8lx %s %8.8lx %8.8lx\n",
256 time,
257 lcore,
258 (uint64_t) lt,
259 diag_ref,
260 text,
261 p1,
262 _p2);
263
264 return dummy_ref++;
265}
266
/*
 * plug in default diag callback with mask off
 *
 * RTE_INIT registers this as a constructor that runs before main(), so
 * diag_cb is never NULL even if the application never calls
 * lthread_diagnostic_enable(); with diag_mask == 0 no events fire.
 */
RTE_INIT(_lthread_diag_ctor)
{
	diag_cb = _lthread_diag_default_cb;
	diag_mask = 0;
}
275
276
277/*
278 * enable diagnostics
279 */
280void lthread_diagnostic_enable(DIAG_USED diag_callback cb,
281 DIAG_USED uint64_t mask)
282{
283#if LTHREAD_DIAG
284 if (cb == NULL)
285 diag_cb = _lthread_diag_default_cb;
286 else
287 diag_cb = cb;
288 diag_mask = mask;
289#else
290 RTE_LOG(INFO, LTHREAD,
291 "LTHREAD_DIAG is not set, see lthread_diag_api.h\n");
292#endif
293}