/* tools/perf/builtin-lock.c */
#include "builtin.h"
#include "perf.h"

#include "util/evsel.h"
#include "util/util.h"
#include "util/cache.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"

#include "util/parse-options.h"
#include "util/trace-event.h"

#include "util/debug.h"
#include "util/session.h"
#include "util/tool.h"

#include <sys/types.h>
#include <sys/prctl.h>
#include <semaphore.h>
#include <pthread.h>
#include <math.h>
#include <limits.h>

#include <linux/list.h>
#include <linux/hash.h>

static struct perf_session *session;

/* based on kernel/lockdep.c */
#define LOCKHASH_BITS		12
#define LOCKHASH_SIZE		(1UL << LOCKHASH_BITS)

static struct list_head lockhash_table[LOCKHASH_SIZE];

#define __lockhashfn(key)	hash_long((unsigned long)key, LOCKHASH_BITS)
#define lockhashentry(key)	(lockhash_table + __lockhashfn((key)))

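/*
 * Per-lock statistics. Instances are keyed by the address of the kernel's
 * lockdep_map (see lock_stat_findnew() below), live in lockhash_table and
 * are re-sorted into an rb-tree for output.
 */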
struct lock_stat {
	struct list_head	hash_entry;
	struct rb_node		rb;		/* used for sorting */

	/*
	 * FIXME: raw_field_value() returns unsigned long long,
	 * so the address of the lockdep_map has to be handled as 64 bit.
	 * Is there a better solution?
	 */
	void			*addr;		/* address of lockdep_map, used as ID */
	char			*name;		/* for strcpy(), we cannot use const */

	unsigned int		nr_acquire;
	unsigned int		nr_acquired;
	unsigned int		nr_contended;
	unsigned int		nr_release;

	unsigned int		nr_readlock;
	unsigned int		nr_trylock;
	/* these times are in nanoseconds */
	u64			wait_time_total;
	u64			wait_time_min;
	u64			wait_time_max;

	int			discard; /* blacklist flag: set once a broken sequence is seen */
};

/*
 * States of lock_seq_stat
 *
 * UNINITIALIZED is required for detecting the first acquire event.
 * By the nature of lock events there is no guarantee that the first
 * event seen for a lock is acquire; it can be acquired, contended or
 * release.
 */
#define SEQ_STATE_UNINITIALIZED	0	/* initial state */
#define SEQ_STATE_RELEASED	1
#define SEQ_STATE_ACQUIRING	2
#define SEQ_STATE_ACQUIRED	3
#define SEQ_STATE_READ_ACQUIRED	4
#define SEQ_STATE_CONTENDED	5

/*
 * MAX_LOCK_DEPTH
 * Imported from include/linux/sched.h.
 * Should this be synchronized?
 */
#define MAX_LOCK_DEPTH 48

/*
 * struct lock_seq_stat:
 * Holds the state of one lock sequence
 * 1) acquire -> acquired -> release
 * 2) acquire -> contended -> acquired -> release
 * 3) acquire (with read or try) -> release
 * 4) Are there other patterns?
 */
struct lock_seq_stat {
	struct list_head	list;
	int			state;
	u64			prev_event_time;
	void			*addr;

	int			read_count;
};

struct thread_stat {
	struct rb_node		rb;

	u32			tid;
	struct list_head	seq_list;
};

static struct rb_root		thread_stats;

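/*
 * Per-thread bookkeeping: thread_stats is an rb-tree indexed by tid, and
 * each thread carries the list of lock sequences (seq_list) that are
 * currently in flight for that thread.
 */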
static struct thread_stat *thread_stat_find(u32 tid)
{
	struct rb_node *node;
	struct thread_stat *st;

	node = thread_stats.rb_node;
	while (node) {
		st = container_of(node, struct thread_stat, rb);
		if (st->tid == tid)
			return st;
		else if (tid < st->tid)
			node = node->rb_left;
		else
			node = node->rb_right;
	}

	return NULL;
}

static void thread_stat_insert(struct thread_stat *new)
{
	struct rb_node **rb = &thread_stats.rb_node;
	struct rb_node *parent = NULL;
	struct thread_stat *p;

	while (*rb) {
		p = container_of(*rb, struct thread_stat, rb);
		parent = *rb;

		if (new->tid < p->tid)
			rb = &(*rb)->rb_left;
		else if (new->tid > p->tid)
			rb = &(*rb)->rb_right;
		else
			BUG_ON("inserting invalid thread_stat\n");
	}

	rb_link_node(&new->rb, parent, rb);
	rb_insert_color(&new->rb, &thread_stats);
}

static struct thread_stat *thread_stat_findnew_after_first(u32 tid)
{
	struct thread_stat *st;

	st = thread_stat_find(tid);
	if (st)
		return st;

	st = zalloc(sizeof(struct thread_stat));
	if (!st) {
		pr_err("memory allocation failed\n");
		return NULL;
	}

	st->tid = tid;
	INIT_LIST_HEAD(&st->seq_list);

	thread_stat_insert(st);

	return st;
}

static struct thread_stat *thread_stat_findnew_first(u32 tid);
static struct thread_stat *(*thread_stat_findnew)(u32 tid) =
	thread_stat_findnew_first;

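/*
 * thread_stat_findnew starts out pointing at thread_stat_findnew_first,
 * which seeds the empty rb-tree without searching it and then switches the
 * pointer to thread_stat_findnew_after_first for all later lookups.
 */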
static struct thread_stat *thread_stat_findnew_first(u32 tid)
{
	struct thread_stat *st;

	st = zalloc(sizeof(struct thread_stat));
	if (!st) {
		pr_err("memory allocation failed\n");
		return NULL;
	}
	st->tid = tid;
	INIT_LIST_HEAD(&st->seq_list);

	rb_link_node(&st->rb, NULL, &thread_stats.rb_node);
	rb_insert_color(&st->rb, &thread_stats);

	thread_stat_findnew = thread_stat_findnew_after_first;
	return st;
}

/* build simple key functions: return 1 if "one" is bigger than "two" */
#define SINGLE_KEY(member)						\
	static int lock_stat_key_ ## member(struct lock_stat *one,	\
					    struct lock_stat *two)	\
	{								\
		return one->member > two->member;			\
	}

SINGLE_KEY(nr_acquired)
SINGLE_KEY(nr_contended)
SINGLE_KEY(wait_time_total)
SINGLE_KEY(wait_time_max)

static int lock_stat_key_wait_time_min(struct lock_stat *one,
					struct lock_stat *two)
{
	u64 s1 = one->wait_time_min;
	u64 s2 = two->wait_time_min;
	if (s1 == ULLONG_MAX)
		s1 = 0;
	if (s2 == ULLONG_MAX)
		s2 = 0;
	return s1 > s2;
}

struct lock_key {
	/*
	 * name: the key name specified by the user;
	 * it should be simpler than the raw name of the member,
	 * e.g. nr_acquired -> acquired, wait_time_total -> wait_total
	 */
	const char		*name;
	int			(*key)(struct lock_stat*, struct lock_stat*);
};

static const char		*sort_key = "acquired";

static int			(*compare)(struct lock_stat *, struct lock_stat *);

static struct rb_root		result;	/* place to store sorted data */

#define DEF_KEY_LOCK(name, fn_suffix)	\
	{ #name, lock_stat_key_ ## fn_suffix }
struct lock_key keys[] = {
	DEF_KEY_LOCK(acquired, nr_acquired),
	DEF_KEY_LOCK(contended, nr_contended),
	DEF_KEY_LOCK(wait_total, wait_time_total),
	DEF_KEY_LOCK(wait_min, wait_time_min),
	DEF_KEY_LOCK(wait_max, wait_time_max),

	/* more complicated extra comparisons should go here */

	{ NULL, NULL }
};

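/* map the user supplied -k/--key string onto one of the comparators above */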
static int select_key(void)
{
	int i;

	for (i = 0; keys[i].name; i++) {
		if (!strcmp(keys[i].name, sort_key)) {
			compare = keys[i].key;
			return 0;
		}
	}

	pr_err("Unknown compare key: %s\n", sort_key);

	return -1;
}

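/*
 * The result tree keeps "bigger" elements to the left, so walking to the
 * leftmost node in pop_from_result() yields locks in descending order of
 * the selected key.
 */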
static void insert_to_result(struct lock_stat *st,
			     int (*bigger)(struct lock_stat *, struct lock_stat *))
{
	struct rb_node **rb = &result.rb_node;
	struct rb_node *parent = NULL;
	struct lock_stat *p;

	while (*rb) {
		p = container_of(*rb, struct lock_stat, rb);
		parent = *rb;

		if (bigger(st, p))
			rb = &(*rb)->rb_left;
		else
			rb = &(*rb)->rb_right;
	}

	rb_link_node(&st->rb, parent, rb);
	rb_insert_color(&st->rb, &result);
}

/* returns the leftmost element of result, and erases it */
static struct lock_stat *pop_from_result(void)
{
	struct rb_node *node = result.rb_node;

	if (!node)
		return NULL;

	while (node->rb_left)
		node = node->rb_left;

	rb_erase(node, &result);
	return container_of(node, struct lock_stat, rb);
}

static struct lock_stat *lock_stat_findnew(void *addr, const char *name)
{
	struct list_head *entry = lockhashentry(addr);
	struct lock_stat *ret, *new;

	list_for_each_entry(ret, entry, hash_entry) {
		if (ret->addr == addr)
			return ret;
	}

	new = zalloc(sizeof(struct lock_stat));
	if (!new)
		goto alloc_failed;

	new->addr = addr;
	new->name = zalloc(sizeof(char) * strlen(name) + 1);
	if (!new->name)
		goto alloc_failed;
	strcpy(new->name, name);

	new->wait_time_min = ULLONG_MAX;

	list_add(&new->hash_entry, entry);
	return new;

alloc_failed:
	pr_err("memory allocation failed\n");
	return NULL;
}

static const char *input_name;

struct raw_event_sample {
	u32			size;
	char			data[0];
};

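/*
 * Decoded payloads of the lock:lock_* tracepoints; the fields are pulled
 * out of the raw sample data with raw_field_value()/raw_field_ptr().
 */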
struct trace_acquire_event {
	void			*addr;
	const char		*name;
	int			flag;
};

struct trace_acquired_event {
	void			*addr;
	const char		*name;
};

struct trace_contended_event {
	void			*addr;
	const char		*name;
};

struct trace_release_event {
	void			*addr;
	const char		*name;
};

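/*
 * Callbacks invoked for each lock event; they get the decoded payload plus
 * the perf_sample for the tid and timestamp. Only the "report" set is
 * implemented below (see report_lock_ops).
 */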
struct trace_lock_handler {
	int (*acquire_event)(struct trace_acquire_event *,
			     const struct perf_sample *sample);

	int (*acquired_event)(struct trace_acquired_event *,
			      const struct perf_sample *sample);

	int (*contended_event)(struct trace_contended_event *,
			       const struct perf_sample *sample);

	int (*release_event)(struct trace_release_event *,
			     const struct perf_sample *sample);
};

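/*
 * Find or create the sequence state for (thread, lock address). A fresh
 * sequence starts in SEQ_STATE_UNINITIALIZED because the trace may begin
 * in the middle of a sequence.
 */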
static struct lock_seq_stat *get_seq(struct thread_stat *ts, void *addr)
{
	struct lock_seq_stat *seq;

	list_for_each_entry(seq, &ts->seq_list, list) {
		if (seq->addr == addr)
			return seq;
	}

	seq = zalloc(sizeof(struct lock_seq_stat));
	if (!seq) {
		pr_err("memory allocation failed\n");
		return NULL;
	}
	seq->state = SEQ_STATE_UNINITIALIZED;
	seq->addr = addr;

	list_add(&seq->list, &ts->seq_list);
	return seq;
}

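/*
 * Histogram of sequences that had to be discarded because an event arrived
 * in an impossible order, indexed by the event type that exposed the
 * problem.
 */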
enum broken_state {
	BROKEN_ACQUIRE,
	BROKEN_ACQUIRED,
	BROKEN_CONTENDED,
	BROKEN_RELEASE,
	BROKEN_MAX,
};

static int bad_hist[BROKEN_MAX];

enum acquire_flags {
	TRY_LOCK = 1,
	READ_LOCK = 2,
};

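/*
 * The report_lock_*_event handlers below drive the per-sequence state
 * machine described above struct lock_seq_stat: acquire/contended/acquired/
 * release events advance seq->state, accumulate wait times on the owning
 * lock_stat, and discard the sequence when a transition is invalid.
 */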
static int
report_lock_acquire_event(struct trace_acquire_event *acquire_event,
			  const struct perf_sample *sample)
{
	struct lock_stat *ls;
	struct thread_stat *ts;
	struct lock_seq_stat *seq;

	ls = lock_stat_findnew(acquire_event->addr, acquire_event->name);
	if (!ls)
		return -1;
	if (ls->discard)
		return 0;

	ts = thread_stat_findnew(sample->tid);
	if (!ts)
		return -1;

	seq = get_seq(ts, acquire_event->addr);
	if (!seq)
		return -1;

	switch (seq->state) {
	case SEQ_STATE_UNINITIALIZED:
	case SEQ_STATE_RELEASED:
		if (!acquire_event->flag) {
			seq->state = SEQ_STATE_ACQUIRING;
		} else {
			if (acquire_event->flag & TRY_LOCK)
				ls->nr_trylock++;
			if (acquire_event->flag & READ_LOCK)
				ls->nr_readlock++;
			seq->state = SEQ_STATE_READ_ACQUIRED;
			seq->read_count = 1;
			ls->nr_acquired++;
		}
		break;
	case SEQ_STATE_READ_ACQUIRED:
		if (acquire_event->flag & READ_LOCK) {
			seq->read_count++;
			ls->nr_acquired++;
			goto end;
		} else {
			goto broken;
		}
		break;
	case SEQ_STATE_ACQUIRED:
	case SEQ_STATE_ACQUIRING:
	case SEQ_STATE_CONTENDED:
broken:
		/* broken lock sequence, discard it */
		ls->discard = 1;
		bad_hist[BROKEN_ACQUIRE]++;
		list_del(&seq->list);
		free(seq);
		goto end;
		break;
	default:
		BUG_ON("Unknown state of lock sequence found!\n");
		break;
	}

	ls->nr_acquire++;
	seq->prev_event_time = sample->time;
end:
	return 0;
}

static int
report_lock_acquired_event(struct trace_acquired_event *acquired_event,
			   const struct perf_sample *sample)
{
	u64 timestamp = sample->time;
	struct lock_stat *ls;
	struct thread_stat *ts;
	struct lock_seq_stat *seq;
	u64 contended_term;

	ls = lock_stat_findnew(acquired_event->addr, acquired_event->name);
	if (!ls)
		return -1;
	if (ls->discard)
		return 0;

	ts = thread_stat_findnew(sample->tid);
	if (!ts)
		return -1;

	seq = get_seq(ts, acquired_event->addr);
	if (!seq)
		return -1;

	switch (seq->state) {
	case SEQ_STATE_UNINITIALIZED:
		/* orphan event, do nothing */
		return 0;
	case SEQ_STATE_ACQUIRING:
		break;
	case SEQ_STATE_CONTENDED:
		contended_term = timestamp - seq->prev_event_time;
		ls->wait_time_total += contended_term;
		if (contended_term < ls->wait_time_min)
			ls->wait_time_min = contended_term;
		if (ls->wait_time_max < contended_term)
			ls->wait_time_max = contended_term;
		break;
	case SEQ_STATE_RELEASED:
	case SEQ_STATE_ACQUIRED:
	case SEQ_STATE_READ_ACQUIRED:
		/* broken lock sequence, discard it */
		ls->discard = 1;
		bad_hist[BROKEN_ACQUIRED]++;
		list_del(&seq->list);
		free(seq);
		goto end;
		break;

	default:
		BUG_ON("Unknown state of lock sequence found!\n");
		break;
	}

	seq->state = SEQ_STATE_ACQUIRED;
	ls->nr_acquired++;
	seq->prev_event_time = timestamp;
end:
	return 0;
}

static int
report_lock_contended_event(struct trace_contended_event *contended_event,
			    const struct perf_sample *sample)
{
	struct lock_stat *ls;
	struct thread_stat *ts;
	struct lock_seq_stat *seq;

	ls = lock_stat_findnew(contended_event->addr, contended_event->name);
	if (!ls)
		return -1;
	if (ls->discard)
		return 0;

	ts = thread_stat_findnew(sample->tid);
	if (!ts)
		return -1;

	seq = get_seq(ts, contended_event->addr);
	if (!seq)
		return -1;

	switch (seq->state) {
	case SEQ_STATE_UNINITIALIZED:
		/* orphan event, do nothing */
		return 0;
	case SEQ_STATE_ACQUIRING:
		break;
	case SEQ_STATE_RELEASED:
	case SEQ_STATE_ACQUIRED:
	case SEQ_STATE_READ_ACQUIRED:
	case SEQ_STATE_CONTENDED:
		/* broken lock sequence, discard it */
		ls->discard = 1;
		bad_hist[BROKEN_CONTENDED]++;
		list_del(&seq->list);
		free(seq);
		goto end;
		break;
	default:
		BUG_ON("Unknown state of lock sequence found!\n");
		break;
	}

	seq->state = SEQ_STATE_CONTENDED;
	ls->nr_contended++;
	seq->prev_event_time = sample->time;
end:
	return 0;
}

static int
report_lock_release_event(struct trace_release_event *release_event,
			  const struct perf_sample *sample)
{
	struct lock_stat *ls;
	struct thread_stat *ts;
	struct lock_seq_stat *seq;

	ls = lock_stat_findnew(release_event->addr, release_event->name);
	if (!ls)
		return -1;
	if (ls->discard)
		return 0;

	ts = thread_stat_findnew(sample->tid);
	if (!ts)
		return -1;

	seq = get_seq(ts, release_event->addr);
	if (!seq)
		return -1;

	switch (seq->state) {
	case SEQ_STATE_UNINITIALIZED:
		goto end;
		break;
	case SEQ_STATE_ACQUIRED:
		break;
	case SEQ_STATE_READ_ACQUIRED:
		seq->read_count--;
		BUG_ON(seq->read_count < 0);
		if (!seq->read_count) {
			ls->nr_release++;
			goto end;
		}
		break;
	case SEQ_STATE_ACQUIRING:
	case SEQ_STATE_CONTENDED:
	case SEQ_STATE_RELEASED:
		/* broken lock sequence, discard it */
		ls->discard = 1;
		bad_hist[BROKEN_RELEASE]++;
		goto free_seq;
		break;
	default:
		BUG_ON("Unknown state of lock sequence found!\n");
		break;
	}

	ls->nr_release++;
free_seq:
	list_del(&seq->list);
	free(seq);
end:
	return 0;
}

/* lock oriented handlers */
/* TODO: handlers for CPU oriented, thread oriented */
static struct trace_lock_handler report_lock_ops  = {
	.acquire_event		= report_lock_acquire_event,
	.acquired_event		= report_lock_acquired_event,
	.contended_event	= report_lock_contended_event,
	.release_event		= report_lock_release_event,
};

static struct trace_lock_handler *trace_handler;

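/*
 * The perf_evsel__process_lock_* helpers unpack the tracepoint fields
 * (lockdep_addr, name, flag) from sample->raw_data and hand them to the
 * currently selected trace_handler.
 */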
static int perf_evsel__process_lock_acquire(struct perf_evsel *evsel,
					    struct perf_sample *sample)
{
	struct trace_acquire_event acquire_event;
	struct event_format *event = evsel->tp_format;
	void *data = sample->raw_data;
	u64 tmp;		/* this is required for casting... */
	int rc = 0;

	tmp = raw_field_value(event, "lockdep_addr", data);
	memcpy(&acquire_event.addr, &tmp, sizeof(void *));
	acquire_event.name = (char *)raw_field_ptr(event, "name", data);
	acquire_event.flag = (int)raw_field_value(event, "flag", data);

	if (trace_handler->acquire_event)
		rc = trace_handler->acquire_event(&acquire_event, sample);

	return rc;
}

static int perf_evsel__process_lock_acquired(struct perf_evsel *evsel,
					     struct perf_sample *sample)
{
	struct trace_acquired_event acquired_event;
	struct event_format *event = evsel->tp_format;
	void *data = sample->raw_data;
	u64 tmp;		/* this is required for casting... */
	int rc = 0;

	tmp = raw_field_value(event, "lockdep_addr", data);
	memcpy(&acquired_event.addr, &tmp, sizeof(void *));
	acquired_event.name = (char *)raw_field_ptr(event, "name", data);

	if (trace_handler->acquired_event)
		rc = trace_handler->acquired_event(&acquired_event, sample);

	return rc;
}

static int perf_evsel__process_lock_contended(struct perf_evsel *evsel,
					      struct perf_sample *sample)
{
	struct trace_contended_event contended_event;
	struct event_format *event = evsel->tp_format;
	void *data = sample->raw_data;
	u64 tmp;		/* this is required for casting... */
	int rc = 0;

	tmp = raw_field_value(event, "lockdep_addr", data);
	memcpy(&contended_event.addr, &tmp, sizeof(void *));
	contended_event.name = (char *)raw_field_ptr(event, "name", data);

	if (trace_handler->contended_event)
		rc = trace_handler->contended_event(&contended_event, sample);

	return rc;
}

static int perf_evsel__process_lock_release(struct perf_evsel *evsel,
					    struct perf_sample *sample)
{
	struct trace_release_event release_event;
	struct event_format *event = evsel->tp_format;
	void *data = sample->raw_data;
	u64 tmp;		/* this is required for casting... */
	int rc = 0;

	tmp = raw_field_value(event, "lockdep_addr", data);
	memcpy(&release_event.addr, &tmp, sizeof(void *));
	release_event.name = (char *)raw_field_ptr(event, "name", data);

	if (trace_handler->release_event)
		rc = trace_handler->release_event(&release_event, sample);

	return rc;
}

static int perf_evsel__process_lock_event(struct perf_evsel *evsel,
					  struct perf_sample *sample)
{
	struct event_format *event = evsel->tp_format;
	int rc = 0;

	if (!strcmp(event->name, "lock_acquire"))
		rc = perf_evsel__process_lock_acquire(evsel, sample);
	if (!strcmp(event->name, "lock_acquired"))
		rc = perf_evsel__process_lock_acquired(evsel, sample);
	if (!strcmp(event->name, "lock_contended"))
		rc = perf_evsel__process_lock_contended(evsel, sample);
	if (!strcmp(event->name, "lock_release"))
		rc = perf_evsel__process_lock_release(evsel, sample);

	return rc;
}

static void print_bad_events(int bad, int total)
{
	/* Output for debug; this has to be removed */
	int i;
	const char *name[4] =
		{ "acquire", "acquired", "contended", "release" };

	pr_info("\n=== output for debug===\n\n");
	pr_info("bad: %d, total: %d\n", bad, total);
	pr_info("bad rate: %f %%\n", (double)bad / (double)total * 100);
	pr_info("histogram of events caused bad sequence\n");
	for (i = 0; i < BROKEN_MAX; i++)
		pr_info(" %10s: %d\n", name[i], bad_hist[i]);
}

/* TODO: various ways to print, coloring, nano or milli sec */
static void print_result(void)
{
	struct lock_stat *st;
	char cut_name[20];
	int bad, total;

	pr_info("%20s ", "Name");
	pr_info("%10s ", "acquired");
	pr_info("%10s ", "contended");

	pr_info("%15s ", "total wait (ns)");
	pr_info("%15s ", "max wait (ns)");
	pr_info("%15s ", "min wait (ns)");

	pr_info("\n\n");

	bad = total = 0;
	while ((st = pop_from_result())) {
		total++;
		if (st->discard) {
			bad++;
			continue;
		}
		bzero(cut_name, 20);

		if (strlen(st->name) < 16) {
			/* output raw name */
			pr_info("%20s ", st->name);
		} else {
			strncpy(cut_name, st->name, 16);
			cut_name[16] = '.';
			cut_name[17] = '.';
			cut_name[18] = '.';
			cut_name[19] = '\0';
			/* cut off the name to keep the output aligned */
			pr_info("%20s ", cut_name);
		}

		pr_info("%10u ", st->nr_acquired);
		pr_info("%10u ", st->nr_contended);

		pr_info("%15" PRIu64 " ", st->wait_time_total);
		pr_info("%15" PRIu64 " ", st->wait_time_max);
		pr_info("%15" PRIu64 " ", st->wait_time_min == ULLONG_MAX ?
		       0 : st->wait_time_min);
		pr_info("\n");
	}

	print_bad_events(bad, total);
}

static bool info_threads, info_map;

static void dump_threads(void)
{
	struct thread_stat *st;
	struct rb_node *node;
	struct thread *t;

	pr_info("%10s: comm\n", "Thread ID");

	node = rb_first(&thread_stats);
	while (node) {
		st = container_of(node, struct thread_stat, rb);
		t = perf_session__findnew(session, st->tid);
		pr_info("%10d: %s\n", st->tid, t->comm);
		node = rb_next(node);
	};
}

static void dump_map(void)
{
	unsigned int i;
	struct lock_stat *st;

	pr_info("Address of instance: name of class\n");
	for (i = 0; i < LOCKHASH_SIZE; i++) {
		list_for_each_entry(st, &lockhash_table[i], hash_entry) {
			pr_info(" %p: %s\n", st->addr, st->name);
		}
	}
}

static int dump_info(void)
{
	int rc = 0;

	if (info_threads)
		dump_threads();
	else if (info_map)
		dump_map();
	else {
		rc = -1;
		pr_err("Unknown type of information\n");
	}

	return rc;
}

static int process_sample_event(struct perf_tool *tool __maybe_unused,
				union perf_event *event,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine)
{
	struct thread *thread = machine__findnew_thread(machine, sample->tid);

	if (thread == NULL) {
		pr_debug("problem processing %d event, skipping it.\n",
			event->header.type);
		return -1;
	}

	return perf_evsel__process_lock_event(evsel, sample);
}

static struct perf_tool eops = {
	.sample			= process_sample_event,
	.comm			= perf_event__process_comm,
	.ordered_samples	= true,
};

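/*
 * ordered_samples in eops above asks the session layer to deliver samples
 * in timestamp order, which the sequence state machine relies on.
 */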
static int read_events(void)
{
	session = perf_session__new(input_name, O_RDONLY, 0, false, &eops);
	if (!session) {
		pr_err("Initializing perf session failed\n");
		return -1;
	}

	return perf_session__process_events(session, &eops);
}

static void sort_result(void)
{
	unsigned int i;
	struct lock_stat *st;

	for (i = 0; i < LOCKHASH_SIZE; i++) {
		list_for_each_entry(st, &lockhash_table[i], hash_entry) {
			insert_to_result(st, compare);
		}
	}
}

static int __cmd_report(void)
{
	setup_pager();

	if ((select_key() != 0) ||
	    (read_events() != 0))
		return -1;

	sort_result();
	print_result();

	return 0;
}

static const char * const report_usage[] = {
	"perf lock report [<options>]",
	NULL
};

static const struct option report_options[] = {
	OPT_STRING('k', "key", &sort_key, "acquired",
		    "key for sorting (acquired / contended / wait_total / wait_max / wait_min)"),
	/* TODO: type */
	OPT_END()
};

static const char * const info_usage[] = {
	"perf lock info [<options>]",
	NULL
};

static const struct option info_options[] = {
	OPT_BOOLEAN('t', "threads", &info_threads,
		    "dump thread list in perf.data"),
	OPT_BOOLEAN('m', "map", &info_map,
		    "map of lock instances (address:name table)"),
	OPT_END()
};

static const char * const lock_usage[] = {
	"perf lock [<options>] {record|report|script|info}",
	NULL
};

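/*
 * Typical usage (hypothetical session):
 *
 *   perf lock record <command>       # writes perf.data with the lock:* events
 *   perf lock report -k wait_total   # per-lock statistics, sorted by total wait
 *   perf lock info -t                # threads seen in perf.data
 */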
static const struct option lock_options[] = {
	OPT_STRING('i', "input", &input_name, "file", "input file name"),
	OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"),
	OPT_END()
};

static const char * const lock_tracepoints[] = {
	"lock:lock_acquire",	/* CONFIG_LOCKDEP */
	"lock:lock_acquired",	/* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
	"lock:lock_contended",	/* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
	"lock:lock_release",	/* CONFIG_LOCKDEP */
};

static const char *record_args[] = {
	"record",
	"-R",
	"-f",
	"-m", "1024",
	"-c", "1",
};

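/*
 * __cmd_record() rewrites the command line into an ordinary perf record
 * invocation, roughly:
 *
 *   perf record -R -f -m 1024 -c 1 \
 *	-e lock:lock_acquire -e lock:lock_acquired \
 *	-e lock:lock_contended -e lock:lock_release <user args>
 */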
static int __cmd_record(int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;

	for (i = 0; i < ARRAY_SIZE(lock_tracepoints); i++) {
		if (!is_valid_tracepoint(lock_tracepoints[i])) {
			pr_err("tracepoint %s is not enabled. "
			       "Are CONFIG_LOCKDEP and CONFIG_LOCK_STAT enabled?\n",
			       lock_tracepoints[i]);
			return 1;
		}
	}

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	/* factor of 2 is for -e in front of each tracepoint */
	rec_argc += 2 * ARRAY_SIZE(lock_tracepoints);

	rec_argv = calloc(rec_argc + 1, sizeof(char *));
	if (rec_argv == NULL)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 0; j < ARRAY_SIZE(lock_tracepoints); j++) {
		rec_argv[i++] = "-e";
		rec_argv[i++] = strdup(lock_tracepoints[j]);
	}

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	return cmd_record(i, rec_argv, NULL);
}

int cmd_lock(int argc, const char **argv, const char *prefix __maybe_unused)
{
	unsigned int i;
	int rc = 0;

	symbol__init();
	for (i = 0; i < LOCKHASH_SIZE; i++)
		INIT_LIST_HEAD(lockhash_table + i);

	argc = parse_options(argc, argv, lock_options, lock_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(lock_usage, lock_options);

	if (!strncmp(argv[0], "rec", 3)) {
		return __cmd_record(argc, argv);
	} else if (!strncmp(argv[0], "report", 6)) {
		trace_handler = &report_lock_ops;
		if (argc) {
			argc = parse_options(argc, argv,
					     report_options, report_usage, 0);
			if (argc)
				usage_with_options(report_usage, report_options);
		}
		__cmd_report();
	} else if (!strcmp(argv[0], "script")) {
		/* Aliased to 'perf script' */
		return cmd_script(argc, argv, prefix);
	} else if (!strcmp(argv[0], "info")) {
		if (argc) {
			argc = parse_options(argc, argv,
					     info_options, info_usage, 0);
			if (argc)
				usage_with_options(info_usage, info_options);
		}
		/* recycling report_lock_ops */
		trace_handler = &report_lock_ops;
		setup_pager();
		if (read_events() != 0)
			rc = -1;
		else
			rc = dump_info();
	} else {
		usage_with_options(lock_usage, lock_options);
	}

	return rc;
}