4 #include "util/evlist.h"
5 #include "util/evsel.h"
7 #include "util/cache.h"
8 #include "util/symbol.h"
9 #include "util/thread.h"
10 #include "util/header.h"
12 #include "util/parse-options.h"
13 #include "util/trace-event.h"
15 #include "util/debug.h"
16 #include "util/session.h"
17 #include "util/tool.h"
19 #include <sys/types.h>
20 #include <sys/prctl.h>
21 #include <semaphore.h>
26 #include <linux/list.h>
27 #include <linux/hash.h>
/*
 * NOTE(review): this file is a garbled extraction of perf's builtin-lock.c.
 * Original source lines are split across physical lines and the embedded
 * numbers ("29", "31", ...) are remnants of the original line numbering.
 * Code is kept byte-identical throughout; only comments were added.
 */
/* Global session handle; created in read_events(). */
29 static struct perf_session
*session
;
31 /* based on kernel/lockdep.c */
32 #define LOCKHASH_BITS 12
33 #define LOCKHASH_SIZE (1UL << LOCKHASH_BITS)
/* Hash buckets of lock_stat entries, keyed by the lockdep_map address. */
35 static struct list_head lockhash_table
[LOCKHASH_SIZE
];
37 #define __lockhashfn(key) hash_long((unsigned long)key, LOCKHASH_BITS)
38 #define lockhashentry(key) (lockhash_table + __lockhashfn((key)))
/*
 * Fields of struct lock_stat (per-lock statistics record).
 * NOTE(review): the "struct lock_stat {" header line and the closing brace
 * are missing from this extraction; only the member fragments remain.
 */
41 struct list_head hash_entry
;
42 struct rb_node rb
; /* used for sorting */
45 * FIXME: perf_evsel__intval() returns u64,
46 * so address of lockdep_map should be dealed as 64bit.
47 * Is there more better solution?
49 void *addr
; /* address of lockdep_map, used as ID */
50 char *name
; /* for strcpy(), we cannot use const */
/* Event counters, bumped by the report_lock_*_event() handlers below. */
52 unsigned int nr_acquire
;
53 unsigned int nr_acquired
;
54 unsigned int nr_contended
;
55 unsigned int nr_release
;
57 unsigned int nr_readlock
;
58 unsigned int nr_trylock
;
59 /* these times are in nano sec. */
64 int discard
; /* flag of blacklist */
/* State machine constants for one lock-acquisition sequence (see the
 * switch statements in the report_lock_*_event() handlers). */
68 * States of lock_seq_stat
70 * UNINITIALIZED is required for detecting first event of acquire.
71 * As the nature of lock events, there is no guarantee
72 * that the first event for the locks are acquire,
73 * it can be acquired, contended or release.
75 #define SEQ_STATE_UNINITIALIZED 0 /* initial state */
76 #define SEQ_STATE_RELEASED 1
77 #define SEQ_STATE_ACQUIRING 2
78 #define SEQ_STATE_ACQUIRED 3
79 #define SEQ_STATE_READ_ACQUIRED 4
80 #define SEQ_STATE_CONTENDED 5
84 * Imported from include/linux/sched.h.
85 * Should this be synchronized?
87 #define MAX_LOCK_DEPTH 48
/*
 * NOTE(review): struct lock_seq_stat and the (unnamed here) per-thread
 * stat struct are both truncated in this extraction — most members and
 * the closing braces are missing.
 */
90 * struct lock_seq_stat:
91 * Place to put on state of one lock sequence
92 * 1) acquire -> acquired -> release
93 * 2) acquire -> contended -> acquired -> release
94 * 3) acquire (with read or try) -> release
95 * 4) Are there other patterns?
97 struct lock_seq_stat
{
98 struct list_head list
;
/* seq_list belongs to the per-thread stat struct (header not visible). */
110 struct list_head seq_list
;
/* rb-tree of per-thread stats, ordered by tid (see thread_stat_insert). */
113 static struct rb_root thread_stats
;
/*
 * Look up the per-thread stat for 'tid' by walking the thread_stats
 * rb-tree. NOTE(review): the loop header, the equality branch and the
 * return statements are missing from this extraction.
 */
115 static struct thread_stat
*thread_stat_find(u32 tid
)
117 struct rb_node
*node
;
118 struct thread_stat
*st
;
120 node
= thread_stats
.rb_node
;
122 st
= container_of(node
, struct thread_stat
, rb
);
125 else if (tid
< st
->tid
)
126 node
= node
->rb_left
;
128 node
= node
->rb_right
;
/*
 * Insert 'new' into the thread_stats rb-tree, keyed by tid. Duplicate
 * tids are treated as a bug (BUG_ON below). NOTE(review): the descent
 * loop header and parent assignment lines are missing here.
 */
134 static void thread_stat_insert(struct thread_stat
*new)
136 struct rb_node
**rb
= &thread_stats
.rb_node
;
137 struct rb_node
*parent
= NULL
;
138 struct thread_stat
*p
;
141 p
= container_of(*rb
, struct thread_stat
, rb
);
144 if (new->tid
< p
->tid
)
145 rb
= &(*rb
)->rb_left
;
146 else if (new->tid
> p
->tid
)
147 rb
= &(*rb
)->rb_right
;
/* Equal tid: the entry already exists, which must never happen. */
149 BUG_ON("inserting invalid thread_stat\n");
152 rb_link_node(&new->rb
, parent
, rb
);
153 rb_insert_color(&new->rb
, &thread_stats
);
/*
 * Find the stat for 'tid', allocating and inserting a fresh zeroed entry
 * on miss. Used once the tree is non-empty (see thread_stat_findnew
 * pointer below). NOTE(review): the hit-return, the NULL checks and the
 * tid assignment lines are missing from this extraction.
 */
156 static struct thread_stat
*thread_stat_findnew_after_first(u32 tid
)
158 struct thread_stat
*st
;
160 st
= thread_stat_find(tid
);
164 st
= zalloc(sizeof(struct thread_stat
));
166 pr_err("memory allocation failed\n");
171 INIT_LIST_HEAD(&st
->seq_list
);
173 thread_stat_insert(st
);
/*
 * Dispatch pointer for thread-stat lookup: starts at the "first insert"
 * variant and is switched to the general variant after the first entry
 * is created (see thread_stat_findnew_first below).
 */
178 static struct thread_stat
*thread_stat_findnew_first(u32 tid
);
179 static struct thread_stat
*(*thread_stat_findnew
)(u32 tid
) =
180 thread_stat_findnew_first
;
/*
 * First-ever lookup: the tree is empty, so allocate the entry and link
 * it directly at the root, then retarget thread_stat_findnew to the
 * general path. NOTE(review): allocation-failure handling and the
 * return statements are missing from this extraction.
 */
182 static struct thread_stat
*thread_stat_findnew_first(u32 tid
)
184 struct thread_stat
*st
;
186 st
= zalloc(sizeof(struct thread_stat
));
188 pr_err("memory allocation failed\n");
192 INIT_LIST_HEAD(&st
->seq_list
);
194 rb_link_node(&st
->rb
, NULL
, &thread_stats
.rb_node
);
195 rb_insert_color(&st
->rb
, &thread_stats
);
/* All later lookups go through the tree-walking variant. */
197 thread_stat_findnew
= thread_stat_findnew_after_first
;
/*
 * SINGLE_KEY(member) expands to a comparator lock_stat_key_<member>()
 * returning one->member > two->member; instantiated below for the
 * sortable counters. (No comments inserted between the backslash
 * continuation lines.)
 */
201 /* build simple key function one is bigger than two */
202 #define SINGLE_KEY(member) \
203 static int lock_stat_key_ ## member(struct lock_stat *one, \
204 struct lock_stat *two) \
206 return one->member > two->member; \
209 SINGLE_KEY(nr_acquired
)
210 SINGLE_KEY(nr_contended
)
211 SINGLE_KEY(wait_time_total
)
212 SINGLE_KEY(wait_time_max
)
/*
 * Comparator for wait_time_min: ULLONG_MAX marks "never waited" and must
 * sort as zero rather than as the largest value. NOTE(review): the
 * s1/s2 normalization assignments and the final return are missing from
 * this extraction.
 */
214 static int lock_stat_key_wait_time_min(struct lock_stat
*one
,
215 struct lock_stat
*two
)
217 u64 s1
= one
->wait_time_min
;
218 u64 s2
= two
->wait_time_min
;
219 if (s1
== ULLONG_MAX
)
221 if (s2
== ULLONG_MAX
)
/*
 * struct lock_key maps a user-facing sort-key name (-k option) to its
 * comparator. NOTE(review): the struct header line is missing from this
 * extraction; only the 'key' member and surrounding comment survive.
 */
228 * name: the value for specify by user
229 * this should be simpler than raw name of member
230 * e.g. nr_acquired -> acquired, wait_time_total -> wait_total
233 int (*key
)(struct lock_stat
*, struct lock_stat
*);
/* Default sort key; overridden by the -k/--key option in cmd_lock(). */
236 static const char *sort_key
= "acquired";
/* Active comparator, chosen by select_key(). */
238 static int (*compare
)(struct lock_stat
*, struct lock_stat
*);
240 static struct rb_root result
; /* place to store sorted data */
242 #define DEF_KEY_LOCK(name, fn_suffix) \
243 { #name, lock_stat_key_ ## fn_suffix }
/* Table of selectable sort keys; scanned linearly by select_key().
 * NOTE(review): the NULL terminator entry appears to be missing from
 * this extraction. */
244 struct lock_key keys
[] = {
245 DEF_KEY_LOCK(acquired
, nr_acquired
),
246 DEF_KEY_LOCK(contended
, nr_contended
),
247 DEF_KEY_LOCK(wait_total
, wait_time_total
),
248 DEF_KEY_LOCK(wait_min
, wait_time_min
),
249 DEF_KEY_LOCK(wait_max
, wait_time_max
),
251 /* extra comparisons much complicated should be here */
/*
 * Resolve sort_key against the keys[] table and set 'compare'.
 * Reports an error for an unknown key. NOTE(review): the success/failure
 * return statements and the declaration of 'i' are missing from this
 * extraction.
 */
256 static int select_key(void)
260 for (i
= 0; keys
[i
].name
; i
++) {
261 if (!strcmp(keys
[i
].name
, sort_key
)) {
262 compare
= keys
[i
].key
;
267 pr_err("Unknown compare key: %s\n", sort_key
);
/*
 * Insert 'st' into the 'result' rb-tree ordered by the supplied
 * comparator ('bigger' wins toward the left, giving descending
 * iteration via pop_from_result). NOTE(review): the descent loop
 * header, the comparison call and the parent assignment are missing
 * from this extraction.
 */
272 static void insert_to_result(struct lock_stat
*st
,
273 int (*bigger
)(struct lock_stat
*, struct lock_stat
*))
275 struct rb_node
**rb
= &result
.rb_node
;
276 struct rb_node
*parent
= NULL
;
280 p
= container_of(*rb
, struct lock_stat
, rb
);
284 rb
= &(*rb
)->rb_left
;
286 rb
= &(*rb
)->rb_right
;
289 rb_link_node(&st
->rb
, parent
, rb
);
290 rb_insert_color(&st
->rb
, &result
);
/*
 * Remove and return the leftmost (first-in-sort-order) entry of the
 * 'result' tree; the print loop drains the tree through this.
 * NOTE(review): the empty-tree NULL check is missing from this
 * extraction.
 */
293 /* returns left most element of result, and erase it */
294 static struct lock_stat
*pop_from_result(void)
296 struct rb_node
*node
= result
.rb_node
;
301 while (node
->rb_left
)
302 node
= node
->rb_left
;
304 rb_erase(node
, &result
);
305 return container_of(node
, struct lock_stat
, rb
);
/*
 * Find the lock_stat for 'addr' in its hash bucket, or allocate a new
 * entry (copying 'name', wait_time_min seeded to ULLONG_MAX so the
 * first wait always updates it) and add it to the bucket.
 * NOTE(review): the hit-return, NULL checks, addr assignment and the
 * alloc-failure return path are missing from this extraction.
 */
308 static struct lock_stat
*lock_stat_findnew(void *addr
, const char *name
)
310 struct list_head
*entry
= lockhashentry(addr
);
311 struct lock_stat
*ret
, *new;
313 list_for_each_entry(ret
, entry
, hash_entry
) {
314 if (ret
->addr
== addr
)
318 new = zalloc(sizeof(struct lock_stat
));
323 new->name
= zalloc(sizeof(char) * strlen(name
) + 1);
326 strcpy(new->name
, name
);
328 new->wait_time_min
= ULLONG_MAX
;
330 list_add(&new->hash_entry
, entry
);
334 pr_err("memory allocation failed\n");
/* Input file for the session (-i option); NULL means the default. */
338 static const char *input_name
;
/*
 * Callback table for the four lock tracepoints; one concrete instance
 * (report_lock_ops) is defined further down.
 */
340 struct trace_lock_handler
{
341 int (*acquire_event
)(struct perf_evsel
*evsel
,
342 struct perf_sample
*sample
);
344 int (*acquired_event
)(struct perf_evsel
*evsel
,
345 struct perf_sample
*sample
);
347 int (*contended_event
)(struct perf_evsel
*evsel
,
348 struct perf_sample
*sample
);
350 int (*release_event
)(struct perf_evsel
*evsel
,
351 struct perf_sample
*sample
);
/*
 * Return the per-(thread, lock-address) sequence tracker, creating a
 * fresh one in SEQ_STATE_UNINITIALIZED on miss. NOTE(review): the
 * hit-return, NULL check, addr assignment and final return are missing
 * from this extraction.
 */
354 static struct lock_seq_stat
*get_seq(struct thread_stat
*ts
, void *addr
)
356 struct lock_seq_stat
*seq
;
358 list_for_each_entry(seq
, &ts
->seq_list
, list
) {
359 if (seq
->addr
== addr
)
363 seq
= zalloc(sizeof(struct lock_seq_stat
));
365 pr_err("memory allocation failed\n");
368 seq
->state
= SEQ_STATE_UNINITIALIZED
;
371 list_add(&seq
->list
, &ts
->seq_list
);
/* Per-reason counters of discarded (broken) event sequences. */
383 static int bad_hist
[BROKEN_MAX
];
/*
 * Handle a lock:lock_acquire sample: decode name/address/flags from the
 * tracepoint fields and advance this thread's sequence state machine.
 * The u64 tracepoint address is memcpy'd into a void* (see the FIXME on
 * struct lock_stat). NOTE(review): the 'addr' declaration, discard
 * checks, counter increments, break/return statements and several
 * branch bodies are missing from this extraction.
 */
390 static int report_lock_acquire_event(struct perf_evsel
*evsel
,
391 struct perf_sample
*sample
)
394 struct lock_stat
*ls
;
395 struct thread_stat
*ts
;
396 struct lock_seq_stat
*seq
;
397 const char *name
= perf_evsel__strval(evsel
, sample
, "name");
398 u64 tmp
= perf_evsel__intval(evsel
, sample
, "lockdep_addr");
399 int flag
= perf_evsel__intval(evsel
, sample
, "flag");
401 memcpy(&addr
, &tmp
, sizeof(void *));
403 ls
= lock_stat_findnew(addr
, name
);
409 ts
= thread_stat_findnew(sample
->tid
);
413 seq
= get_seq(ts
, addr
);
417 switch (seq
->state
) {
418 case SEQ_STATE_UNINITIALIZED
:
419 case SEQ_STATE_RELEASED
:
421 seq
->state
= SEQ_STATE_ACQUIRING
;
425 if (flag
& READ_LOCK
)
427 seq
->state
= SEQ_STATE_READ_ACQUIRED
;
432 case SEQ_STATE_READ_ACQUIRED
:
433 if (flag
& READ_LOCK
) {
441 case SEQ_STATE_ACQUIRED
:
442 case SEQ_STATE_ACQUIRING
:
443 case SEQ_STATE_CONTENDED
:
445 /* broken lock sequence, discard it */
447 bad_hist
[BROKEN_ACQUIRE
]++;
448 list_del(&seq
->list
);
453 BUG_ON("Unknown state of lock sequence found!\n");
458 seq
->prev_event_time
= sample
->time
;
/*
 * Handle a lock:lock_acquired sample: if the sequence was CONTENDED,
 * accumulate the wait (contended_term = now - prev_event_time) into
 * total/min/max, then move the sequence to ACQUIRED.
 * NOTE(review): the 'addr'/'contended_term' declarations, discard
 * checks, counter increments and break/return statements are missing
 * from this extraction.
 */
463 static int report_lock_acquired_event(struct perf_evsel
*evsel
,
464 struct perf_sample
*sample
)
467 struct lock_stat
*ls
;
468 struct thread_stat
*ts
;
469 struct lock_seq_stat
*seq
;
471 const char *name
= perf_evsel__strval(evsel
, sample
, "name");
472 u64 tmp
= perf_evsel__intval(evsel
, sample
, "lockdep_addr");
474 memcpy(&addr
, &tmp
, sizeof(void *));
476 ls
= lock_stat_findnew(addr
, name
);
482 ts
= thread_stat_findnew(sample
->tid
);
486 seq
= get_seq(ts
, addr
);
490 switch (seq
->state
) {
491 case SEQ_STATE_UNINITIALIZED
:
492 /* orphan event, do nothing */
494 case SEQ_STATE_ACQUIRING
:
496 case SEQ_STATE_CONTENDED
:
497 contended_term
= sample
->time
- seq
->prev_event_time
;
498 ls
->wait_time_total
+= contended_term
;
499 if (contended_term
< ls
->wait_time_min
)
500 ls
->wait_time_min
= contended_term
;
501 if (ls
->wait_time_max
< contended_term
)
502 ls
->wait_time_max
= contended_term
;
504 case SEQ_STATE_RELEASED
:
505 case SEQ_STATE_ACQUIRED
:
506 case SEQ_STATE_READ_ACQUIRED
:
507 /* broken lock sequence, discard it */
509 bad_hist
[BROKEN_ACQUIRED
]++;
510 list_del(&seq
->list
);
516 BUG_ON("Unknown state of lock sequence found!\n");
520 seq
->state
= SEQ_STATE_ACQUIRED
;
522 seq
->prev_event_time
= sample
->time
;
/*
 * Handle a lock:lock_contended sample: only an ACQUIRING sequence may
 * legally become CONTENDED; every other prior state is a broken
 * sequence and is discarded. NOTE(review): the 'addr' declaration,
 * discard checks, counter increments and break/return statements are
 * missing from this extraction.
 */
527 static int report_lock_contended_event(struct perf_evsel
*evsel
,
528 struct perf_sample
*sample
)
531 struct lock_stat
*ls
;
532 struct thread_stat
*ts
;
533 struct lock_seq_stat
*seq
;
534 const char *name
= perf_evsel__strval(evsel
, sample
, "name");
535 u64 tmp
= perf_evsel__intval(evsel
, sample
, "lockdep_addr");
537 memcpy(&addr
, &tmp
, sizeof(void *));
539 ls
= lock_stat_findnew(addr
, name
);
545 ts
= thread_stat_findnew(sample
->tid
);
549 seq
= get_seq(ts
, addr
);
553 switch (seq
->state
) {
554 case SEQ_STATE_UNINITIALIZED
:
555 /* orphan event, do nothing */
557 case SEQ_STATE_ACQUIRING
:
559 case SEQ_STATE_RELEASED
:
560 case SEQ_STATE_ACQUIRED
:
561 case SEQ_STATE_READ_ACQUIRED
:
562 case SEQ_STATE_CONTENDED
:
563 /* broken lock sequence, discard it */
565 bad_hist
[BROKEN_CONTENDED
]++;
566 list_del(&seq
->list
);
571 BUG_ON("Unknown state of lock sequence found!\n");
575 seq
->state
= SEQ_STATE_CONTENDED
;
577 seq
->prev_event_time
= sample
->time
;
/*
 * Handle a lock:lock_release sample: a READ_ACQUIRED sequence only
 * fully releases once its read_count drops to zero; illegal prior
 * states count as BROKEN_RELEASE. The sequence tracker is freed at the
 * end (list_del below). NOTE(review): the 'addr' declaration, counter
 * updates, free(seq) and return statements are missing from this
 * extraction.
 */
582 static int report_lock_release_event(struct perf_evsel
*evsel
,
583 struct perf_sample
*sample
)
586 struct lock_stat
*ls
;
587 struct thread_stat
*ts
;
588 struct lock_seq_stat
*seq
;
589 const char *name
= perf_evsel__strval(evsel
, sample
, "name");
590 u64 tmp
= perf_evsel__intval(evsel
, sample
, "lockdep_addr");
592 memcpy(&addr
, &tmp
, sizeof(void *));
594 ls
= lock_stat_findnew(addr
, name
);
600 ts
= thread_stat_findnew(sample
->tid
);
604 seq
= get_seq(ts
, addr
);
608 switch (seq
->state
) {
609 case SEQ_STATE_UNINITIALIZED
:
612 case SEQ_STATE_ACQUIRED
:
614 case SEQ_STATE_READ_ACQUIRED
:
616 BUG_ON(seq
->read_count
< 0);
617 if (!seq
->read_count
) {
622 case SEQ_STATE_ACQUIRING
:
623 case SEQ_STATE_CONTENDED
:
624 case SEQ_STATE_RELEASED
:
625 /* broken lock sequence, discard it */
627 bad_hist
[BROKEN_RELEASE
]++;
631 BUG_ON("Unknown state of lock sequence found!\n");
637 list_del(&seq
->list
);
/* Concrete handler table wiring the four tracepoints to the
 * report_lock_*_event() functions above; trace_handler selects the
 * active table at runtime (set in cmd_lock). */
643 /* lock oriented handlers */
644 /* TODO: handlers for CPU oriented, thread oriented */
645 static struct trace_lock_handler report_lock_ops
= {
646 .acquire_event
= report_lock_acquire_event
,
647 .acquired_event
= report_lock_acquired_event
,
648 .contended_event
= report_lock_contended_event
,
649 .release_event
= report_lock_release_event
,
652 static struct trace_lock_handler
*trace_handler
;
/*
 * Thin dispatch wrappers: forward each tracepoint sample to the active
 * trace_handler callback when one is installed. NOTE(review): the
 * fallthrough "return 0" lines are missing from this extraction.
 */
654 static int perf_evsel__process_lock_acquire(struct perf_evsel
*evsel
,
655 struct perf_sample
*sample
)
657 if (trace_handler
->acquire_event
)
658 return trace_handler
->acquire_event(evsel
, sample
);
662 static int perf_evsel__process_lock_acquired(struct perf_evsel
*evsel
,
663 struct perf_sample
*sample
)
665 if (trace_handler
->acquired_event
)
666 return trace_handler
->acquired_event(evsel
, sample
);
670 static int perf_evsel__process_lock_contended(struct perf_evsel
*evsel
,
671 struct perf_sample
*sample
)
673 if (trace_handler
->contended_event
)
674 return trace_handler
->contended_event(evsel
, sample
);
678 static int perf_evsel__process_lock_release(struct perf_evsel
*evsel
,
679 struct perf_sample
*sample
)
681 if (trace_handler
->release_event
)
682 return trace_handler
->release_event(evsel
, sample
);
/*
 * Debug summary of discarded sequences: totals, bad rate, and the
 * per-reason bad_hist[] histogram (indices match BROKEN_*).
 * NOTE(review): the early-return when bad == 0 and the declaration of
 * 'i' are missing from this extraction; the bad-rate division assumes
 * total != 0 on this path.
 */
686 static void print_bad_events(int bad
, int total
)
688 /* Output for debug, this have to be removed */
690 const char *name
[4] =
691 { "acquire", "acquired", "contended", "release" };
693 pr_info("\n=== output for debug===\n\n");
694 pr_info("bad: %d, total: %d\n", bad
, total
);
695 pr_info("bad rate: %f %%\n", (double)bad
/ (double)total
* 100);
696 pr_info("histogram of events caused bad sequence\n");
697 for (i
= 0; i
< BROKEN_MAX
; i
++)
698 pr_info(" %10s: %d\n", name
[i
], bad_hist
[i
]);
/*
 * Print the sorted table: drain the 'result' tree via pop_from_result()
 * and emit one row per lock. Long names (>= 16 chars) are truncated
 * into cut_name; ULLONG_MAX min-wait renders as 0 ("never waited").
 * NOTE(review): cut_name/bad/total declarations, the discard check and
 * the "..." suffix lines are missing from this extraction. strncpy here
 * relies on the missing lines to NUL-terminate cut_name — confirm
 * against the upstream file.
 */
701 /* TODO: various way to print, coloring, nano or milli sec */
702 static void print_result(void)
704 struct lock_stat
*st
;
708 pr_info("%20s ", "Name");
709 pr_info("%10s ", "acquired");
710 pr_info("%10s ", "contended");
712 pr_info("%15s ", "total wait (ns)");
713 pr_info("%15s ", "max wait (ns)");
714 pr_info("%15s ", "min wait (ns)");
719 while ((st
= pop_from_result())) {
727 if (strlen(st
->name
) < 16) {
728 /* output raw name */
729 pr_info("%20s ", st
->name
);
731 strncpy(cut_name
, st
->name
, 16);
736 /* cut off name for saving output style */
737 pr_info("%20s ", cut_name
);
740 pr_info("%10u ", st
->nr_acquired
);
741 pr_info("%10u ", st
->nr_contended
);
743 pr_info("%15" PRIu64
" ", st
->wait_time_total
);
744 pr_info("%15" PRIu64
" ", st
->wait_time_max
);
745 pr_info("%15" PRIu64
" ", st
->wait_time_min
== ULLONG_MAX
?
746 0 : st
->wait_time_min
);
750 print_bad_events(bad
, total
);
/* Flags for the 'info' subcommand (-t / -m options in cmd_lock). */
753 static bool info_threads
, info_map
;
/*
 * List every recorded thread (tid: comm) by walking the thread_stats
 * rb-tree in order. NOTE(review): the declaration of 't' and the loop
 * condition around the rb_next() walk are missing from this extraction.
 */
755 static void dump_threads(void)
757 struct thread_stat
*st
;
758 struct rb_node
*node
;
761 pr_info("%10s: comm\n", "Thread ID");
763 node
= rb_first(&thread_stats
);
765 st
= container_of(node
, struct thread_stat
, rb
);
766 t
= perf_session__findnew(session
, st
->tid
);
767 pr_info("%10d: %s\n", st
->tid
, t
->comm
);
768 node
= rb_next(node
);
/*
 * Dump every known lock as "address: name" by scanning all
 * lockhash_table buckets. NOTE(review): the declaration of 'i', the
 * closing braces, and nearly all of dump_info() (the info_threads /
 * info_map dispatch) are missing from this extraction.
 */
772 static void dump_map(void)
775 struct lock_stat
*st
;
777 pr_info("Address of instance: name of class\n");
778 for (i
= 0; i
< LOCKHASH_SIZE
; i
++) {
779 list_for_each_entry(st
, &lockhash_table
[i
], hash_entry
) {
780 pr_info(" %p: %s\n", st
->addr
, st
->name
);
785 static int dump_info(void)
795 pr_err("Unknown type of information\n");
/* Signature shared by all per-tracepoint sample handlers. */
801 typedef int (*tracepoint_handler
)(struct perf_evsel
*evsel
,
802 struct perf_sample
*sample
);
/*
 * perf_tool sample callback: resolve the thread for the sample's tid,
 * then dispatch to the handler attached to the evsel (installed via
 * perf_session__set_tracepoints_handlers in read_events).
 * NOTE(review): the error-path body, the fallthrough return and the
 * pr_debug argument line are missing from this extraction.
 */
804 static int process_sample_event(struct perf_tool
*tool __maybe_unused
,
805 union perf_event
*event
,
806 struct perf_sample
*sample
,
807 struct perf_evsel
*evsel
,
808 struct machine
*machine
)
810 struct thread
*thread
= machine__findnew_thread(machine
, sample
->tid
);
812 if (thread
== NULL
) {
813 pr_debug("problem processing %d event, skipping it.\n",
818 if (evsel
->handler
.func
!= NULL
) {
819 tracepoint_handler f
= evsel
->handler
.func
;
820 return f(evsel
, sample
);
/* Tracepoint-name -> handler table; registered with the session in
 * read_events() and used by __cmd_record() to build the -e arguments. */
826 static const struct perf_evsel_str_handler lock_tracepoints
[] = {
827 { "lock:lock_acquire", perf_evsel__process_lock_acquire
, }, /* CONFIG_LOCKDEP */
828 { "lock:lock_acquired", perf_evsel__process_lock_acquired
, }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
829 { "lock:lock_contended", perf_evsel__process_lock_contended
, }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
830 { "lock:lock_release", perf_evsel__process_lock_release
, }, /* CONFIG_LOCKDEP */
/*
 * Open the perf.data session (input_name), install the lock tracepoint
 * handlers, and process all events (ordered_samples keeps timestamps
 * monotonic for the sequence state machines). NOTE(review): the
 * session NULL check body and the error-path returns are missing from
 * this extraction.
 */
833 static int read_events(void)
835 struct perf_tool eops
= {
836 .sample
= process_sample_event
,
837 .comm
= perf_event__process_comm
,
838 .ordered_samples
= true,
840 session
= perf_session__new(input_name
, O_RDONLY
, 0, false, &eops
);
842 pr_err("Initializing perf session failed\n");
846 if (perf_session__set_tracepoints_handlers(session
, lock_tracepoints
)) {
847 pr_err("Initializing perf session tracepoint handlers failed\n");
851 return perf_session__process_events(session
, &eops
);
/*
 * Move every lock_stat from the hash table into the sorted 'result'
 * rb-tree using the comparator chosen by select_key().
 * NOTE(review): the declaration of 'i', the closing braces, and most of
 * __cmd_report() (setup, sort_result/print_result calls, returns) are
 * missing from this extraction.
 */
854 static void sort_result(void)
857 struct lock_stat
*st
;
859 for (i
= 0; i
< LOCKHASH_SIZE
; i
++) {
860 list_for_each_entry(st
, &lockhash_table
[i
], hash_entry
) {
861 insert_to_result(st
, compare
);
866 static int __cmd_report(void)
870 if ((select_key() != 0) ||
871 (read_events() != 0))
/*
 * Build an argv for 'perf record': the fixed record_args, one "-e
 * <tracepoint>" pair per entry of lock_tracepoints, then the user's
 * trailing arguments; finally delegate to cmd_record(). All required
 * tracepoints must exist in the running kernel (is_valid_tracepoint).
 * NOTE(review): the error returns, the closing braces and the trailing
 * record_args entries are missing from this extraction; strdup results
 * are not checked (upstream behavior).
 */
880 static int __cmd_record(int argc
, const char **argv
)
882 const char *record_args
[] = {
883 "record", "-R", "-f", "-m", "1024", "-c", "1",
885 unsigned int rec_argc
, i
, j
;
886 const char **rec_argv
;
888 for (i
= 0; i
< ARRAY_SIZE(lock_tracepoints
); i
++) {
889 if (!is_valid_tracepoint(lock_tracepoints
[i
].name
)) {
890 pr_err("tracepoint %s is not enabled. "
891 "Are CONFIG_LOCKDEP and CONFIG_LOCK_STAT enabled?\n",
892 lock_tracepoints
[i
].name
);
897 rec_argc
= ARRAY_SIZE(record_args
) + argc
- 1;
898 /* factor of 2 is for -e in front of each tracepoint */
899 rec_argc
+= 2 * ARRAY_SIZE(lock_tracepoints
);
901 rec_argv
= calloc(rec_argc
+ 1, sizeof(char *));
902 if (rec_argv
== NULL
)
905 for (i
= 0; i
< ARRAY_SIZE(record_args
); i
++)
906 rec_argv
[i
] = strdup(record_args
[i
]);
908 for (j
= 0; j
< ARRAY_SIZE(lock_tracepoints
); j
++) {
909 rec_argv
[i
++] = "-e";
910 rec_argv
[i
++] = strdup(lock_tracepoints
[j
].name
);
913 for (j
= 1; j
< (unsigned int)argc
; j
++, i
++)
914 rec_argv
[i
] = argv
[j
];
916 BUG_ON(i
!= rec_argc
);
918 return cmd_record(i
, rec_argv
, NULL
);
/*
 * Entry point for 'perf lock': parse global options, initialize the
 * lock hash table, then dispatch on the subcommand — record (delegates
 * to perf record), report (sort + print), script (alias to perf
 * script), or info (thread list / lock map dump). NOTE(review): option
 * array terminators (OPT_END/NULL), several closing braces, the
 * __cmd_report()/dump_info() calls and the return statements are
 * missing from this extraction.
 */
921 int cmd_lock(int argc
, const char **argv
, const char *prefix __maybe_unused
)
923 const struct option info_options
[] = {
924 OPT_BOOLEAN('t', "threads", &info_threads
,
925 "dump thread list in perf.data"),
926 OPT_BOOLEAN('m', "map", &info_map
,
927 "map of lock instances (address:name table)"),
930 const struct option lock_options
[] = {
931 OPT_STRING('i', "input", &input_name
, "file", "input file name"),
932 OPT_INCR('v', "verbose", &verbose
, "be more verbose (show symbol address, etc)"),
933 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace
, "dump raw trace in ASCII"),
936 const struct option report_options
[] = {
937 OPT_STRING('k', "key", &sort_key
, "acquired",
938 "key for sorting (acquired / contended / wait_total / wait_max / wait_min)"),
942 const char * const info_usage
[] = {
943 "perf lock info [<options>]",
946 const char * const lock_usage
[] = {
947 "perf lock [<options>] {record|report|script|info}",
950 const char * const report_usage
[] = {
951 "perf lock report [<options>]",
958 for (i
= 0; i
< LOCKHASH_SIZE
; i
++)
959 INIT_LIST_HEAD(lockhash_table
+ i
);
961 argc
= parse_options(argc
, argv
, lock_options
, lock_usage
,
962 PARSE_OPT_STOP_AT_NON_OPTION
);
964 usage_with_options(lock_usage
, lock_options
);
966 if (!strncmp(argv
[0], "rec", 3)) {
967 return __cmd_record(argc
, argv
);
968 } else if (!strncmp(argv
[0], "report", 6)) {
969 trace_handler
= &report_lock_ops
;
971 argc
= parse_options(argc
, argv
,
972 report_options
, report_usage
, 0);
974 usage_with_options(report_usage
, report_options
);
977 } else if (!strcmp(argv
[0], "script")) {
978 /* Aliased to 'perf script' */
979 return cmd_script(argc
, argv
, prefix
);
980 } else if (!strcmp(argv
[0], "info")) {
982 argc
= parse_options(argc
, argv
,
983 info_options
, info_usage
, 0);
985 usage_with_options(info_usage
, info_options
);
987 /* recycling report_lock_ops */
988 trace_handler
= &report_lock_ops
;
990 if (read_events() != 0)
995 usage_with_options(lock_usage
, lock_options
);