/* tools/perf/util/machine.c */
#include "debug.h"
#include "event.h"
#include "machine.h"
#include "map.h"
#include "thread.h"
#include <stdbool.h>

static struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid,
						bool create)
{
	struct rb_node **p = &machine->threads.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - PID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	if (machine->last_match && machine->last_match->pid == pid)
		return machine->last_match;

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->pid == pid) {
			machine->last_match = th;
			return th;
		}

		if (pid < th->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	if (!create)
		return NULL;

	th = thread__new(pid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &machine->threads);
		machine->last_match = th;
	}

	return th;
}

struct thread *machine__findnew_thread(struct machine *machine, pid_t pid)
{
	return __machine__findnew_thread(machine, pid, true);
}

struct thread *machine__find_thread(struct machine *machine, pid_t pid)
{
	return __machine__findnew_thread(machine, pid, false);
}
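
/*
 * Illustrative only (not part of the original file): a minimal sketch of how
 * the thread lookup helpers above might be used, assuming a struct machine
 * that has already been set up elsewhere.
 *
 *	struct thread *t;
 *
 *	t = machine__find_thread(machine, 1234);	// lookup only, NULL if unknown
 *	if (t == NULL)
 *		t = machine__findnew_thread(machine, 1234);	// create and insert
 *
 * Repeated lookups for the same PID hit the machine->last_match front-end
 * cache and skip the rbtree walk entirely.
 */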

int machine__process_comm_event(struct machine *machine, union perf_event *event)
{
	struct thread *thread = machine__findnew_thread(machine, event->comm.tid);

	if (dump_trace)
		perf_event__fprintf_comm(event, stdout);

	if (thread == NULL || thread__set_comm(thread, event->comm.comm)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		return -1;
	}

	return 0;
}

int machine__process_lost_event(struct machine *machine __maybe_unused,
				union perf_event *event)
{
	dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
		    event->lost.id, event->lost.lost);
	return 0;
}

static void machine__set_kernel_mmap_len(struct machine *machine,
					 union perf_event *event)
{
	machine->vmlinux_maps[MAP__FUNCTION]->start = event->mmap.start;
	machine->vmlinux_maps[MAP__FUNCTION]->end = (event->mmap.start +
						     event->mmap.len);
	/*
	 * Be a bit paranoid here, some perf.data files came with
	 * a zero sized synthesized MMAP event for the kernel.
	 */
	if (machine->vmlinux_maps[MAP__FUNCTION]->end == 0)
		machine->vmlinux_maps[MAP__FUNCTION]->end = ~0ULL;
}
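
/*
 * Illustrative only (not in the original source): a kernel MMAP event with
 * start = 0xffffffff81000000 and len = 0x1000000 yields a kernel map of
 * [0xffffffff81000000, 0xffffffff82000000). Some broken perf.data files
 * carry an all-zero kernel MMAP event; in that case start + len == 0, so
 * end is widened to ~0ULL and kernel symbol lookups still fall inside the
 * map instead of missing it entirely.
 */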

static int machine__process_kernel_mmap_event(struct machine *machine,
					      union perf_event *event)
{
	struct map *map;
	char kmmap_prefix[PATH_MAX];
	enum dso_kernel_type kernel_type;
	bool is_kernel_mmap;

	machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
	if (machine__is_host(machine))
		kernel_type = DSO_TYPE_KERNEL;
	else
		kernel_type = DSO_TYPE_GUEST_KERNEL;

	is_kernel_mmap = memcmp(event->mmap.filename,
				kmmap_prefix,
				strlen(kmmap_prefix) - 1) == 0;
	if (event->mmap.filename[0] == '/' ||
	    (!is_kernel_mmap && event->mmap.filename[0] == '[')) {

		char short_module_name[1024];
		char *name, *dot;

		if (event->mmap.filename[0] == '/') {
			name = strrchr(event->mmap.filename, '/');
			if (name == NULL)
				goto out_problem;

			++name; /* skip / */
			dot = strrchr(name, '.');
			if (dot == NULL)
				goto out_problem;
			snprintf(short_module_name, sizeof(short_module_name),
				 "[%.*s]", (int)(dot - name), name);
			strxfrchar(short_module_name, '-', '_');
		} else
			strcpy(short_module_name, event->mmap.filename);

		map = machine__new_module(machine, event->mmap.start,
					  event->mmap.filename);
		if (map == NULL)
			goto out_problem;

		name = strdup(short_module_name);
		if (name == NULL)
			goto out_problem;

		map->dso->short_name = name;
		map->dso->sname_alloc = 1;
		map->end = map->start + event->mmap.len;
	} else if (is_kernel_mmap) {
		const char *symbol_name = (event->mmap.filename +
					   strlen(kmmap_prefix));
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = __dsos__findnew(&machine->kernel_dsos,
						     kmmap_prefix);
		if (kernel == NULL)
			goto out_problem;

		kernel->kernel = kernel_type;
		if (__machine__create_kernel_maps(machine, kernel) < 0)
			goto out_problem;

		machine__set_kernel_mmap_len(machine, event);

		/*
		 * Avoid using a zero address (kptr_restrict) for the ref reloc
		 * symbol. Effectively having zero here means that at record
		 * time /proc/sys/kernel/kptr_restrict was non zero.
		 */
		if (event->mmap.pgoff != 0) {
			maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
							 symbol_name,
							 event->mmap.pgoff);
		}

		if (machine__is_default_guest(machine)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION],
				  NULL);
		}
	}
	return 0;
out_problem:
	return -1;
}
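
/*
 * Illustrative only (not in the original source): for a module MMAP event
 * with a filename such as "/lib/modules/.../drivers/md/dm-mod.ko", the code
 * above strips the directory and the ".ko" suffix and turns '-' into '_',
 * so the DSO short name becomes "[dm_mod]", matching how the module appears
 * in /proc/modules and kallsyms.
 */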

int machine__process_mmap_event(struct machine *machine, union perf_event *event)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread;
	struct map *map;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap(event, stdout);

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap.pid);
	if (thread == NULL)
		goto out_problem;
	map = map__new(&machine->user_dsos, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       event->mmap.pid, event->mmap.filename,
		       MAP__FUNCTION);
	if (map == NULL)
		goto out_problem;

	thread__insert_map(thread, map);
	return 0;

out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
	return 0;
}

int machine__process_fork_event(struct machine *machine, union perf_event *event)
{
	struct thread *thread = machine__findnew_thread(machine, event->fork.tid);
	struct thread *parent = machine__findnew_thread(machine, event->fork.ptid);

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
		return -1;
	}

	return 0;
}

int machine__process_exit_event(struct machine *machine, union perf_event *event)
{
	struct thread *thread = machine__find_thread(machine, event->fork.tid);

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread != NULL)
		machine__remove_thread(machine, thread);

	return 0;
}

int machine__process_event(struct machine *machine, union perf_event *event)
{
	int ret;

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret = machine__process_comm_event(machine, event); break;
	case PERF_RECORD_MMAP:
		ret = machine__process_mmap_event(machine, event); break;
	case PERF_RECORD_FORK:
		ret = machine__process_fork_event(machine, event); break;
	case PERF_RECORD_EXIT:
		ret = machine__process_exit_event(machine, event); break;
	case PERF_RECORD_LOST:
		ret = machine__process_lost_event(machine, event); break;
	default:
		ret = -1;
		break;
	}

	return ret;
}
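
/*
 * Illustrative only (not part of the original file): a minimal sketch of how
 * an event loop might feed records into the dispatcher above. The
 * read_next_event() helper is hypothetical, standing in for however the
 * caller obtains perf events (e.g. from a mmap'ed ring buffer or a
 * perf.data file).
 *
 *	union perf_event *event;
 *
 *	while ((event = read_next_event()) != NULL) {
 *		if (machine__process_event(machine, event) < 0)
 *			pr_debug("unhandled or failed event type %d\n",
 *				 event->header.type);
 *	}
 */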