#include <linux/list.h>
#include "ordered-events.h"
#include "evlist.h"
#include "session.h"
#include "asm/bug.h"
#include "debug.h"

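/*
 * Insert @new into the time-sorted event list. The search starts from the
 * last queued event and walks forward or backward as needed, since
 * consecutive events usually carry nearby timestamps.
 */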
static void queue_event(struct ordered_events *oe, struct ordered_event *new)
{
	struct ordered_event *last = oe->last;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	++oe->nr_events;
	oe->last = new;

	if (!last) {
		list_add(&new->list, &oe->events);
		oe->max_timestamp = timestamp;
		return;
	}

	/*
	 * last event might point to some random place in the list as it's
	 * the last queued event. We expect that the new event is close to
	 * this.
	 */
	if (last->timestamp <= timestamp) {
		while (last->timestamp <= timestamp) {
			p = last->list.next;
			if (p == &oe->events) {
				list_add_tail(&new->list, &oe->events);
				oe->max_timestamp = timestamp;
				return;
			}
			last = list_entry(p, struct ordered_event, list);
		}
		list_add_tail(&new->list, &last->list);
	} else {
		while (last->timestamp > timestamp) {
			p = last->list.prev;
			if (p == &oe->events) {
				list_add(&new->list, &oe->events);
				return;
			}
			last = list_entry(p, struct ordered_event, list);
		}
		list_add(&new->list, &last->list);
	}
}

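/*
 * Get a free ordered_event: reuse one from the cache of deleted events if
 * possible, otherwise take the next slot of the current preallocated buffer,
 * and only then allocate a new buffer of MAX_SAMPLE_BUFFER entries, bounded
 * by max_alloc_size.
 */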
#define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct ordered_event))
static struct ordered_event *alloc_event(struct ordered_events *oe)
{
	struct list_head *cache = &oe->cache;
	struct ordered_event *new = NULL;

	if (!list_empty(cache)) {
		new = list_entry(cache->next, struct ordered_event, list);
		list_del(&new->list);
	} else if (oe->buffer) {
		new = oe->buffer + oe->buffer_idx;
		if (++oe->buffer_idx == MAX_SAMPLE_BUFFER)
			oe->buffer = NULL;
	} else if (oe->cur_alloc_size < oe->max_alloc_size) {
		size_t size = MAX_SAMPLE_BUFFER * sizeof(*new);

		oe->buffer = malloc(size);
		if (!oe->buffer)
			return NULL;

		oe->cur_alloc_size += size;
		list_add(&oe->buffer->list, &oe->to_free);

		/* First entry is abused to maintain the to_free list. */
		oe->buffer_idx = 2;
		new = oe->buffer + 1;
	}

	return new;
}

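/*
 * Allocate a new event, stamp it with @timestamp and queue it in timestamp
 * order. Returns NULL when the allocation limit is hit.
 */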
struct ordered_event *
ordered_events__new(struct ordered_events *oe, u64 timestamp)
{
	struct ordered_event *new;

	new = alloc_event(oe);
	if (new) {
		new->timestamp = timestamp;
		queue_event(oe, new);
	}

	return new;
}

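/*
 * Unlink a delivered event from the queue and put it on the cache list so
 * alloc_event() can reuse it.
 */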
void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event)
{
	list_del(&event->list);
	list_add(&event->list, &oe->cache);
	oe->nr_events--;
}

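/*
 * Deliver queued events with timestamps up to oe->next_flush, in order, and
 * recycle them. A final flush (limit == ULLONG_MAX) may walk the whole
 * queue, so it reports progress through the UI.
 */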
static int __ordered_events__flush(struct perf_session *s,
				   struct perf_tool *tool)
{
	struct ordered_events *oe = &s->ordered_events;
	struct list_head *head = &oe->events;
	struct ordered_event *tmp, *iter;
	struct perf_sample sample;
	u64 limit = oe->next_flush;
	u64 last_ts = oe->last ? oe->last->timestamp : 0ULL;
	bool show_progress = limit == ULLONG_MAX;
	struct ui_progress prog;
	int ret;

	if (!tool->ordered_events || !limit)
		return 0;

	if (show_progress)
		ui_progress__init(&prog, oe->nr_events, "Processing time ordered events...");

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (session_done())
			return 0;

		if (iter->timestamp > limit)
			break;

		ret = perf_evlist__parse_sample(s->evlist, iter->event, &sample);
		if (ret)
			pr_err("Can't parse sample, err = %d\n", ret);
		else {
			ret = perf_session__deliver_event(s, iter->event, &sample, tool,
							  iter->file_offset);
			if (ret)
				return ret;
		}

		ordered_events__delete(oe, iter);
		oe->last_flush = iter->timestamp;

		if (show_progress)
			ui_progress__update(&prog, 1);
	}

	if (list_empty(head))
		oe->last = NULL;
	else if (last_ts <= limit)
		oe->last = list_entry(head->prev, struct ordered_event, list);

	return 0;
}

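/*
 * Pick the flush limit according to @how and flush:
 *   OE_FLUSH__FINAL - flush everything that is queued,
 *   OE_FLUSH__HALF  - flush up to the midpoint between the oldest and the
 *                     newest queued timestamps,
 *   OE_FLUSH__ROUND - flush up to the limit recorded at the end of the
 *                     previous round, then record the current max_timestamp
 *                     as the limit for the next round.
 */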
int ordered_events__flush(struct perf_session *s, struct perf_tool *tool,
			  enum oe_flush how)
{
	struct ordered_events *oe = &s->ordered_events;
	int err;

	switch (how) {
	case OE_FLUSH__FINAL:
		oe->next_flush = ULLONG_MAX;
		break;

	case OE_FLUSH__HALF:
	{
		struct ordered_event *first, *last;
		struct list_head *head = &oe->events;

		first = list_entry(head->next, struct ordered_event, list);
		last = oe->last;

		/* Warn if we are called before any event got allocated. */
		if (WARN_ONCE(!last || list_empty(head), "empty queue"))
			return 0;

		oe->next_flush  = first->timestamp;
		oe->next_flush += (last->timestamp - first->timestamp) / 2;
		break;
	}

	case OE_FLUSH__ROUND:
	default:
		break;
	}

	err = __ordered_events__flush(s, tool);

	if (!err) {
		if (how == OE_FLUSH__ROUND)
			oe->next_flush = oe->max_timestamp;
	}

	return err;
}