]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blame - tools/power/x86/turbostat/turbostat.c
Merge branch 'for-linus' of git://git.kernel.dk/linux-block
[mirror_ubuntu-zesty-kernel.git] / tools / power / x86 / turbostat / turbostat.c
CommitLineData
103a8fea
LB
/*
 * turbostat -- show CPU frequency and C-state residency
 * on modern Intel turbo-capable processors.
 *
 * Copyright (c) 2012 Intel Corporation.
 * Len Brown <len.brown@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */
21
88c3281f 22#define _GNU_SOURCE
103a8fea
LB
23#include <stdio.h>
24#include <unistd.h>
25#include <sys/types.h>
26#include <sys/wait.h>
27#include <sys/stat.h>
28#include <sys/resource.h>
29#include <fcntl.h>
30#include <signal.h>
31#include <sys/time.h>
32#include <stdlib.h>
33#include <dirent.h>
34#include <string.h>
35#include <ctype.h>
88c3281f 36#include <sched.h>
103a8fea
LB
37
/* MSR addresses read via /dev/cpu/N/msr */
#define MSR_TSC				0x10
#define MSR_NEHALEM_PLATFORM_INFO	0xCE
#define MSR_NEHALEM_TURBO_RATIO_LIMIT	0x1AD
#define MSR_APERF			0xE8
#define MSR_MPERF			0xE7
#define MSR_PKG_C2_RESIDENCY		0x60D	/* SNB only */
#define MSR_PKG_C3_RESIDENCY		0x3F8
#define MSR_PKG_C6_RESIDENCY		0x3F9
#define MSR_PKG_C7_RESIDENCY		0x3FA	/* SNB only */
#define MSR_CORE_C3_RESIDENCY		0x3FC
#define MSR_CORE_C6_RESIDENCY		0x3FD
#define MSR_CORE_C7_RESIDENCY		0x3FE	/* SNB only */

char *proc_stat = "/proc/stat";
unsigned int interval_sec = 5;	/* set with -i interval_sec */
unsigned int verbose;		/* set with -v */
unsigned int summary_only;	/* set with -s */
unsigned int skip_c0;
unsigned int skip_c1;
unsigned int do_nhm_cstates;
unsigned int do_snb_cstates;
unsigned int has_aperf;
unsigned int units = 1000000000;	/* Ghz etc */
unsigned int genuine_intel;
unsigned int has_invariant_tsc;
unsigned int do_nehalem_platform_info;
unsigned int do_nehalem_turbo_ratio_limit;
unsigned int extra_msr_offset;
double bclk;
unsigned int show_pkg;
unsigned int show_core;
unsigned int show_cpu;
unsigned int show_pkg_only;
unsigned int show_core_only;
char *output_buffer, *outp;	/* accumulated table text; outp is the write cursor */

int aperf_mperf_unstable;
int backwards_count;
char *progname;

cpu_set_t *cpu_present_set, *cpu_affinity_set;
size_t cpu_present_setsize, cpu_affinity_setsize;

/* per-logical-CPU counters; even/odd arrays are double-buffered snapshots */
struct thread_data {
	unsigned long long tsc;
	unsigned long long aperf;
	unsigned long long mperf;
	unsigned long long c1;	/* derived */
	unsigned long long extra_msr;
	unsigned int cpu_id;
	unsigned int flags;
#define CPU_IS_FIRST_THREAD_IN_CORE	0x2
#define CPU_IS_FIRST_CORE_IN_PACKAGE	0x4
} *thread_even, *thread_odd;

struct core_data {
	unsigned long long c3;
	unsigned long long c6;
	unsigned long long c7;
	unsigned int core_id;
} *core_even, *core_odd;

struct pkg_data {
	unsigned long long pc2;
	unsigned long long pc3;
	unsigned long long pc6;
	unsigned long long pc7;
	unsigned int package_id;
} *package_even, *package_odd;

#define ODD_COUNTERS thread_odd, core_odd, package_odd
#define EVEN_COUNTERS thread_even, core_even, package_even

/* index into the flat per-thread/core/pkg arrays by topology position */
#define GET_THREAD(thread_base, thread_no, core_no, pkg_no) \
	(thread_base + (pkg_no) * topo.num_cores_per_pkg * \
		topo.num_threads_per_core + \
		(core_no) * topo.num_threads_per_core + (thread_no))
#define GET_CORE(core_base, core_no, pkg_no) \
	(core_base + (pkg_no) * topo.num_cores_per_pkg + (core_no))
#define GET_PKG(pkg_base, pkg_no) (pkg_base + pkg_no)

struct system_summary {
	struct thread_data threads;
	struct core_data cores;
	struct pkg_data packages;
} sum, average;

struct topo_params {
	int num_packages;
	int num_cpus;
	int num_cores;
	int max_cpu_num;
	int num_cores_per_pkg;
	int num_threads_per_core;
} topo;

struct timeval tv_even, tv_odd, tv_delta;

void setup_all_buffers(void);
139int cpu_is_not_present(int cpu)
d15cf7c1 140{
c98d5d94 141 return !CPU_ISSET_S(cpu, cpu_present_setsize, cpu_present_set);
d15cf7c1 142}
88c3281f 143/*
c98d5d94
LB
144 * run func(thread, core, package) in topology order
145 * skip non-present cpus
88c3281f 146 */
c98d5d94
LB
147
148int for_all_cpus(int (func)(struct thread_data *, struct core_data *, struct pkg_data *),
149 struct thread_data *thread_base, struct core_data *core_base, struct pkg_data *pkg_base)
88c3281f 150{
c98d5d94 151 int retval, pkg_no, core_no, thread_no;
d15cf7c1 152
c98d5d94
LB
153 for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) {
154 for (core_no = 0; core_no < topo.num_cores_per_pkg; ++core_no) {
155 for (thread_no = 0; thread_no <
156 topo.num_threads_per_core; ++thread_no) {
157 struct thread_data *t;
158 struct core_data *c;
159 struct pkg_data *p;
88c3281f 160
c98d5d94
LB
161 t = GET_THREAD(thread_base, thread_no, core_no, pkg_no);
162
163 if (cpu_is_not_present(t->cpu_id))
164 continue;
165
166 c = GET_CORE(core_base, core_no, pkg_no);
167 p = GET_PKG(pkg_base, pkg_no);
168
169 retval = func(t, c, p);
170 if (retval)
171 return retval;
172 }
173 }
174 }
175 return 0;
88c3281f
LB
176}
177
178int cpu_migrate(int cpu)
179{
c98d5d94
LB
180 CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);
181 CPU_SET_S(cpu, cpu_affinity_setsize, cpu_affinity_set);
182 if (sched_setaffinity(0, cpu_affinity_setsize, cpu_affinity_set) == -1)
88c3281f
LB
183 return -1;
184 else
185 return 0;
186}
187
/*
 * Read one 64-bit MSR at @offset on @cpu via the msr driver.
 * Returns 0 on success and stores the value in *msr; -1 on any failure
 * (device missing, permission denied, or short read).
 */
int get_msr(int cpu, off_t offset, unsigned long long *msr)
{
	char path[32];
	ssize_t nread;
	int msr_fd;

	sprintf(path, "/dev/cpu/%d/msr", cpu);
	msr_fd = open(path, O_RDONLY);
	if (msr_fd < 0)
		return -1;

	nread = pread(msr_fd, msr, sizeof *msr, offset);
	close(msr_fd);

	return (nread == sizeof *msr) ? 0 : -1;
}
207
a829eb4d 208void print_header(void)
103a8fea
LB
209{
210 if (show_pkg)
c98d5d94 211 outp += sprintf(outp, "pk");
e23da037 212 if (show_pkg)
c98d5d94 213 outp += sprintf(outp, " ");
103a8fea 214 if (show_core)
c98d5d94 215 outp += sprintf(outp, "cor");
103a8fea 216 if (show_cpu)
c98d5d94 217 outp += sprintf(outp, " CPU");
e23da037 218 if (show_pkg || show_core || show_cpu)
c98d5d94 219 outp += sprintf(outp, " ");
103a8fea 220 if (do_nhm_cstates)
c98d5d94 221 outp += sprintf(outp, " %%c0");
103a8fea 222 if (has_aperf)
c98d5d94
LB
223 outp += sprintf(outp, " GHz");
224 outp += sprintf(outp, " TSC");
103a8fea 225 if (do_nhm_cstates)
c98d5d94 226 outp += sprintf(outp, " %%c1");
103a8fea 227 if (do_nhm_cstates)
c98d5d94 228 outp += sprintf(outp, " %%c3");
103a8fea 229 if (do_nhm_cstates)
c98d5d94 230 outp += sprintf(outp, " %%c6");
103a8fea 231 if (do_snb_cstates)
c98d5d94 232 outp += sprintf(outp, " %%c7");
103a8fea 233 if (do_snb_cstates)
c98d5d94 234 outp += sprintf(outp, " %%pc2");
103a8fea 235 if (do_nhm_cstates)
c98d5d94 236 outp += sprintf(outp, " %%pc3");
103a8fea 237 if (do_nhm_cstates)
c98d5d94 238 outp += sprintf(outp, " %%pc6");
103a8fea 239 if (do_snb_cstates)
c98d5d94 240 outp += sprintf(outp, " %%pc7");
103a8fea 241 if (extra_msr_offset)
c98d5d94 242 outp += sprintf(outp, " MSR 0x%x ", extra_msr_offset);
103a8fea 243
c98d5d94 244 outp += sprintf(outp, "\n");
103a8fea
LB
245}
246
c98d5d94
LB
247int dump_counters(struct thread_data *t, struct core_data *c,
248 struct pkg_data *p)
103a8fea 249{
c98d5d94
LB
250 fprintf(stderr, "t %p, c %p, p %p\n", t, c, p);
251
252 if (t) {
253 fprintf(stderr, "CPU: %d flags 0x%x\n", t->cpu_id, t->flags);
254 fprintf(stderr, "TSC: %016llX\n", t->tsc);
255 fprintf(stderr, "aperf: %016llX\n", t->aperf);
256 fprintf(stderr, "mperf: %016llX\n", t->mperf);
257 fprintf(stderr, "c1: %016llX\n", t->c1);
258 fprintf(stderr, "msr0x%x: %016llX\n",
259 extra_msr_offset, t->extra_msr);
260 }
103a8fea 261
c98d5d94
LB
262 if (c) {
263 fprintf(stderr, "core: %d\n", c->core_id);
264 fprintf(stderr, "c3: %016llX\n", c->c3);
265 fprintf(stderr, "c6: %016llX\n", c->c6);
266 fprintf(stderr, "c7: %016llX\n", c->c7);
267 }
103a8fea 268
c98d5d94
LB
269 if (p) {
270 fprintf(stderr, "package: %d\n", p->package_id);
271 fprintf(stderr, "pc2: %016llX\n", p->pc2);
272 fprintf(stderr, "pc3: %016llX\n", p->pc3);
273 fprintf(stderr, "pc6: %016llX\n", p->pc6);
274 fprintf(stderr, "pc7: %016llX\n", p->pc7);
275 }
276 return 0;
103a8fea
LB
277}
278
e23da037
LB
279/*
280 * column formatting convention & formats
281 * package: "pk" 2 columns %2d
282 * core: "cor" 3 columns %3d
283 * CPU: "CPU" 3 columns %3d
284 * GHz: "GHz" 3 columns %3.2
285 * TSC: "TSC" 3 columns %3.2
286 * percentage " %pc3" %6.2
287 */
c98d5d94
LB
288int format_counters(struct thread_data *t, struct core_data *c,
289 struct pkg_data *p)
103a8fea
LB
290{
291 double interval_float;
292
c98d5d94
LB
293 /* if showing only 1st thread in core and this isn't one, bail out */
294 if (show_core_only && !(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
295 return 0;
296
297 /* if showing only 1st thread in pkg and this isn't one, bail out */
298 if (show_pkg_only && !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
299 return 0;
300
103a8fea
LB
301 interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0;
302
c98d5d94
LB
303 /* topo columns, print blanks on 1st (average) line */
304 if (t == &average.threads) {
103a8fea 305 if (show_pkg)
c98d5d94 306 outp += sprintf(outp, " ");
e23da037 307 if (show_pkg && show_core)
c98d5d94 308 outp += sprintf(outp, " ");
103a8fea 309 if (show_core)
c98d5d94 310 outp += sprintf(outp, " ");
103a8fea 311 if (show_cpu)
c98d5d94 312 outp += sprintf(outp, " " " ");
103a8fea 313 } else {
c98d5d94
LB
314 if (show_pkg) {
315 if (p)
316 outp += sprintf(outp, "%2d", p->package_id);
317 else
318 outp += sprintf(outp, " ");
319 }
e23da037 320 if (show_pkg && show_core)
c98d5d94
LB
321 outp += sprintf(outp, " ");
322 if (show_core) {
323 if (c)
324 outp += sprintf(outp, "%3d", c->core_id);
325 else
326 outp += sprintf(outp, " ");
327 }
103a8fea 328 if (show_cpu)
c98d5d94 329 outp += sprintf(outp, " %3d", t->cpu_id);
103a8fea
LB
330 }
331
332 /* %c0 */
333 if (do_nhm_cstates) {
e23da037 334 if (show_pkg || show_core || show_cpu)
c98d5d94 335 outp += sprintf(outp, " ");
103a8fea 336 if (!skip_c0)
c98d5d94 337 outp += sprintf(outp, "%6.2f", 100.0 * t->mperf/t->tsc);
103a8fea 338 else
c98d5d94 339 outp += sprintf(outp, " ****");
103a8fea
LB
340 }
341
342 /* GHz */
343 if (has_aperf) {
344 if (!aperf_mperf_unstable) {
c98d5d94
LB
345 outp += sprintf(outp, " %3.2f",
346 1.0 * t->tsc / units * t->aperf /
347 t->mperf / interval_float);
103a8fea 348 } else {
c98d5d94
LB
349 if (t->aperf > t->tsc || t->mperf > t->tsc) {
350 outp += sprintf(outp, " ***");
103a8fea 351 } else {
c98d5d94
LB
352 outp += sprintf(outp, "%3.1f*",
353 1.0 * t->tsc /
354 units * t->aperf /
355 t->mperf / interval_float);
103a8fea
LB
356 }
357 }
358 }
359
360 /* TSC */
c98d5d94 361 outp += sprintf(outp, "%5.2f", 1.0 * t->tsc/units/interval_float);
103a8fea
LB
362
363 if (do_nhm_cstates) {
364 if (!skip_c1)
c98d5d94 365 outp += sprintf(outp, " %6.2f", 100.0 * t->c1/t->tsc);
103a8fea 366 else
c98d5d94 367 outp += sprintf(outp, " ****");
103a8fea 368 }
c98d5d94
LB
369
370 /* print per-core data only for 1st thread in core */
371 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
372 goto done;
373
103a8fea 374 if (do_nhm_cstates)
c98d5d94 375 outp += sprintf(outp, " %6.2f", 100.0 * c->c3/t->tsc);
103a8fea 376 if (do_nhm_cstates)
c98d5d94 377 outp += sprintf(outp, " %6.2f", 100.0 * c->c6/t->tsc);
103a8fea 378 if (do_snb_cstates)
c98d5d94
LB
379 outp += sprintf(outp, " %6.2f", 100.0 * c->c7/t->tsc);
380
381 /* print per-package data only for 1st core in package */
382 if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
383 goto done;
384
103a8fea 385 if (do_snb_cstates)
c98d5d94 386 outp += sprintf(outp, " %6.2f", 100.0 * p->pc2/t->tsc);
103a8fea 387 if (do_nhm_cstates)
c98d5d94 388 outp += sprintf(outp, " %6.2f", 100.0 * p->pc3/t->tsc);
103a8fea 389 if (do_nhm_cstates)
c98d5d94 390 outp += sprintf(outp, " %6.2f", 100.0 * p->pc6/t->tsc);
103a8fea 391 if (do_snb_cstates)
c98d5d94
LB
392 outp += sprintf(outp, " %6.2f", 100.0 * p->pc7/t->tsc);
393done:
103a8fea 394 if (extra_msr_offset)
c98d5d94
LB
395 outp += sprintf(outp, " 0x%016llx", t->extra_msr);
396 outp += sprintf(outp, "\n");
397
398 return 0;
103a8fea
LB
399}
400
c98d5d94
LB
401void flush_stdout()
402{
403 fputs(output_buffer, stdout);
404 outp = output_buffer;
405}
406void flush_stderr()
407{
408 fputs(output_buffer, stderr);
409 outp = output_buffer;
410}
411void format_all_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
103a8fea 412{
e23da037 413 static int printed;
103a8fea 414
e23da037
LB
415 if (!printed || !summary_only)
416 print_header();
103a8fea 417
c98d5d94
LB
418 if (topo.num_cpus > 1)
419 format_counters(&average.threads, &average.cores,
420 &average.packages);
103a8fea 421
e23da037
LB
422 printed = 1;
423
424 if (summary_only)
425 return;
426
c98d5d94 427 for_all_cpus(format_counters, t, c, p);
103a8fea
LB
428}
429
c98d5d94
LB
430void
431delta_package(struct pkg_data *new, struct pkg_data *old)
432{
433 old->pc2 = new->pc2 - old->pc2;
434 old->pc3 = new->pc3 - old->pc3;
435 old->pc6 = new->pc6 - old->pc6;
436 old->pc7 = new->pc7 - old->pc7;
437}
103a8fea 438
c98d5d94
LB
439void
440delta_core(struct core_data *new, struct core_data *old)
103a8fea 441{
c98d5d94
LB
442 old->c3 = new->c3 - old->c3;
443 old->c6 = new->c6 - old->c6;
444 old->c7 = new->c7 - old->c7;
445}
103a8fea 446
c3ae331d
LB
447/*
448 * old = new - old
449 */
c98d5d94
LB
450void
451delta_thread(struct thread_data *new, struct thread_data *old,
452 struct core_data *core_delta)
453{
454 old->tsc = new->tsc - old->tsc;
455
456 /* check for TSC < 1 Mcycles over interval */
457 if (old->tsc < (1000 * 1000)) {
458 fprintf(stderr, "Insanely slow TSC rate, TSC stops in idle?\n");
459 fprintf(stderr, "You can disable all c-states by booting with \"idle=poll\"\n");
460 fprintf(stderr, "or just the deep ones with \"processor.max_cstate=1\"\n");
461 exit(-3);
462 }
103a8fea 463
c98d5d94 464 old->c1 = new->c1 - old->c1;
103a8fea 465
c98d5d94
LB
466 if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) {
467 old->aperf = new->aperf - old->aperf;
468 old->mperf = new->mperf - old->mperf;
469 } else {
103a8fea 470
c98d5d94
LB
471 if (!aperf_mperf_unstable) {
472 fprintf(stderr, "%s: APERF or MPERF went backwards *\n", progname);
473 fprintf(stderr, "* Frequency results do not cover entire interval *\n");
474 fprintf(stderr, "* fix this by running Linux-2.6.30 or later *\n");
103a8fea 475
c98d5d94 476 aperf_mperf_unstable = 1;
103a8fea 477 }
103a8fea 478 /*
c98d5d94
LB
479 * mperf delta is likely a huge "positive" number
480 * can not use it for calculating c0 time
103a8fea 481 */
c98d5d94
LB
482 skip_c0 = 1;
483 skip_c1 = 1;
484 }
103a8fea 485
103a8fea 486
c98d5d94 487 /*
c3ae331d
LB
488 * As counter collection is not atomic,
489 * it is possible for mperf's non-halted cycles + idle states
c98d5d94
LB
490 * to exceed TSC's all cycles: show c1 = 0% in that case.
491 */
c3ae331d 492 if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > old->tsc)
c98d5d94
LB
493 old->c1 = 0;
494 else {
495 /* normal case, derive c1 */
496 old->c1 = old->tsc - old->mperf - core_delta->c3
497 - core_delta->c6 - core_delta->c7;
498 }
c3ae331d 499
c98d5d94 500 if (old->mperf == 0) {
c3ae331d 501 if (verbose > 1) fprintf(stderr, "cpu%d MPERF 0!\n", old->cpu_id);
c98d5d94 502 old->mperf = 1; /* divide by 0 protection */
103a8fea 503 }
c98d5d94
LB
504
505 /*
506 * for "extra msr", just copy the latest w/o subtracting
507 */
508 old->extra_msr = new->extra_msr;
509}
510
511int delta_cpu(struct thread_data *t, struct core_data *c,
512 struct pkg_data *p, struct thread_data *t2,
513 struct core_data *c2, struct pkg_data *p2)
514{
515 /* calculate core delta only for 1st thread in core */
516 if (t->flags & CPU_IS_FIRST_THREAD_IN_CORE)
517 delta_core(c, c2);
518
519 /* always calculate thread delta */
520 delta_thread(t, t2, c2); /* c2 is core delta */
521
522 /* calculate package delta only for 1st core in package */
523 if (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)
524 delta_package(p, p2);
525
103a8fea
LB
526 return 0;
527}
528
c98d5d94
LB
529void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
530{
531 t->tsc = 0;
532 t->aperf = 0;
533 t->mperf = 0;
534 t->c1 = 0;
535
536 /* tells format_counters to dump all fields from this set */
537 t->flags = CPU_IS_FIRST_THREAD_IN_CORE | CPU_IS_FIRST_CORE_IN_PACKAGE;
538
539 c->c3 = 0;
540 c->c6 = 0;
541 c->c7 = 0;
542
543 p->pc2 = 0;
544 p->pc3 = 0;
545 p->pc6 = 0;
546 p->pc7 = 0;
547}
548int sum_counters(struct thread_data *t, struct core_data *c,
549 struct pkg_data *p)
103a8fea 550{
c98d5d94
LB
551 average.threads.tsc += t->tsc;
552 average.threads.aperf += t->aperf;
553 average.threads.mperf += t->mperf;
554 average.threads.c1 += t->c1;
103a8fea 555
c98d5d94
LB
556 /* sum per-core values only for 1st thread in core */
557 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
558 return 0;
103a8fea 559
c98d5d94
LB
560 average.cores.c3 += c->c3;
561 average.cores.c6 += c->c6;
562 average.cores.c7 += c->c7;
563
564 /* sum per-pkg values only for 1st core in pkg */
565 if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
566 return 0;
567
568 average.packages.pc2 += p->pc2;
569 average.packages.pc3 += p->pc3;
570 average.packages.pc6 += p->pc6;
571 average.packages.pc7 += p->pc7;
572
573 return 0;
574}
575/*
576 * sum the counters for all cpus in the system
577 * compute the weighted average
578 */
579void compute_average(struct thread_data *t, struct core_data *c,
580 struct pkg_data *p)
581{
582 clear_counters(&average.threads, &average.cores, &average.packages);
583
584 for_all_cpus(sum_counters, t, c, p);
585
586 average.threads.tsc /= topo.num_cpus;
587 average.threads.aperf /= topo.num_cpus;
588 average.threads.mperf /= topo.num_cpus;
589 average.threads.c1 /= topo.num_cpus;
590
591 average.cores.c3 /= topo.num_cores;
592 average.cores.c6 /= topo.num_cores;
593 average.cores.c7 /= topo.num_cores;
594
595 average.packages.pc2 /= topo.num_packages;
596 average.packages.pc3 /= topo.num_packages;
597 average.packages.pc6 /= topo.num_packages;
598 average.packages.pc7 /= topo.num_packages;
103a8fea
LB
599}
600
c98d5d94 601static unsigned long long rdtsc(void)
103a8fea 602{
c98d5d94 603 unsigned int low, high;
15aaa346 604
c98d5d94 605 asm volatile("rdtsc" : "=a" (low), "=d" (high));
15aaa346 606
c98d5d94
LB
607 return low | ((unsigned long long)high) << 32;
608}
15aaa346 609
15aaa346 610
c98d5d94
LB
611/*
612 * get_counters(...)
613 * migrate to cpu
614 * acquire and record local counters for that cpu
615 */
616int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
617{
618 int cpu = t->cpu_id;
88c3281f 619
c98d5d94
LB
620 if (cpu_migrate(cpu))
621 return -1;
15aaa346 622
c98d5d94
LB
623 t->tsc = rdtsc(); /* we are running on local CPU of interest */
624
625 if (has_aperf) {
626 if (get_msr(cpu, MSR_APERF, &t->aperf))
627 return -3;
628 if (get_msr(cpu, MSR_MPERF, &t->mperf))
629 return -4;
630 }
631
632 if (extra_msr_offset)
633 if (get_msr(cpu, extra_msr_offset, &t->extra_msr))
634 return -5;
635
636 /* collect core counters only for 1st thread in core */
637 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
638 return 0;
639
640 if (do_nhm_cstates) {
641 if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3))
642 return -6;
643 if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6))
644 return -7;
645 }
646
647 if (do_snb_cstates)
648 if (get_msr(cpu, MSR_CORE_C7_RESIDENCY, &c->c7))
649 return -8;
650
651 /* collect package counters only for 1st core in package */
652 if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
653 return 0;
654
655 if (do_nhm_cstates) {
656 if (get_msr(cpu, MSR_PKG_C3_RESIDENCY, &p->pc3))
657 return -9;
658 if (get_msr(cpu, MSR_PKG_C6_RESIDENCY, &p->pc6))
659 return -10;
660 }
661 if (do_snb_cstates) {
662 if (get_msr(cpu, MSR_PKG_C2_RESIDENCY, &p->pc2))
663 return -11;
664 if (get_msr(cpu, MSR_PKG_C7_RESIDENCY, &p->pc7))
665 return -12;
103a8fea 666 }
15aaa346 667 return 0;
103a8fea
LB
668}
669
c98d5d94 670void print_verbose_header(void)
103a8fea
LB
671{
672 unsigned long long msr;
673 unsigned int ratio;
674
675 if (!do_nehalem_platform_info)
676 return;
677
15aaa346 678 get_msr(0, MSR_NEHALEM_PLATFORM_INFO, &msr);
103a8fea
LB
679
680 ratio = (msr >> 40) & 0xFF;
681 fprintf(stderr, "%d * %.0f = %.0f MHz max efficiency\n",
682 ratio, bclk, ratio * bclk);
683
684 ratio = (msr >> 8) & 0xFF;
685 fprintf(stderr, "%d * %.0f = %.0f MHz TSC frequency\n",
686 ratio, bclk, ratio * bclk);
687
688 if (verbose > 1)
689 fprintf(stderr, "MSR_NEHALEM_PLATFORM_INFO: 0x%llx\n", msr);
690
691 if (!do_nehalem_turbo_ratio_limit)
692 return;
693
15aaa346 694 get_msr(0, MSR_NEHALEM_TURBO_RATIO_LIMIT, &msr);
103a8fea
LB
695
696 ratio = (msr >> 24) & 0xFF;
697 if (ratio)
698 fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 4 active cores\n",
699 ratio, bclk, ratio * bclk);
700
701 ratio = (msr >> 16) & 0xFF;
702 if (ratio)
703 fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 3 active cores\n",
704 ratio, bclk, ratio * bclk);
705
706 ratio = (msr >> 8) & 0xFF;
707 if (ratio)
708 fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 2 active cores\n",
709 ratio, bclk, ratio * bclk);
710
711 ratio = (msr >> 0) & 0xFF;
712 if (ratio)
713 fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 1 active cores\n",
714 ratio, bclk, ratio * bclk);
715
716}
717
c98d5d94 718void free_all_buffers(void)
103a8fea 719{
c98d5d94
LB
720 CPU_FREE(cpu_present_set);
721 cpu_present_set = NULL;
722 cpu_present_set = 0;
103a8fea 723
c98d5d94
LB
724 CPU_FREE(cpu_affinity_set);
725 cpu_affinity_set = NULL;
726 cpu_affinity_setsize = 0;
103a8fea 727
c98d5d94
LB
728 free(thread_even);
729 free(core_even);
730 free(package_even);
103a8fea 731
c98d5d94
LB
732 thread_even = NULL;
733 core_even = NULL;
734 package_even = NULL;
103a8fea 735
c98d5d94
LB
736 free(thread_odd);
737 free(core_odd);
738 free(package_odd);
103a8fea 739
c98d5d94
LB
740 thread_odd = NULL;
741 core_odd = NULL;
742 package_odd = NULL;
103a8fea 743
c98d5d94
LB
744 free(output_buffer);
745 output_buffer = NULL;
746 outp = NULL;
103a8fea
LB
747}
748
c98d5d94
LB
/*
 * cpu_is_first_sibling_in_core(cpu)
 * return 1 if given CPU is 1st HT sibling in the core
 *
 * Fixes: path[64] is one byte short for 7-digit cpu numbers (bumped to 80,
 * bounded with snprintf); fscanf result was previously unchecked.
 */
int cpu_is_first_sibling_in_core(int cpu)
{
	char path[80];
	FILE *filep;
	int first_cpu;

	snprintf(path, sizeof(path),
		"/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu);
	filep = fopen(path, "r");
	if (filep == NULL) {
		perror(path);
		exit(1);
	}
	if (fscanf(filep, "%d", &first_cpu) != 1) {
		fprintf(stderr, "%s: parse failure\n", path);
		exit(1);
	}
	fclose(filep);
	return (cpu == first_cpu);
}
769
c98d5d94
LB
/*
 * cpu_is_first_core_in_package(cpu)
 * return 1 if given CPU is 1st core in package
 *
 * Fixes: path buffer bumped to 80 and bounded with snprintf (64 was
 * marginal for large cpu numbers); fscanf result was previously unchecked.
 */
int cpu_is_first_core_in_package(int cpu)
{
	char path[80];
	FILE *filep;
	int first_cpu;

	snprintf(path, sizeof(path),
		"/sys/devices/system/cpu/cpu%d/topology/core_siblings_list", cpu);
	filep = fopen(path, "r");
	if (filep == NULL) {
		perror(path);
		exit(1);
	}
	if (fscanf(filep, "%d", &first_cpu) != 1) {
		fprintf(stderr, "%s: parse failure\n", path);
		exit(1);
	}
	fclose(filep);
	return (cpu == first_cpu);
}
790
/*
 * Return the sysfs physical_package_id for @cpu; exits on open or
 * parse failure (fscanf result was previously unchecked, leaving
 * `pkg` uninitialized on a malformed file).
 */
int get_physical_package_id(int cpu)
{
	char path[80];
	FILE *filep;
	int pkg;

	sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu);
	filep = fopen(path, "r");
	if (filep == NULL) {
		perror(path);
		exit(1);
	}
	if (fscanf(filep, "%d", &pkg) != 1) {
		fprintf(stderr, "%s: parse failure\n", path);
		exit(1);
	}
	fclose(filep);
	return pkg;
}
807
/*
 * Return the sysfs core_id for @cpu; exits on open or parse failure
 * (fscanf result was previously unchecked, leaving `core`
 * uninitialized on a malformed file).
 */
int get_core_id(int cpu)
{
	char path[80];
	FILE *filep;
	int core;

	sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/core_id", cpu);
	filep = fopen(path, "r");
	if (filep == NULL) {
		perror(path);
		exit(1);
	}
	if (fscanf(filep, "%d", &core) != 1) {
		fprintf(stderr, "%s: parse failure\n", path);
		exit(1);
	}
	fclose(filep);
	return core;
}
824
c98d5d94
LB
/* Count HT siblings of @cpu (1 or 2) by parsing thread_siblings_list. */
int get_num_ht_siblings(int cpu)
{
	char path[80];
	FILE *filep;
	int sib1, sib2;
	int matches;
	char character;

	sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu);
	filep = fopen(path, "r");
	if (filep == NULL) {
		perror(path);
		exit(1);
	}
	/*
	 * file format:
	 * if a pair of number with a character between: 2 siblings (eg. 1-2, or 1,4)
	 * otherwinse 1 sibling (self).
	 */
	matches = fscanf(filep, "%d%c%d\n", &sib1, &character, &sib2);

	fclose(filep);

	return (matches == 3) ? 2 : 1;
}
853
103a8fea 854/*
c98d5d94
LB
855 * run func(thread, core, package) in topology order
856 * skip non-present cpus
103a8fea
LB
857 */
858
c98d5d94
LB
859int for_all_cpus_2(int (func)(struct thread_data *, struct core_data *,
860 struct pkg_data *, struct thread_data *, struct core_data *,
861 struct pkg_data *), struct thread_data *thread_base,
862 struct core_data *core_base, struct pkg_data *pkg_base,
863 struct thread_data *thread_base2, struct core_data *core_base2,
864 struct pkg_data *pkg_base2)
865{
866 int retval, pkg_no, core_no, thread_no;
867
868 for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) {
869 for (core_no = 0; core_no < topo.num_cores_per_pkg; ++core_no) {
870 for (thread_no = 0; thread_no <
871 topo.num_threads_per_core; ++thread_no) {
872 struct thread_data *t, *t2;
873 struct core_data *c, *c2;
874 struct pkg_data *p, *p2;
875
876 t = GET_THREAD(thread_base, thread_no, core_no, pkg_no);
877
878 if (cpu_is_not_present(t->cpu_id))
879 continue;
880
881 t2 = GET_THREAD(thread_base2, thread_no, core_no, pkg_no);
882
883 c = GET_CORE(core_base, core_no, pkg_no);
884 c2 = GET_CORE(core_base2, core_no, pkg_no);
885
886 p = GET_PKG(pkg_base, pkg_no);
887 p2 = GET_PKG(pkg_base2, pkg_no);
888
889 retval = func(t, c, p, t2, c2, p2);
890 if (retval)
891 return retval;
892 }
893 }
894 }
895 return 0;
896}
897
898/*
899 * run func(cpu) on every cpu in /proc/stat
900 * return max_cpu number
901 */
902int for_all_proc_cpus(int (func)(int))
103a8fea
LB
903{
904 FILE *fp;
c98d5d94 905 int cpu_num;
103a8fea
LB
906 int retval;
907
908 fp = fopen(proc_stat, "r");
909 if (fp == NULL) {
910 perror(proc_stat);
911 exit(1);
912 }
913
914 retval = fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n");
915 if (retval != 0) {
916 perror("/proc/stat format");
917 exit(1);
918 }
919
c98d5d94
LB
920 while (1) {
921 retval = fscanf(fp, "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", &cpu_num);
103a8fea
LB
922 if (retval != 1)
923 break;
924
c98d5d94
LB
925 retval = func(cpu_num);
926 if (retval) {
927 fclose(fp);
928 return(retval);
929 }
103a8fea
LB
930 }
931 fclose(fp);
c98d5d94 932 return 0;
103a8fea
LB
933}
934
935void re_initialize(void)
936{
c98d5d94
LB
937 free_all_buffers();
938 setup_all_buffers();
939 printf("turbostat: re-initialized with num_cpus %d\n", topo.num_cpus);
103a8fea
LB
940}
941
c98d5d94 942
103a8fea 943/*
c98d5d94
LB
944 * count_cpus()
945 * remember the last one seen, it will be the max
103a8fea 946 */
c98d5d94 947int count_cpus(int cpu)
103a8fea 948{
c98d5d94
LB
949 if (topo.max_cpu_num < cpu)
950 topo.max_cpu_num = cpu;
103a8fea 951
c98d5d94
LB
952 topo.num_cpus += 1;
953 return 0;
954}
955int mark_cpu_present(int cpu)
956{
957 CPU_SET_S(cpu, cpu_present_setsize, cpu_present_set);
15aaa346 958 return 0;
103a8fea
LB
959}
960
961void turbostat_loop()
962{
c98d5d94
LB
963 int retval;
964
103a8fea 965restart:
c98d5d94
LB
966 retval = for_all_cpus(get_counters, EVEN_COUNTERS);
967 if (retval) {
968 re_initialize();
969 goto restart;
970 }
103a8fea
LB
971 gettimeofday(&tv_even, (struct timezone *)NULL);
972
973 while (1) {
c98d5d94 974 if (for_all_proc_cpus(cpu_is_not_present)) {
103a8fea
LB
975 re_initialize();
976 goto restart;
977 }
978 sleep(interval_sec);
c98d5d94
LB
979 retval = for_all_cpus(get_counters, ODD_COUNTERS);
980 if (retval) {
15aaa346
LB
981 re_initialize();
982 goto restart;
983 }
103a8fea 984 gettimeofday(&tv_odd, (struct timezone *)NULL);
103a8fea 985 timersub(&tv_odd, &tv_even, &tv_delta);
c98d5d94
LB
986 for_all_cpus_2(delta_cpu, ODD_COUNTERS, EVEN_COUNTERS);
987 compute_average(EVEN_COUNTERS);
988 format_all_counters(EVEN_COUNTERS);
989 flush_stdout();
15aaa346 990 sleep(interval_sec);
c98d5d94
LB
991 retval = for_all_cpus(get_counters, EVEN_COUNTERS);
992 if (retval) {
103a8fea
LB
993 re_initialize();
994 goto restart;
995 }
103a8fea 996 gettimeofday(&tv_even, (struct timezone *)NULL);
103a8fea 997 timersub(&tv_even, &tv_odd, &tv_delta);
c98d5d94
LB
998 for_all_cpus_2(delta_cpu, EVEN_COUNTERS, ODD_COUNTERS);
999 compute_average(ODD_COUNTERS);
1000 format_all_counters(ODD_COUNTERS);
1001 flush_stdout();
103a8fea
LB
1002 }
1003}
1004
/* Verify the msr driver is loaded; exit with a hint if it is not. */
void check_dev_msr()
{
	struct stat sb;

	if (stat("/dev/cpu/0/msr", &sb)) {
		fprintf(stderr, "no /dev/cpu/0/msr\n");
		fprintf(stderr, "Try \"# modprobe msr\"\n");
		exit(-5);
	}
}
1015
/* MSR access requires root; exit otherwise. */
void check_super_user()
{
	if (getuid() != 0) {
		fprintf(stderr, "must be root\n");
		exit(-6);
	}
}
1023
1024int has_nehalem_turbo_ratio_limit(unsigned int family, unsigned int model)
1025{
1026 if (!genuine_intel)
1027 return 0;
1028
1029 if (family != 6)
1030 return 0;
1031
1032 switch (model) {
1033 case 0x1A: /* Core i7, Xeon 5500 series - Bloomfield, Gainstown NHM-EP */
1034 case 0x1E: /* Core i7 and i5 Processor - Clarksfield, Lynnfield, Jasper Forest */
1035 case 0x1F: /* Core i7 and i5 Processor - Nehalem */
1036 case 0x25: /* Westmere Client - Clarkdale, Arrandale */
1037 case 0x2C: /* Westmere EP - Gulftown */
1038 case 0x2A: /* SNB */
1039 case 0x2D: /* SNB Xeon */
553575f1
LB
1040 case 0x3A: /* IVB */
1041 case 0x3D: /* IVB Xeon */
103a8fea
LB
1042 return 1;
1043 case 0x2E: /* Nehalem-EX Xeon - Beckton */
1044 case 0x2F: /* Westmere-EX Xeon - Eagleton */
1045 default:
1046 return 0;
1047 }
1048}
1049
1050int is_snb(unsigned int family, unsigned int model)
1051{
1052 if (!genuine_intel)
1053 return 0;
1054
1055 switch (model) {
1056 case 0x2A:
1057 case 0x2D:
650a37f3
LB
1058 case 0x3A: /* IVB */
1059 case 0x3D: /* IVB Xeon */
103a8fea
LB
1060 return 1;
1061 }
1062 return 0;
1063}
1064
/* Base clock in MHz: 100 MHz on SNB-class parts, 133.33 MHz on NHM/WSM. */
double discover_bclk(unsigned int family, unsigned int model)
{
	return is_snb(family, model) ? 100.00 : 133.33;
}
1072
/*
 * check_cpuid()
 *
 * Probe CPUID for the capabilities turbostat requires and set the
 * corresponding file-scope flags:
 *   genuine_intel, has_invariant_tsc, has_aperf,
 *   do_nehalem_platform_info, do_nhm_cstates, do_snb_cstates,
 *   do_nehalem_turbo_ratio_limit, bclk.
 * Exits with an error message if a required feature (MSRs, invariant
 * TSC, APERF/MPERF) is missing.
 */
void check_cpuid()
{
	unsigned int eax, ebx, ecx, edx, max_level;
	unsigned int fms, family, model, stepping;

	eax = ebx = ecx = edx = 0;

	/* CPUID leaf 0: max basic leaf in EAX, vendor string in EBX/EDX/ECX */
	asm("cpuid" : "=a" (max_level), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0));

	/* vendor string "GenuineIntel" == "Genu" "ineI" "ntel" (little-endian) */
	if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e)
		genuine_intel = 1;

	if (verbose)
		fprintf(stderr, "%.4s%.4s%.4s ",
			(char *)&ebx, (char *)&edx, (char *)&ecx);

	/* CPUID leaf 1: family/model/stepping in EAX, feature flags in ECX/EDX */
	asm("cpuid" : "=a" (fms), "=c" (ecx), "=d" (edx) : "a" (1) : "ebx");
	family = (fms >> 8) & 0xf;
	model = (fms >> 4) & 0xf;
	stepping = fms & 0xf;
	/* extended model bits apply only to family 6 and 0xf */
	if (family == 6 || family == 0xf)
		model += ((fms >> 16) & 0xf) << 4;

	if (verbose)
		fprintf(stderr, "%d CPUID levels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n",
			max_level, family, model, stepping, family, model, stepping);

	/* CPUID.1:EDX bit 5 advertises RDMSR/WRMSR support */
	if (!(edx & (1 << 5))) {
		fprintf(stderr, "CPUID: no MSR\n");
		exit(1);
	}

	/*
	 * check max extended function levels of CPUID.
	 * This is needed to check for invariant TSC.
	 * This check is valid for both Intel and AMD.
	 */
	ebx = ecx = edx = 0;
	asm("cpuid" : "=a" (max_level), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x80000000));

	if (max_level < 0x80000007) {
		fprintf(stderr, "CPUID: no invariant TSC (max_level 0x%x)\n", max_level);
		exit(1);
	}

	/*
	 * Non-Stop TSC is advertised by CPUID.EAX=0x80000007: EDX.bit8
	 * this check is valid for both Intel and AMD
	 */
	asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x80000007));
	has_invariant_tsc = edx & (1 << 8);

	if (!has_invariant_tsc) {
		fprintf(stderr, "No invariant TSC\n");
		exit(1);
	}

	/*
	 * APERF/MPERF is advertised by CPUID.EAX=0x6: ECX.bit0
	 * this check is valid for both Intel and AMD
	 */

	asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x6));
	has_aperf = ecx & (1 << 0);

	if (!has_aperf) {
		fprintf(stderr, "No APERF MSR\n");
		exit(1);
	}

	do_nehalem_platform_info = genuine_intel && has_invariant_tsc;
	do_nhm_cstates = genuine_intel;	/* all Intel w/ non-stop TSC have NHM counters */
	do_snb_cstates = is_snb(family, model);
	bclk = discover_bclk(family, model);

	do_nehalem_turbo_ratio_limit = has_nehalem_turbo_ratio_limit(family, model);
}
1149
1150
1151void usage()
1152{
1153 fprintf(stderr, "%s: [-v] [-M MSR#] [-i interval_sec | command ...]\n",
1154 progname);
1155 exit(1);
1156}
1157
1158
1159/*
1160 * in /dev/cpu/ return success for names that are numbers
1161 * ie. filter out ".", "..", "microcode".
1162 */
1163int dir_filter(const struct dirent *dirp)
1164{
1165 if (isdigit(dirp->d_name[0]))
1166 return 1;
1167 else
1168 return 0;
1169}
1170
/*
 * open_dev_cpu_msr()
 *
 * Placeholder: per-CPU /dev/cpu/N/msr nodes are opened elsewhere;
 * this always reports success.
 */
int open_dev_cpu_msr(int dummy1)
{
	return 0;
}
1175
c98d5d94
LB
/*
 * topology_probe()
 *
 * Walk /proc-visible CPUs to size the system: fills in topo.num_cpus,
 * topo.max_cpu_num, topo.num_cores_per_pkg, topo.num_packages and
 * topo.num_threads_per_core, allocates/initializes cpu_present_set and
 * cpu_affinity_set, and enables the cpu/core/pkg output columns when
 * more than one of each exists (unless summary_only).  Exits on
 * allocation failure.
 */
void topology_probe()
{
	int i;
	int max_core_id = 0;
	int max_package_id = 0;
	int max_siblings = 0;
	/* temporary per-cpu scratch, freed before return */
	struct cpu_topology {
		int core_id;
		int physical_package_id;
	} *cpus;

	/* Initialize num_cpus, max_cpu_num */
	topo.num_cpus = 0;
	topo.max_cpu_num = 0;
	for_all_proc_cpus(count_cpus);
	if (!summary_only && topo.num_cpus > 1)
		show_cpu = 1;

	if (verbose > 1)
		fprintf(stderr, "num_cpus %d max_cpu_num %d\n", topo.num_cpus, topo.max_cpu_num);

	/* indexed by cpu number, so size by max_cpu_num+1 (ids may be sparse) */
	cpus = calloc(1, (topo.max_cpu_num + 1) * sizeof(struct cpu_topology));
	if (cpus == NULL) {
		perror("calloc cpus");
		exit(1);
	}

	/*
	 * Allocate and initialize cpu_present_set
	 */
	cpu_present_set = CPU_ALLOC((topo.max_cpu_num + 1));
	if (cpu_present_set == NULL) {
		perror("CPU_ALLOC");
		exit(3);
	}
	cpu_present_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
	CPU_ZERO_S(cpu_present_setsize, cpu_present_set);
	for_all_proc_cpus(mark_cpu_present);

	/*
	 * Allocate and initialize cpu_affinity_set
	 */
	cpu_affinity_set = CPU_ALLOC((topo.max_cpu_num + 1));
	if (cpu_affinity_set == NULL) {
		perror("CPU_ALLOC");
		exit(3);
	}
	cpu_affinity_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
	CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);


	/*
	 * For online cpus
	 * find max_core_id, max_package_id
	 */
	for (i = 0; i <= topo.max_cpu_num; ++i) {
		int siblings;

		if (cpu_is_not_present(i)) {
			if (verbose > 1)
				fprintf(stderr, "cpu%d NOT PRESENT\n", i);
			continue;
		}
		cpus[i].core_id = get_core_id(i);
		if (cpus[i].core_id > max_core_id)
			max_core_id = cpus[i].core_id;

		cpus[i].physical_package_id = get_physical_package_id(i);
		if (cpus[i].physical_package_id > max_package_id)
			max_package_id = cpus[i].physical_package_id;

		siblings = get_num_ht_siblings(i);
		if (siblings > max_siblings)
			max_siblings = siblings;
		if (verbose > 1)
			fprintf(stderr, "cpu %d pkg %d core %d\n",
				i, cpus[i].physical_package_id, cpus[i].core_id);
	}
	/* ids are 0-based, so counts are max+1 */
	topo.num_cores_per_pkg = max_core_id + 1;
	if (verbose > 1)
		fprintf(stderr, "max_core_id %d, sizing for %d cores per package\n",
			max_core_id, topo.num_cores_per_pkg);
	if (!summary_only && topo.num_cores_per_pkg > 1)
		show_core = 1;

	topo.num_packages = max_package_id + 1;
	if (verbose > 1)
		fprintf(stderr, "max_package_id %d, sizing for %d packages\n",
			max_package_id, topo.num_packages);
	if (!summary_only && topo.num_packages > 1)
		show_pkg = 1;

	topo.num_threads_per_core = max_siblings;
	if (verbose > 1)
		fprintf(stderr, "max_siblings %d\n", max_siblings);

	free(cpus);
}
1274
1275void
1276allocate_counters(struct thread_data **t, struct core_data **c, struct pkg_data **p)
1277{
1278 int i;
1279
1280 *t = calloc(topo.num_threads_per_core * topo.num_cores_per_pkg *
1281 topo.num_packages, sizeof(struct thread_data));
1282 if (*t == NULL)
1283 goto error;
1284
1285 for (i = 0; i < topo.num_threads_per_core *
1286 topo.num_cores_per_pkg * topo.num_packages; i++)
1287 (*t)[i].cpu_id = -1;
1288
1289 *c = calloc(topo.num_cores_per_pkg * topo.num_packages,
1290 sizeof(struct core_data));
1291 if (*c == NULL)
1292 goto error;
1293
1294 for (i = 0; i < topo.num_cores_per_pkg * topo.num_packages; i++)
1295 (*c)[i].core_id = -1;
1296
1297 *p = calloc(topo.num_packages, sizeof(struct pkg_data));
1298 if (*p == NULL)
1299 goto error;
1300
1301 for (i = 0; i < topo.num_packages; i++)
1302 (*p)[i].package_id = i;
1303
1304 return;
1305error:
1306 perror("calloc counters");
1307 exit(1);
1308}
/*
 * init_counter()
 *
 * Bind cpu_id to its slot in the thread/core/package counter arrays:
 * set t->cpu_id, c->core_id, p->package_id, and mark thread 0 of each
 * core with FIRST_THREAD_IN_CORE (and FIRST_CORE_IN_PACKAGE when this
 * cpu is also the package's first core).
 * (topo.num_cores is incremented by the caller, initialize_counters().)
 */
void init_counter(struct thread_data *thread_base, struct core_data *core_base,
	struct pkg_data *pkg_base, int thread_num, int core_num,
	int pkg_num, int cpu_id)
{
	struct thread_data *t;
	struct core_data *c;
	struct pkg_data *p;

	/* index into the flat arrays by (thread, core, package) position */
	t = GET_THREAD(thread_base, thread_num, core_num, pkg_num);
	c = GET_CORE(core_base, core_num, pkg_num);
	p = GET_PKG(pkg_base, pkg_num);

	t->cpu_id = cpu_id;
	if (thread_num == 0) {
		/* thread 0 carries the per-core / per-package aggregation flags */
		t->flags |= CPU_IS_FIRST_THREAD_IN_CORE;
		if (cpu_is_first_core_in_package(cpu_id))
			t->flags |= CPU_IS_FIRST_CORE_IN_PACKAGE;
	}

	c->core_id = core_num;
	p->package_id = pkg_num;
}
1339
1340
1341int initialize_counters(int cpu_id)
1342{
1343 int my_thread_id, my_core_id, my_package_id;
1344
1345 my_package_id = get_physical_package_id(cpu_id);
1346 my_core_id = get_core_id(cpu_id);
1347
1348 if (cpu_is_first_sibling_in_core(cpu_id)) {
1349 my_thread_id = 0;
1350 topo.num_cores++;
1351 } else {
1352 my_thread_id = 1;
1353 }
1354
1355 init_counter(EVEN_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id);
1356 init_counter(ODD_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id);
1357 return 0;
1358}
1359
1360void allocate_output_buffer()
1361{
1362 output_buffer = calloc(1, (1 + topo.num_cpus) * 128);
1363 outp = output_buffer;
1364 if (outp == NULL) {
1365 perror("calloc");
1366 exit(-1);
1367 }
1368}
1369
/*
 * setup_all_buffers()
 *
 * One-time setup: probe topology, allocate the even/odd counter arrays
 * and the output buffer, then bind every present cpu to its counter
 * slots.  Order matters: topology_probe() must run first so the
 * allocations can be sized from topo.
 */
void setup_all_buffers(void)
{
	topology_probe();
	allocate_counters(&thread_even, &core_even, &package_even);
	allocate_counters(&thread_odd, &core_odd, &package_odd);
	allocate_output_buffer();
	for_all_proc_cpus(initialize_counters);
}
103a8fea
LB
/*
 * turbostat_init()
 *
 * Verify the environment (CPUID features, /dev/cpu/ * /msr access,
 * root privilege — each helper exits on failure), then allocate all
 * per-cpu buffers and optionally print the verbose hardware summary.
 */
void turbostat_init()
{
	check_cpuid();

	check_dev_msr();
	check_super_user();

	setup_all_buffers();

	if (verbose)
		print_verbose_header();
}
1390
1391int fork_it(char **argv)
1392{
103a8fea 1393 pid_t child_pid;
d15cf7c1 1394
c98d5d94
LB
1395 for_all_cpus(get_counters, EVEN_COUNTERS);
1396 /* clear affinity side-effect of get_counters() */
1397 sched_setaffinity(0, cpu_present_setsize, cpu_present_set);
103a8fea
LB
1398 gettimeofday(&tv_even, (struct timezone *)NULL);
1399
1400 child_pid = fork();
1401 if (!child_pid) {
1402 /* child */
1403 execvp(argv[0], argv);
1404 } else {
1405 int status;
1406
1407 /* parent */
1408 if (child_pid == -1) {
1409 perror("fork");
1410 exit(1);
1411 }
1412
1413 signal(SIGINT, SIG_IGN);
1414 signal(SIGQUIT, SIG_IGN);
1415 if (waitpid(child_pid, &status, 0) == -1) {
1416 perror("wait");
1417 exit(1);
1418 }
1419 }
c98d5d94
LB
1420 /*
1421 * n.b. fork_it() does not check for errors from for_all_cpus()
1422 * because re-starting is problematic when forking
1423 */
1424 for_all_cpus(get_counters, ODD_COUNTERS);
103a8fea 1425 gettimeofday(&tv_odd, (struct timezone *)NULL);
103a8fea 1426 timersub(&tv_odd, &tv_even, &tv_delta);
c98d5d94
LB
1427 for_all_cpus_2(delta_cpu, ODD_COUNTERS, EVEN_COUNTERS);
1428 compute_average(EVEN_COUNTERS);
1429 format_all_counters(EVEN_COUNTERS);
1430 flush_stderr();
103a8fea 1431
6eab04a8 1432 fprintf(stderr, "%.6f sec\n", tv_delta.tv_sec + tv_delta.tv_usec/1000000.0);
103a8fea
LB
1433
1434 return 0;
1435}
1436
1437void cmdline(int argc, char **argv)
1438{
1439 int opt;
1440
1441 progname = argv[0];
1442
c98d5d94 1443 while ((opt = getopt(argc, argv, "+cpsvi:M:")) != -1) {
103a8fea 1444 switch (opt) {
c98d5d94
LB
1445 case 'c':
1446 show_core_only++;
1447 break;
1448 case 'p':
1449 show_pkg_only++;
1450 break;
e23da037
LB
1451 case 's':
1452 summary_only++;
1453 break;
103a8fea
LB
1454 case 'v':
1455 verbose++;
1456 break;
1457 case 'i':
1458 interval_sec = atoi(optarg);
1459 break;
1460 case 'M':
1461 sscanf(optarg, "%x", &extra_msr_offset);
1462 if (verbose > 1)
1463 fprintf(stderr, "MSR 0x%X\n", extra_msr_offset);
1464 break;
1465 default:
1466 usage();
1467 }
1468 }
1469}
1470
1471int main(int argc, char **argv)
1472{
1473 cmdline(argc, argv);
1474
1475 if (verbose > 1)
c98d5d94 1476 fprintf(stderr, "turbostat v2.0 May 16, 2012"
103a8fea 1477 " - Len Brown <lenb@kernel.org>\n");
103a8fea
LB
1478
1479 turbostat_init();
1480
1481 /*
1482 * if any params left, it must be a command to fork
1483 */
1484 if (argc - optind)
1485 return fork_it(argv + optind);
1486 else
1487 turbostat_loop();
1488
1489 return 0;
1490}