1 /*
2 * turbostat -- show CPU frequency and C-state residency
3 * on modern Intel turbo-capable processors.
4 *
5 * Copyright (c) 2013 Intel Corporation.
6 * Len Brown <len.brown@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
20 */
21
22 #define _GNU_SOURCE
23 #include MSRHEADER
24 #include <stdarg.h>
25 #include <stdio.h>
26 #include <err.h>
27 #include <unistd.h>
28 #include <sys/types.h>
29 #include <sys/wait.h>
30 #include <sys/stat.h>
31 #include <sys/resource.h>
32 #include <fcntl.h>
33 #include <signal.h>
34 #include <sys/time.h>
35 #include <stdlib.h>
36 #include <getopt.h>
37 #include <dirent.h>
38 #include <string.h>
39 #include <ctype.h>
40 #include <sched.h>
41 #include <time.h>
42 #include <cpuid.h>
43 #include <linux/capability.h>
44 #include <errno.h>
45
46 char *proc_stat = "/proc/stat";
47 struct timespec interval_ts = {5, 0};
48 unsigned int debug;
49 unsigned int rapl_joules;
50 unsigned int summary_only;
51 unsigned int dump_only;
52 unsigned int skip_c0;
53 unsigned int skip_c1;
54 unsigned int do_nhm_cstates;
55 unsigned int do_snb_cstates;
56 unsigned int do_knl_cstates;
57 unsigned int do_pc2;
58 unsigned int do_pc3;
59 unsigned int do_pc6;
60 unsigned int do_pc7;
61 unsigned int do_c8_c9_c10;
62 unsigned int do_skl_residency;
63 unsigned int do_slm_cstates;
64 unsigned int use_c1_residency_msr;
65 unsigned int has_aperf;
66 unsigned int has_epb;
67 unsigned int units = 1000000; /* MHz etc */
68 unsigned int genuine_intel;
69 unsigned int has_invariant_tsc;
70 unsigned int do_nhm_platform_info;
71 unsigned int extra_msr_offset32;
72 unsigned int extra_msr_offset64;
73 unsigned int extra_delta_offset32;
74 unsigned int extra_delta_offset64;
75 unsigned int aperf_mperf_multiplier = 1;
76 int do_smi;
77 double bclk;
78 double base_hz;
79 unsigned int has_base_hz;
80 double tsc_tweak = 1.0;
81 unsigned int show_pkg;
82 unsigned int show_core;
83 unsigned int show_cpu;
84 unsigned int show_pkg_only;
85 unsigned int show_core_only;
86 char *output_buffer, *outp;
87 unsigned int do_rapl;
88 unsigned int do_dts;
89 unsigned int do_ptm;
90 unsigned int tcc_activation_temp;
91 unsigned int tcc_activation_temp_override;
92 double rapl_power_units, rapl_time_units;
93 double rapl_dram_energy_units, rapl_energy_units;
94 double rapl_joule_counter_range;
95 unsigned int do_core_perf_limit_reasons;
96 unsigned int do_gfx_perf_limit_reasons;
97 unsigned int do_ring_perf_limit_reasons;
98 unsigned int crystal_hz;
99 unsigned long long tsc_hz;
100 int base_cpu;
101 double discover_bclk(unsigned int family, unsigned int model);
102 unsigned int has_hwp; /* IA32_PM_ENABLE, IA32_HWP_CAPABILITIES */
103 /* IA32_HWP_REQUEST, IA32_HWP_STATUS */
104 unsigned int has_hwp_notify; /* IA32_HWP_INTERRUPT */
105 unsigned int has_hwp_activity_window; /* IA32_HWP_REQUEST[bits 41:32] */
106 unsigned int has_hwp_epp; /* IA32_HWP_REQUEST[bits 31:24] */
107 unsigned int has_hwp_pkg; /* IA32_HWP_REQUEST_PKG */
108
109 #define RAPL_PKG (1 << 0)
110 /* 0x610 MSR_PKG_POWER_LIMIT */
111 /* 0x611 MSR_PKG_ENERGY_STATUS */
112 #define RAPL_PKG_PERF_STATUS (1 << 1)
113 /* 0x613 MSR_PKG_PERF_STATUS */
114 #define RAPL_PKG_POWER_INFO (1 << 2)
115 /* 0x614 MSR_PKG_POWER_INFO */
116
117 #define RAPL_DRAM (1 << 3)
118 /* 0x618 MSR_DRAM_POWER_LIMIT */
119 /* 0x619 MSR_DRAM_ENERGY_STATUS */
120 #define RAPL_DRAM_PERF_STATUS (1 << 4)
121 /* 0x61b MSR_DRAM_PERF_STATUS */
122 #define RAPL_DRAM_POWER_INFO (1 << 5)
123 /* 0x61c MSR_DRAM_POWER_INFO */
124
125 #define RAPL_CORES (1 << 6)
126 /* 0x638 MSR_PP0_POWER_LIMIT */
127 /* 0x639 MSR_PP0_ENERGY_STATUS */
128 #define RAPL_CORE_POLICY (1 << 7)
129 /* 0x63a MSR_PP0_POLICY */
130
131 #define RAPL_GFX (1 << 8)
132 /* 0x640 MSR_PP1_POWER_LIMIT */
133 /* 0x641 MSR_PP1_ENERGY_STATUS */
134 /* 0x642 MSR_PP1_POLICY */
135 #define TJMAX_DEFAULT 100
136
137 #define MAX(a, b) ((a) > (b) ? (a) : (b))
138
139 int aperf_mperf_unstable;
140 int backwards_count;
141 char *progname;
142
143 cpu_set_t *cpu_present_set, *cpu_affinity_set;
144 size_t cpu_present_setsize, cpu_affinity_setsize;
145
146 struct thread_data {
147 unsigned long long tsc;
148 unsigned long long aperf;
149 unsigned long long mperf;
150 unsigned long long c1;
151 unsigned long long extra_msr64;
152 unsigned long long extra_delta64;
153 unsigned long long extra_msr32;
154 unsigned long long extra_delta32;
155 unsigned int smi_count;
156 unsigned int cpu_id;
157 unsigned int flags;
158 #define CPU_IS_FIRST_THREAD_IN_CORE 0x2
159 #define CPU_IS_FIRST_CORE_IN_PACKAGE 0x4
160 } *thread_even, *thread_odd;
161
162 struct core_data {
163 unsigned long long c3;
164 unsigned long long c6;
165 unsigned long long c7;
166 unsigned int core_temp_c;
167 unsigned int core_id;
168 } *core_even, *core_odd;
169
170 struct pkg_data {
171 unsigned long long pc2;
172 unsigned long long pc3;
173 unsigned long long pc6;
174 unsigned long long pc7;
175 unsigned long long pc8;
176 unsigned long long pc9;
177 unsigned long long pc10;
178 unsigned long long pkg_wtd_core_c0;
179 unsigned long long pkg_any_core_c0;
180 unsigned long long pkg_any_gfxe_c0;
181 unsigned long long pkg_both_core_gfxe_c0;
182 unsigned int package_id;
183 unsigned int energy_pkg; /* MSR_PKG_ENERGY_STATUS */
184 unsigned int energy_dram; /* MSR_DRAM_ENERGY_STATUS */
185 unsigned int energy_cores; /* MSR_PP0_ENERGY_STATUS */
186 unsigned int energy_gfx; /* MSR_PP1_ENERGY_STATUS */
187 unsigned int rapl_pkg_perf_status; /* MSR_PKG_PERF_STATUS */
188 unsigned int rapl_dram_perf_status; /* MSR_DRAM_PERF_STATUS */
189 unsigned int pkg_temp_c;
190
191 } *package_even, *package_odd;
192
193 #define ODD_COUNTERS thread_odd, core_odd, package_odd
194 #define EVEN_COUNTERS thread_even, core_even, package_even
195
196 #define GET_THREAD(thread_base, thread_no, core_no, pkg_no) \
197 (thread_base + (pkg_no) * topo.num_cores_per_pkg * \
198 topo.num_threads_per_core + \
199 (core_no) * topo.num_threads_per_core + (thread_no))
200 #define GET_CORE(core_base, core_no, pkg_no) \
201 (core_base + (pkg_no) * topo.num_cores_per_pkg + (core_no))
202 #define GET_PKG(pkg_base, pkg_no) (pkg_base + pkg_no)
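/*
 * Worked example (illustrative, assuming a hypothetical topology of
 * 2 packages x 4 cores/pkg x 2 threads/core): the per-thread data is
 * laid out package-major, so
 *   GET_THREAD(base, thread_no=1, core_no=3, pkg_no=1)
 *     = base + 1*4*2 + 3*2 + 1 = base + 15
 * i.e. the 2nd thread of the 4th core in the 2nd package.
 */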
203
204 struct system_summary {
205 struct thread_data threads;
206 struct core_data cores;
207 struct pkg_data packages;
208 } sum, average;
209
210
211 struct topo_params {
212 int num_packages;
213 int num_cpus;
214 int num_cores;
215 int max_cpu_num;
216 int num_cores_per_pkg;
217 int num_threads_per_core;
218 } topo;
219
220 struct timeval tv_even, tv_odd, tv_delta;
221
222 void setup_all_buffers(void);
223
224 int cpu_is_not_present(int cpu)
225 {
226 return !CPU_ISSET_S(cpu, cpu_present_setsize, cpu_present_set);
227 }
228 /*
229 * run func(thread, core, package) in topology order
230 * skip non-present cpus
231 */
232
233 int for_all_cpus(int (func)(struct thread_data *, struct core_data *, struct pkg_data *),
234 struct thread_data *thread_base, struct core_data *core_base, struct pkg_data *pkg_base)
235 {
236 int retval, pkg_no, core_no, thread_no;
237
238 for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) {
239 for (core_no = 0; core_no < topo.num_cores_per_pkg; ++core_no) {
240 for (thread_no = 0; thread_no <
241 topo.num_threads_per_core; ++thread_no) {
242 struct thread_data *t;
243 struct core_data *c;
244 struct pkg_data *p;
245
246 t = GET_THREAD(thread_base, thread_no, core_no, pkg_no);
247
248 if (cpu_is_not_present(t->cpu_id))
249 continue;
250
251 c = GET_CORE(core_base, core_no, pkg_no);
252 p = GET_PKG(pkg_base, pkg_no);
253
254 retval = func(t, c, p);
255 if (retval)
256 return retval;
257 }
258 }
259 }
260 return 0;
261 }
262
263 int cpu_migrate(int cpu)
264 {
265 CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);
266 CPU_SET_S(cpu, cpu_affinity_setsize, cpu_affinity_set);
267 if (sched_setaffinity(0, cpu_affinity_setsize, cpu_affinity_set) == -1)
268 return -1;
269 else
270 return 0;
271 }
272
273 int get_msr(int cpu, off_t offset, unsigned long long *msr)
274 {
275 ssize_t retval;
276 char pathname[32];
277 int fd;
278
279 sprintf(pathname, "/dev/cpu/%d/msr", cpu);
280 fd = open(pathname, O_RDONLY);
281 if (fd < 0)
282 err(-1, "%s open failed, try chown or chmod +r /dev/cpu/*/msr, or run as root", pathname);
283
284 retval = pread(fd, msr, sizeof *msr, offset);
285 close(fd);
286
287 if (retval != sizeof *msr)
288 err(-1, "%s offset 0x%llx read failed", pathname, (unsigned long long)offset);
289
290 return 0;
291 }
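/*
 * Typical call pattern (as used in get_counters() below):
 *
 *	unsigned long long aperf;
 *
 *	if (get_msr(cpu, MSR_IA32_APERF, &aperf))
 *		return -3;
 *
 * Note that get_msr() exits via err() on any failure, so the only
 * return value callers ever actually see is 0.
 */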
292
293 /*
294 * Example Format w/ field column widths:
295 *
296 * Package Core CPU Avg_MHz Bzy_MHz TSC_MHz SMI %Busy CPU_%c1 CPU_%c3 CPU_%c6 CPU_%c7 CoreTmp PkgTmp Pkg%pc2 Pkg%pc3 Pkg%pc6 Pkg%pc7 PkgWatt CorWatt GFXWatt
297 * 123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678
298 */
299
300 void print_header(void)
301 {
302 if (show_pkg)
303 outp += sprintf(outp, " Package");
304 if (show_core)
305 outp += sprintf(outp, " Core");
306 if (show_cpu)
307 outp += sprintf(outp, " CPU");
308 if (has_aperf)
309 outp += sprintf(outp, " Avg_MHz");
310 if (has_aperf)
311 outp += sprintf(outp, " %%Busy");
312 if (has_aperf)
313 outp += sprintf(outp, " Bzy_MHz");
314 outp += sprintf(outp, " TSC_MHz");
315
316 if (extra_delta_offset32)
317 outp += sprintf(outp, " count 0x%03X", extra_delta_offset32);
318 if (extra_delta_offset64)
319 outp += sprintf(outp, " COUNT 0x%03X", extra_delta_offset64);
320 if (extra_msr_offset32)
321 outp += sprintf(outp, " MSR 0x%03X", extra_msr_offset32);
322 if (extra_msr_offset64)
323 outp += sprintf(outp, " MSR 0x%03X", extra_msr_offset64);
324
325 if (!debug)
326 goto done;
327
328 if (do_smi)
329 outp += sprintf(outp, " SMI");
330
331 if (do_nhm_cstates)
332 outp += sprintf(outp, " CPU%%c1");
333 if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates)
334 outp += sprintf(outp, " CPU%%c3");
335 if (do_nhm_cstates)
336 outp += sprintf(outp, " CPU%%c6");
337 if (do_snb_cstates)
338 outp += sprintf(outp, " CPU%%c7");
339
340 if (do_dts)
341 outp += sprintf(outp, " CoreTmp");
342 if (do_ptm)
343 outp += sprintf(outp, " PkgTmp");
344
345 if (do_skl_residency) {
346 outp += sprintf(outp, " Totl%%C0");
347 outp += sprintf(outp, " Any%%C0");
348 outp += sprintf(outp, " GFX%%C0");
349 outp += sprintf(outp, " CPUGFX%%");
350 }
351
352 if (do_pc2)
353 outp += sprintf(outp, " Pkg%%pc2");
354 if (do_pc3)
355 outp += sprintf(outp, " Pkg%%pc3");
356 if (do_pc6)
357 outp += sprintf(outp, " Pkg%%pc6");
358 if (do_pc7)
359 outp += sprintf(outp, " Pkg%%pc7");
360 if (do_c8_c9_c10) {
361 outp += sprintf(outp, " Pkg%%pc8");
362 outp += sprintf(outp, " Pkg%%pc9");
363 outp += sprintf(outp, " Pk%%pc10");
364 }
365
366 if (do_rapl && !rapl_joules) {
367 if (do_rapl & RAPL_PKG)
368 outp += sprintf(outp, " PkgWatt");
369 if (do_rapl & RAPL_CORES)
370 outp += sprintf(outp, " CorWatt");
371 if (do_rapl & RAPL_GFX)
372 outp += sprintf(outp, " GFXWatt");
373 if (do_rapl & RAPL_DRAM)
374 outp += sprintf(outp, " RAMWatt");
375 if (do_rapl & RAPL_PKG_PERF_STATUS)
376 outp += sprintf(outp, " PKG_%%");
377 if (do_rapl & RAPL_DRAM_PERF_STATUS)
378 outp += sprintf(outp, " RAM_%%");
379 } else if (do_rapl && rapl_joules) {
380 if (do_rapl & RAPL_PKG)
381 outp += sprintf(outp, " Pkg_J");
382 if (do_rapl & RAPL_CORES)
383 outp += sprintf(outp, " Cor_J");
384 if (do_rapl & RAPL_GFX)
385 outp += sprintf(outp, " GFX_J");
386 if (do_rapl & RAPL_DRAM)
387 outp += sprintf(outp, " RAM_J");
388 if (do_rapl & RAPL_PKG_PERF_STATUS)
389 outp += sprintf(outp, " PKG_%%");
390 if (do_rapl & RAPL_DRAM_PERF_STATUS)
391 outp += sprintf(outp, " RAM_%%");
392 outp += sprintf(outp, " time");
393
394 }
395 done:
396 outp += sprintf(outp, "\n");
397 }
398
399 int dump_counters(struct thread_data *t, struct core_data *c,
400 struct pkg_data *p)
401 {
402 outp += sprintf(outp, "t %p, c %p, p %p\n", t, c, p);
403
404 if (t) {
405 outp += sprintf(outp, "CPU: %d flags 0x%x\n",
406 t->cpu_id, t->flags);
407 outp += sprintf(outp, "TSC: %016llX\n", t->tsc);
408 outp += sprintf(outp, "aperf: %016llX\n", t->aperf);
409 outp += sprintf(outp, "mperf: %016llX\n", t->mperf);
410 outp += sprintf(outp, "c1: %016llX\n", t->c1);
411 outp += sprintf(outp, "msr0x%x: %08llX\n",
412 extra_delta_offset32, t->extra_delta32);
413 outp += sprintf(outp, "msr0x%x: %016llX\n",
414 extra_delta_offset64, t->extra_delta64);
415 outp += sprintf(outp, "msr0x%x: %08llX\n",
416 extra_msr_offset32, t->extra_msr32);
417 outp += sprintf(outp, "msr0x%x: %016llX\n",
418 extra_msr_offset64, t->extra_msr64);
419 if (do_smi)
420 outp += sprintf(outp, "SMI: %08X\n", t->smi_count);
421 }
422
423 if (c) {
424 outp += sprintf(outp, "core: %d\n", c->core_id);
425 outp += sprintf(outp, "c3: %016llX\n", c->c3);
426 outp += sprintf(outp, "c6: %016llX\n", c->c6);
427 outp += sprintf(outp, "c7: %016llX\n", c->c7);
428 outp += sprintf(outp, "DTS: %dC\n", c->core_temp_c);
429 }
430
431 if (p) {
432 outp += sprintf(outp, "package: %d\n", p->package_id);
433
434 outp += sprintf(outp, "Weighted cores: %016llX\n", p->pkg_wtd_core_c0);
435 outp += sprintf(outp, "Any cores: %016llX\n", p->pkg_any_core_c0);
436 outp += sprintf(outp, "Any GFX: %016llX\n", p->pkg_any_gfxe_c0);
437 outp += sprintf(outp, "CPU + GFX: %016llX\n", p->pkg_both_core_gfxe_c0);
438
439 outp += sprintf(outp, "pc2: %016llX\n", p->pc2);
440 if (do_pc3)
441 outp += sprintf(outp, "pc3: %016llX\n", p->pc3);
442 if (do_pc6)
443 outp += sprintf(outp, "pc6: %016llX\n", p->pc6);
444 if (do_pc7)
445 outp += sprintf(outp, "pc7: %016llX\n", p->pc7);
446 outp += sprintf(outp, "pc8: %016llX\n", p->pc8);
447 outp += sprintf(outp, "pc9: %016llX\n", p->pc9);
448 outp += sprintf(outp, "pc10: %016llX\n", p->pc10);
449 outp += sprintf(outp, "Joules PKG: %0X\n", p->energy_pkg);
450 outp += sprintf(outp, "Joules COR: %0X\n", p->energy_cores);
451 outp += sprintf(outp, "Joules GFX: %0X\n", p->energy_gfx);
452 outp += sprintf(outp, "Joules RAM: %0X\n", p->energy_dram);
453 outp += sprintf(outp, "Throttle PKG: %0X\n",
454 p->rapl_pkg_perf_status);
455 outp += sprintf(outp, "Throttle RAM: %0X\n",
456 p->rapl_dram_perf_status);
457 outp += sprintf(outp, "PTM: %dC\n", p->pkg_temp_c);
458 }
459
460 outp += sprintf(outp, "\n");
461
462 return 0;
463 }
464
465 /*
466 * column formatting convention & formats
467 */
468 int format_counters(struct thread_data *t, struct core_data *c,
469 struct pkg_data *p)
470 {
471 double interval_float;
472 char *fmt8;
473
474 /* if showing only 1st thread in core and this isn't one, bail out */
475 if (show_core_only && !(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
476 return 0;
477
478 /* if showing only 1st thread in pkg and this isn't one, bail out */
479 if (show_pkg_only && !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
480 return 0;
481
482 interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0;
483
484 /* topo columns, print blanks on 1st (average) line */
485 if (t == &average.threads) {
486 if (show_pkg)
487 outp += sprintf(outp, " -");
488 if (show_core)
489 outp += sprintf(outp, " -");
490 if (show_cpu)
491 outp += sprintf(outp, " -");
492 } else {
493 if (show_pkg) {
494 if (p)
495 outp += sprintf(outp, "%8d", p->package_id);
496 else
497 outp += sprintf(outp, " -");
498 }
499 if (show_core) {
500 if (c)
501 outp += sprintf(outp, "%8d", c->core_id);
502 else
503 outp += sprintf(outp, " -");
504 }
505 if (show_cpu)
506 outp += sprintf(outp, "%8d", t->cpu_id);
507 }
508
509 /* Avg_MHz */
510 if (has_aperf)
511 outp += sprintf(outp, "%8.0f",
512 1.0 / units * t->aperf / interval_float);
513
514 /* %Busy */
515 if (has_aperf) {
516 if (!skip_c0)
517 outp += sprintf(outp, "%8.2f", 100.0 * t->mperf/t->tsc/tsc_tweak);
518 else
519 outp += sprintf(outp, "********");
520 }
521
522 /* Bzy_MHz */
523 if (has_aperf) {
524 if (has_base_hz)
525 outp += sprintf(outp, "%8.0f", base_hz / units * t->aperf / t->mperf);
526 else
527 outp += sprintf(outp, "%8.0f",
528 1.0 * t->tsc / units * t->aperf / t->mperf / interval_float);
529 }
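/*
 * Derivation note for the non-base_hz case above: MPERF ticks at the
 * TSC rate only while the CPU is in C0, and APERF counts actual core
 * cycles in C0, so
 *	Bzy_MHz ~ (delta_TSC / interval / units) * (delta_APERF / delta_MPERF)
 * i.e. the TSC-derived clock scaled by the ratio of actual to
 * reference cycles while not idle.
 */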
530
531 /* TSC_MHz */
532 outp += sprintf(outp, "%8.0f", 1.0 * t->tsc/units/interval_float);
533
534 /* delta */
535 if (extra_delta_offset32)
536 outp += sprintf(outp, " %11llu", t->extra_delta32);
537
538 /* DELTA */
539 if (extra_delta_offset64)
540 outp += sprintf(outp, " %11llu", t->extra_delta64);
541 /* msr */
542 if (extra_msr_offset32)
543 outp += sprintf(outp, " 0x%08llx", t->extra_msr32);
544
545 /* MSR */
546 if (extra_msr_offset64)
547 outp += sprintf(outp, " 0x%016llx", t->extra_msr64);
548
549 if (!debug)
550 goto done;
551
552 /* SMI */
553 if (do_smi)
554 outp += sprintf(outp, "%8d", t->smi_count);
555
556 if (do_nhm_cstates) {
557 if (!skip_c1)
558 outp += sprintf(outp, "%8.2f", 100.0 * t->c1/t->tsc);
559 else
560 outp += sprintf(outp, "********");
561 }
562
563 /* print per-core data only for 1st thread in core */
564 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
565 goto done;
566
567 if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates)
568 outp += sprintf(outp, "%8.2f", 100.0 * c->c3/t->tsc);
569 if (do_nhm_cstates)
570 outp += sprintf(outp, "%8.2f", 100.0 * c->c6/t->tsc);
571 if (do_snb_cstates)
572 outp += sprintf(outp, "%8.2f", 100.0 * c->c7/t->tsc);
573
574 if (do_dts)
575 outp += sprintf(outp, "%8d", c->core_temp_c);
576
577 /* print per-package data only for 1st core in package */
578 if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
579 goto done;
580
581 /* PkgTmp */
582 if (do_ptm)
583 outp += sprintf(outp, "%8d", p->pkg_temp_c);
584
585 /* Totl%C0, Any%C0 GFX%C0 CPUGFX% */
586 if (do_skl_residency) {
587 outp += sprintf(outp, "%8.2f", 100.0 * p->pkg_wtd_core_c0/t->tsc);
588 outp += sprintf(outp, "%8.2f", 100.0 * p->pkg_any_core_c0/t->tsc);
589 outp += sprintf(outp, "%8.2f", 100.0 * p->pkg_any_gfxe_c0/t->tsc);
590 outp += sprintf(outp, "%8.2f", 100.0 * p->pkg_both_core_gfxe_c0/t->tsc);
591 }
592
593 if (do_pc2)
594 outp += sprintf(outp, "%8.2f", 100.0 * p->pc2/t->tsc);
595 if (do_pc3)
596 outp += sprintf(outp, "%8.2f", 100.0 * p->pc3/t->tsc);
597 if (do_pc6)
598 outp += sprintf(outp, "%8.2f", 100.0 * p->pc6/t->tsc);
599 if (do_pc7)
600 outp += sprintf(outp, "%8.2f", 100.0 * p->pc7/t->tsc);
601 if (do_c8_c9_c10) {
602 outp += sprintf(outp, "%8.2f", 100.0 * p->pc8/t->tsc);
603 outp += sprintf(outp, "%8.2f", 100.0 * p->pc9/t->tsc);
604 outp += sprintf(outp, "%8.2f", 100.0 * p->pc10/t->tsc);
605 }
606
607 /*
608 * If measurement interval exceeds minimum RAPL Joule Counter range,
609 * indicate that results are suspect by printing "**" in fraction place.
610 */
611 if (interval_float < rapl_joule_counter_range)
612 fmt8 = "%8.2f";
613 else
614 fmt8 = " %6.0f**";
615
616 if (do_rapl && !rapl_joules) {
617 if (do_rapl & RAPL_PKG)
618 outp += sprintf(outp, fmt8, p->energy_pkg * rapl_energy_units / interval_float);
619 if (do_rapl & RAPL_CORES)
620 outp += sprintf(outp, fmt8, p->energy_cores * rapl_energy_units / interval_float);
621 if (do_rapl & RAPL_GFX)
622 outp += sprintf(outp, fmt8, p->energy_gfx * rapl_energy_units / interval_float);
623 if (do_rapl & RAPL_DRAM)
624 outp += sprintf(outp, fmt8, p->energy_dram * rapl_dram_energy_units / interval_float);
625 if (do_rapl & RAPL_PKG_PERF_STATUS)
626 outp += sprintf(outp, fmt8, 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float);
627 if (do_rapl & RAPL_DRAM_PERF_STATUS)
628 outp += sprintf(outp, fmt8, 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float);
629 } else if (do_rapl && rapl_joules) {
630 if (do_rapl & RAPL_PKG)
631 outp += sprintf(outp, fmt8,
632 p->energy_pkg * rapl_energy_units);
633 if (do_rapl & RAPL_CORES)
634 outp += sprintf(outp, fmt8,
635 p->energy_cores * rapl_energy_units);
636 if (do_rapl & RAPL_GFX)
637 outp += sprintf(outp, fmt8,
638 p->energy_gfx * rapl_energy_units);
639 if (do_rapl & RAPL_DRAM)
640 outp += sprintf(outp, fmt8,
641 p->energy_dram * rapl_dram_energy_units);
642 if (do_rapl & RAPL_PKG_PERF_STATUS)
643 outp += sprintf(outp, fmt8, 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float);
644 if (do_rapl & RAPL_DRAM_PERF_STATUS)
645 outp += sprintf(outp, fmt8, 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float);
646
647 outp += sprintf(outp, fmt8, interval_float);
648 }
649 done:
650 outp += sprintf(outp, "\n");
651
652 return 0;
653 }
654
655 void flush_stdout()
656 {
657 fputs(output_buffer, stdout);
658 fflush(stdout);
659 outp = output_buffer;
660 }
661 void flush_stderr()
662 {
663 fputs(output_buffer, stderr);
664 outp = output_buffer;
665 }
666 void format_all_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
667 {
668 static int printed;
669
670 if (!printed || !summary_only)
671 print_header();
672
673 if (topo.num_cpus > 1)
674 format_counters(&average.threads, &average.cores,
675 &average.packages);
676
677 printed = 1;
678
679 if (summary_only)
680 return;
681
682 for_all_cpus(format_counters, t, c, p);
683 }
684
685 #define DELTA_WRAP32(new, old) \
686 if (new > old) { \
687 old = new - old; \
688 } else { \
689 old = 0x100000000 + new - old; \
690 }
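/*
 * Worked example: the RAPL energy-status counters are only 32 bits
 * wide, so a sample that wraps, e.g. old = 0xFFFFFFF0 and
 * new = 0x00000010, is corrected to
 *	0x100000000 + 0x10 - 0xFFFFFFF0 = 0x20
 * instead of producing a huge bogus delta.
 */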
691
692 void
693 delta_package(struct pkg_data *new, struct pkg_data *old)
694 {
695
696 if (do_skl_residency) {
697 old->pkg_wtd_core_c0 = new->pkg_wtd_core_c0 - old->pkg_wtd_core_c0;
698 old->pkg_any_core_c0 = new->pkg_any_core_c0 - old->pkg_any_core_c0;
699 old->pkg_any_gfxe_c0 = new->pkg_any_gfxe_c0 - old->pkg_any_gfxe_c0;
700 old->pkg_both_core_gfxe_c0 = new->pkg_both_core_gfxe_c0 - old->pkg_both_core_gfxe_c0;
701 }
702 old->pc2 = new->pc2 - old->pc2;
703 if (do_pc3)
704 old->pc3 = new->pc3 - old->pc3;
705 if (do_pc6)
706 old->pc6 = new->pc6 - old->pc6;
707 if (do_pc7)
708 old->pc7 = new->pc7 - old->pc7;
709 old->pc8 = new->pc8 - old->pc8;
710 old->pc9 = new->pc9 - old->pc9;
711 old->pc10 = new->pc10 - old->pc10;
712 old->pkg_temp_c = new->pkg_temp_c;
713
714 DELTA_WRAP32(new->energy_pkg, old->energy_pkg);
715 DELTA_WRAP32(new->energy_cores, old->energy_cores);
716 DELTA_WRAP32(new->energy_gfx, old->energy_gfx);
717 DELTA_WRAP32(new->energy_dram, old->energy_dram);
718 DELTA_WRAP32(new->rapl_pkg_perf_status, old->rapl_pkg_perf_status);
719 DELTA_WRAP32(new->rapl_dram_perf_status, old->rapl_dram_perf_status);
720 }
721
722 void
723 delta_core(struct core_data *new, struct core_data *old)
724 {
725 old->c3 = new->c3 - old->c3;
726 old->c6 = new->c6 - old->c6;
727 old->c7 = new->c7 - old->c7;
728 old->core_temp_c = new->core_temp_c;
729 }
730
731 /*
732 * old = new - old
733 */
734 void
735 delta_thread(struct thread_data *new, struct thread_data *old,
736 struct core_data *core_delta)
737 {
738 old->tsc = new->tsc - old->tsc;
739
740 /* check for TSC < 1 Mcycles over interval */
741 if (old->tsc < (1000 * 1000))
742 errx(-3, "Insanely slow TSC rate, TSC stops in idle?\n"
743 "You can disable all c-states by booting with \"idle=poll\"\n"
744 "or just the deep ones with \"processor.max_cstate=1\"");
745
746 old->c1 = new->c1 - old->c1;
747
748 if (has_aperf) {
749 if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) {
750 old->aperf = new->aperf - old->aperf;
751 old->mperf = new->mperf - old->mperf;
752 } else {
753
754 if (!aperf_mperf_unstable) {
755 fprintf(stderr, "%s: APERF or MPERF went backwards *\n", progname);
756 fprintf(stderr, "* Frequency results do not cover entire interval *\n");
757 fprintf(stderr, "* fix this by running Linux-2.6.30 or later *\n");
758
759 aperf_mperf_unstable = 1;
760 }
761 /*
762 * mperf delta is likely a huge "positive" number
763 * can not use it for calculating c0 time
764 */
765 skip_c0 = 1;
766 skip_c1 = 1;
767 }
768 }
769
770
771 if (use_c1_residency_msr) {
772 /*
773 * Some models have a dedicated C1 residency MSR,
774 * which should be more accurate than the derivation below.
775 */
776 } else {
777 /*
778 * As counter collection is not atomic,
779 * it is possible for mperf's non-halted cycles + idle states
780 * to exceed TSC's all cycles: show c1 = 0% in that case.
781 */
782 if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > old->tsc)
783 old->c1 = 0;
784 else {
785 /* normal case, derive c1 */
786 old->c1 = old->tsc - old->mperf - core_delta->c3
787 - core_delta->c6 - core_delta->c7;
788 }
789 }
790
791 if (old->mperf == 0) {
792 if (debug > 1) fprintf(stderr, "cpu%d MPERF 0!\n", old->cpu_id);
793 old->mperf = 1; /* divide by 0 protection */
794 }
795
796 old->extra_delta32 = new->extra_delta32 - old->extra_delta32;
797 old->extra_delta32 &= 0xFFFFFFFF;
798
799 old->extra_delta64 = new->extra_delta64 - old->extra_delta64;
800
801 /*
802 * Extra MSR is just a snapshot, simply copy latest w/o subtracting
803 */
804 old->extra_msr32 = new->extra_msr32;
805 old->extra_msr64 = new->extra_msr64;
806
807 if (do_smi)
808 old->smi_count = new->smi_count - old->smi_count;
809 }
810
811 int delta_cpu(struct thread_data *t, struct core_data *c,
812 struct pkg_data *p, struct thread_data *t2,
813 struct core_data *c2, struct pkg_data *p2)
814 {
815 /* calculate core delta only for 1st thread in core */
816 if (t->flags & CPU_IS_FIRST_THREAD_IN_CORE)
817 delta_core(c, c2);
818
819 /* always calculate thread delta */
820 delta_thread(t, t2, c2); /* c2 is core delta */
821
822 /* calculate package delta only for 1st core in package */
823 if (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)
824 delta_package(p, p2);
825
826 return 0;
827 }
828
829 void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
830 {
831 t->tsc = 0;
832 t->aperf = 0;
833 t->mperf = 0;
834 t->c1 = 0;
835
836 t->smi_count = 0;
837 t->extra_delta32 = 0;
838 t->extra_delta64 = 0;
839
840 /* tells format_counters to dump all fields from this set */
841 t->flags = CPU_IS_FIRST_THREAD_IN_CORE | CPU_IS_FIRST_CORE_IN_PACKAGE;
842
843 c->c3 = 0;
844 c->c6 = 0;
845 c->c7 = 0;
846 c->core_temp_c = 0;
847
848 p->pkg_wtd_core_c0 = 0;
849 p->pkg_any_core_c0 = 0;
850 p->pkg_any_gfxe_c0 = 0;
851 p->pkg_both_core_gfxe_c0 = 0;
852
853 p->pc2 = 0;
854 if (do_pc3)
855 p->pc3 = 0;
856 if (do_pc6)
857 p->pc6 = 0;
858 if (do_pc7)
859 p->pc7 = 0;
860 p->pc8 = 0;
861 p->pc9 = 0;
862 p->pc10 = 0;
863
864 p->energy_pkg = 0;
865 p->energy_dram = 0;
866 p->energy_cores = 0;
867 p->energy_gfx = 0;
868 p->rapl_pkg_perf_status = 0;
869 p->rapl_dram_perf_status = 0;
870 p->pkg_temp_c = 0;
871 }
872 int sum_counters(struct thread_data *t, struct core_data *c,
873 struct pkg_data *p)
874 {
875 average.threads.tsc += t->tsc;
876 average.threads.aperf += t->aperf;
877 average.threads.mperf += t->mperf;
878 average.threads.c1 += t->c1;
879
880 average.threads.extra_delta32 += t->extra_delta32;
881 average.threads.extra_delta64 += t->extra_delta64;
882
883 /* sum per-core values only for 1st thread in core */
884 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
885 return 0;
886
887 average.cores.c3 += c->c3;
888 average.cores.c6 += c->c6;
889 average.cores.c7 += c->c7;
890
891 average.cores.core_temp_c = MAX(average.cores.core_temp_c, c->core_temp_c);
892
893 /* sum per-pkg values only for 1st core in pkg */
894 if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
895 return 0;
896
897 if (do_skl_residency) {
898 average.packages.pkg_wtd_core_c0 += p->pkg_wtd_core_c0;
899 average.packages.pkg_any_core_c0 += p->pkg_any_core_c0;
900 average.packages.pkg_any_gfxe_c0 += p->pkg_any_gfxe_c0;
901 average.packages.pkg_both_core_gfxe_c0 += p->pkg_both_core_gfxe_c0;
902 }
903
904 average.packages.pc2 += p->pc2;
905 if (do_pc3)
906 average.packages.pc3 += p->pc3;
907 if (do_pc6)
908 average.packages.pc6 += p->pc6;
909 if (do_pc7)
910 average.packages.pc7 += p->pc7;
911 average.packages.pc8 += p->pc8;
912 average.packages.pc9 += p->pc9;
913 average.packages.pc10 += p->pc10;
914
915 average.packages.energy_pkg += p->energy_pkg;
916 average.packages.energy_dram += p->energy_dram;
917 average.packages.energy_cores += p->energy_cores;
918 average.packages.energy_gfx += p->energy_gfx;
919
920 average.packages.pkg_temp_c = MAX(average.packages.pkg_temp_c, p->pkg_temp_c);
921
922 average.packages.rapl_pkg_perf_status += p->rapl_pkg_perf_status;
923 average.packages.rapl_dram_perf_status += p->rapl_dram_perf_status;
924 return 0;
925 }
926 /*
927 * sum the counters for all cpus in the system
928 * compute the weighted average
929 */
930 void compute_average(struct thread_data *t, struct core_data *c,
931 struct pkg_data *p)
932 {
933 clear_counters(&average.threads, &average.cores, &average.packages);
934
935 for_all_cpus(sum_counters, t, c, p);
936
937 average.threads.tsc /= topo.num_cpus;
938 average.threads.aperf /= topo.num_cpus;
939 average.threads.mperf /= topo.num_cpus;
940 average.threads.c1 /= topo.num_cpus;
941
942 average.threads.extra_delta32 /= topo.num_cpus;
943 average.threads.extra_delta32 &= 0xFFFFFFFF;
944
945 average.threads.extra_delta64 /= topo.num_cpus;
946
947 average.cores.c3 /= topo.num_cores;
948 average.cores.c6 /= topo.num_cores;
949 average.cores.c7 /= topo.num_cores;
950
951 if (do_skl_residency) {
952 average.packages.pkg_wtd_core_c0 /= topo.num_packages;
953 average.packages.pkg_any_core_c0 /= topo.num_packages;
954 average.packages.pkg_any_gfxe_c0 /= topo.num_packages;
955 average.packages.pkg_both_core_gfxe_c0 /= topo.num_packages;
956 }
957
958 average.packages.pc2 /= topo.num_packages;
959 if (do_pc3)
960 average.packages.pc3 /= topo.num_packages;
961 if (do_pc6)
962 average.packages.pc6 /= topo.num_packages;
963 if (do_pc7)
964 average.packages.pc7 /= topo.num_packages;
965
966 average.packages.pc8 /= topo.num_packages;
967 average.packages.pc9 /= topo.num_packages;
968 average.packages.pc10 /= topo.num_packages;
969 }
970
971 static unsigned long long rdtsc(void)
972 {
973 unsigned int low, high;
974
975 asm volatile("rdtsc" : "=a" (low), "=d" (high));
976
977 return low | ((unsigned long long)high) << 32;
978 }
979
980
981 /*
982 * get_counters(...)
983 * migrate to cpu
984 * acquire and record local counters for that cpu
985 */
986 int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
987 {
988 int cpu = t->cpu_id;
989 unsigned long long msr;
990
991 if (cpu_migrate(cpu)) {
992 fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
993 return -1;
994 }
995
996 t->tsc = rdtsc(); /* we are running on local CPU of interest */
997
998 if (has_aperf) {
999 if (get_msr(cpu, MSR_IA32_APERF, &t->aperf))
1000 return -3;
1001 if (get_msr(cpu, MSR_IA32_MPERF, &t->mperf))
1002 return -4;
1003 t->aperf = t->aperf * aperf_mperf_multiplier;
1004 t->mperf = t->mperf * aperf_mperf_multiplier;
1005 }
1006
1007 if (do_smi) {
1008 if (get_msr(cpu, MSR_SMI_COUNT, &msr))
1009 return -5;
1010 t->smi_count = msr & 0xFFFFFFFF;
1011 }
1012 if (extra_delta_offset32) {
1013 if (get_msr(cpu, extra_delta_offset32, &msr))
1014 return -5;
1015 t->extra_delta32 = msr & 0xFFFFFFFF;
1016 }
1017
1018 if (extra_delta_offset64)
1019 if (get_msr(cpu, extra_delta_offset64, &t->extra_delta64))
1020 return -5;
1021
1022 if (extra_msr_offset32) {
1023 if (get_msr(cpu, extra_msr_offset32, &msr))
1024 return -5;
1025 t->extra_msr32 = msr & 0xFFFFFFFF;
1026 }
1027
1028 if (extra_msr_offset64)
1029 if (get_msr(cpu, extra_msr_offset64, &t->extra_msr64))
1030 return -5;
1031
1032 if (use_c1_residency_msr) {
1033 if (get_msr(cpu, MSR_CORE_C1_RES, &t->c1))
1034 return -6;
1035 }
1036
1037 /* collect core counters only for 1st thread in core */
1038 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
1039 return 0;
1040
1041 if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates) {
1042 if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3))
1043 return -6;
1044 }
1045
1046 if (do_nhm_cstates && !do_knl_cstates) {
1047 if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6))
1048 return -7;
1049 } else if (do_knl_cstates) {
1050 if (get_msr(cpu, MSR_KNL_CORE_C6_RESIDENCY, &c->c6))
1051 return -7;
1052 }
1053
1054 if (do_snb_cstates)
1055 if (get_msr(cpu, MSR_CORE_C7_RESIDENCY, &c->c7))
1056 return -8;
1057
1058 if (do_dts) {
1059 if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr))
1060 return -9;
1061 c->core_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F);
1062 }
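/*
 * Illustrative decode: with tcc_activation_temp (TjMax) = 100 C and a
 * digital readout of 28 in THERM_STATUS bits [22:16], the core
 * temperature computed above is 100 - 28 = 72 C.
 */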
1063
1064
1065 /* collect package counters only for 1st core in package */
1066 if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
1067 return 0;
1068
1069 if (do_skl_residency) {
1070 if (get_msr(cpu, MSR_PKG_WEIGHTED_CORE_C0_RES, &p->pkg_wtd_core_c0))
1071 return -10;
1072 if (get_msr(cpu, MSR_PKG_ANY_CORE_C0_RES, &p->pkg_any_core_c0))
1073 return -11;
1074 if (get_msr(cpu, MSR_PKG_ANY_GFXE_C0_RES, &p->pkg_any_gfxe_c0))
1075 return -12;
1076 if (get_msr(cpu, MSR_PKG_BOTH_CORE_GFXE_C0_RES, &p->pkg_both_core_gfxe_c0))
1077 return -13;
1078 }
1079 if (do_pc3)
1080 if (get_msr(cpu, MSR_PKG_C3_RESIDENCY, &p->pc3))
1081 return -9;
1082 if (do_pc6)
1083 if (get_msr(cpu, MSR_PKG_C6_RESIDENCY, &p->pc6))
1084 return -10;
1085 if (do_pc2)
1086 if (get_msr(cpu, MSR_PKG_C2_RESIDENCY, &p->pc2))
1087 return -11;
1088 if (do_pc7)
1089 if (get_msr(cpu, MSR_PKG_C7_RESIDENCY, &p->pc7))
1090 return -12;
1091 if (do_c8_c9_c10) {
1092 if (get_msr(cpu, MSR_PKG_C8_RESIDENCY, &p->pc8))
1093 return -13;
1094 if (get_msr(cpu, MSR_PKG_C9_RESIDENCY, &p->pc9))
1095 return -13;
1096 if (get_msr(cpu, MSR_PKG_C10_RESIDENCY, &p->pc10))
1097 return -13;
1098 }
1099 if (do_rapl & RAPL_PKG) {
1100 if (get_msr(cpu, MSR_PKG_ENERGY_STATUS, &msr))
1101 return -13;
1102 p->energy_pkg = msr & 0xFFFFFFFF;
1103 }
1104 if (do_rapl & RAPL_CORES) {
1105 if (get_msr(cpu, MSR_PP0_ENERGY_STATUS, &msr))
1106 return -14;
1107 p->energy_cores = msr & 0xFFFFFFFF;
1108 }
1109 if (do_rapl & RAPL_DRAM) {
1110 if (get_msr(cpu, MSR_DRAM_ENERGY_STATUS, &msr))
1111 return -15;
1112 p->energy_dram = msr & 0xFFFFFFFF;
1113 }
1114 if (do_rapl & RAPL_GFX) {
1115 if (get_msr(cpu, MSR_PP1_ENERGY_STATUS, &msr))
1116 return -16;
1117 p->energy_gfx = msr & 0xFFFFFFFF;
1118 }
1119 if (do_rapl & RAPL_PKG_PERF_STATUS) {
1120 if (get_msr(cpu, MSR_PKG_PERF_STATUS, &msr))
1121 return -16;
1122 p->rapl_pkg_perf_status = msr & 0xFFFFFFFF;
1123 }
1124 if (do_rapl & RAPL_DRAM_PERF_STATUS) {
1125 if (get_msr(cpu, MSR_DRAM_PERF_STATUS, &msr))
1126 return -16;
1127 p->rapl_dram_perf_status = msr & 0xFFFFFFFF;
1128 }
1129 if (do_ptm) {
1130 if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr))
1131 return -17;
1132 p->pkg_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F);
1133 }
1134 return 0;
1135 }
1136
1137 /*
1138 * MSR_PKG_CST_CONFIG_CONTROL decoding for pkg_cstate_limit:
1139 * If you change the values, note they are used both in comparisons
1140 * (>= PCL__7) and to index pkg_cstate_limit_strings[].
1141 */
1142
1143 #define PCLUKN 0 /* Unknown */
1144 #define PCLRSV 1 /* Reserved */
1145 #define PCL__0 2 /* PC0 */
1146 #define PCL__1 3 /* PC1 */
1147 #define PCL__2 4 /* PC2 */
1148 #define PCL__3 5 /* PC3 */
1149 #define PCL__4 6 /* PC4 */
1150 #define PCL__6 7 /* PC6 */
1151 #define PCL_6N 8 /* PC6 No Retention */
1152 #define PCL_6R 9 /* PC6 Retention */
1153 #define PCL__7 10 /* PC7 */
1154 #define PCL_7S 11 /* PC7 Shrink */
1155 #define PCL__8 12 /* PC8 */
1156 #define PCL__9 13 /* PC9 */
1157 #define PCLUNL 14 /* Unlimited */
1158
1159 int pkg_cstate_limit = PCLUKN;
1160 char *pkg_cstate_limit_strings[] = { "reserved", "unknown", "pc0", "pc1", "pc2",
1161 "pc3", "pc4", "pc6", "pc6n", "pc6r", "pc7", "pc7s", "pc8", "pc9", "unlimited"};
1162
1163 int nhm_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__3, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
1164 int snb_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCL__7, PCL_7S, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
1165 int hsw_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL__3, PCL__6, PCL__7, PCL_7S, PCL__8, PCL__9, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
1166 int slv_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCLRSV, PCLRSV, PCL__4, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
1167 int amt_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__2, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
1168 int phi_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
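/*
 * Example decode (illustrative): on a SNB-class part, if
 * MSR_PKG_CST_CONFIG_CONTROL bits [3:0] read back as 7, then
 * snb_pkg_cstate_limits[7] is PCLUNL, which prints from
 * pkg_cstate_limit_strings[] as "unlimited".
 */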
1169
1170
1171 static void
1172 calculate_tsc_tweak()
1173 {
1174 tsc_tweak = base_hz / tsc_hz;
1175 }
1176
1177 static void
1178 dump_nhm_platform_info(void)
1179 {
1180 unsigned long long msr;
1181 unsigned int ratio;
1182
1183 get_msr(base_cpu, MSR_PLATFORM_INFO, &msr);
1184
1185 fprintf(stderr, "cpu%d: MSR_PLATFORM_INFO: 0x%08llx\n", base_cpu, msr);
1186
1187 ratio = (msr >> 40) & 0xFF;
1188 fprintf(stderr, "%d * %.0f = %.0f MHz max efficiency frequency\n",
1189 ratio, bclk, ratio * bclk);
1190
1191 ratio = (msr >> 8) & 0xFF;
1192 fprintf(stderr, "%d * %.0f = %.0f MHz base frequency\n",
1193 ratio, bclk, ratio * bclk);
1194
1195 get_msr(base_cpu, MSR_IA32_POWER_CTL, &msr);
1196 fprintf(stderr, "cpu%d: MSR_IA32_POWER_CTL: 0x%08llx (C1E auto-promotion: %sabled)\n",
1197 base_cpu, msr, msr & 0x2 ? "EN" : "DIS");
1198
1199 return;
1200 }
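/*
 * Example (illustrative values): with bclk = 100 MHz and
 * MSR_PLATFORM_INFO bits [15:8] = 0x23 = 35, the base frequency
 * printed above is 35 * 100 = 3500 MHz; bits [47:40] give the
 * maximum-efficiency ratio the same way.
 */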
1201
1202 static void
1203 dump_hsw_turbo_ratio_limits(void)
1204 {
1205 unsigned long long msr;
1206 unsigned int ratio;
1207
1208 get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT2, &msr);
1209
1210 fprintf(stderr, "cpu%d: MSR_TURBO_RATIO_LIMIT2: 0x%08llx\n", base_cpu, msr);
1211
1212 ratio = (msr >> 8) & 0xFF;
1213 if (ratio)
1214 fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 18 active cores\n",
1215 ratio, bclk, ratio * bclk);
1216
1217 ratio = (msr >> 0) & 0xFF;
1218 if (ratio)
1219 fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 17 active cores\n",
1220 ratio, bclk, ratio * bclk);
1221 return;
1222 }
1223
1224 static void
1225 dump_ivt_turbo_ratio_limits(void)
1226 {
1227 unsigned long long msr;
1228 unsigned int ratio;
1229
1230 get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT1, &msr);
1231
1232 fprintf(stderr, "cpu%d: MSR_TURBO_RATIO_LIMIT1: 0x%08llx\n", base_cpu, msr);
1233
1234 ratio = (msr >> 56) & 0xFF;
1235 if (ratio)
1236 fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 16 active cores\n",
1237 ratio, bclk, ratio * bclk);
1238
1239 ratio = (msr >> 48) & 0xFF;
1240 if (ratio)
1241 fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 15 active cores\n",
1242 ratio, bclk, ratio * bclk);
1243
1244 ratio = (msr >> 40) & 0xFF;
1245 if (ratio)
1246 fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 14 active cores\n",
1247 ratio, bclk, ratio * bclk);
1248
1249 ratio = (msr >> 32) & 0xFF;
1250 if (ratio)
1251 fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 13 active cores\n",
1252 ratio, bclk, ratio * bclk);
1253
1254 ratio = (msr >> 24) & 0xFF;
1255 if (ratio)
1256 fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 12 active cores\n",
1257 ratio, bclk, ratio * bclk);
1258
1259 ratio = (msr >> 16) & 0xFF;
1260 if (ratio)
1261 fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 11 active cores\n",
1262 ratio, bclk, ratio * bclk);
1263
1264 ratio = (msr >> 8) & 0xFF;
1265 if (ratio)
1266 fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 10 active cores\n",
1267 ratio, bclk, ratio * bclk);
1268
1269 ratio = (msr >> 0) & 0xFF;
1270 if (ratio)
1271 fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 9 active cores\n",
1272 ratio, bclk, ratio * bclk);
1273 return;
1274 }
1275
1276 static void
1277 dump_nhm_turbo_ratio_limits(void)
1278 {
1279 unsigned long long msr;
1280 unsigned int ratio;
1281
1282 get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT, &msr);
1283
1284 fprintf(stderr, "cpu%d: MSR_TURBO_RATIO_LIMIT: 0x%08llx\n", base_cpu, msr);
1285
1286 ratio = (msr >> 56) & 0xFF;
1287 if (ratio)
1288 fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 8 active cores\n",
1289 ratio, bclk, ratio * bclk);
1290
1291 ratio = (msr >> 48) & 0xFF;
1292 if (ratio)
1293 fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 7 active cores\n",
1294 ratio, bclk, ratio * bclk);
1295
1296 ratio = (msr >> 40) & 0xFF;
1297 if (ratio)
1298 fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 6 active cores\n",
1299 ratio, bclk, ratio * bclk);
1300
1301 ratio = (msr >> 32) & 0xFF;
1302 if (ratio)
1303 fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 5 active cores\n",
1304 ratio, bclk, ratio * bclk);
1305
1306 ratio = (msr >> 24) & 0xFF;
1307 if (ratio)
1308 fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 4 active cores\n",
1309 ratio, bclk, ratio * bclk);
1310
1311 ratio = (msr >> 16) & 0xFF;
1312 if (ratio)
1313 fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 3 active cores\n",
1314 ratio, bclk, ratio * bclk);
1315
1316 ratio = (msr >> 8) & 0xFF;
1317 if (ratio)
1318 fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 2 active cores\n",
1319 ratio, bclk, ratio * bclk);
1320
1321 ratio = (msr >> 0) & 0xFF;
1322 if (ratio)
1323 fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 1 active cores\n",
1324 ratio, bclk, ratio * bclk);
1325 return;
1326 }
1327
1328 static void
1329 dump_knl_turbo_ratio_limits(void)
1330 {
1331 const unsigned int buckets_no = 7;
1332
1333 unsigned long long msr;
1334 int delta_cores, delta_ratio;
1335 int i, b_nr;
1336 unsigned int cores[buckets_no];
1337 unsigned int ratio[buckets_no];
1338
1339 get_msr(base_cpu, MSR_NHM_TURBO_RATIO_LIMIT, &msr);
1340
1341 fprintf(stderr, "cpu%d: MSR_TURBO_RATIO_LIMIT: 0x%08llx\n",
1342 base_cpu, msr);
1343
1344 /**
1345 * Turbo encoding in KNL is as follows:
1346 * [0] -- Reserved
1347 * [7:1] -- Base value of number of active cores of bucket 1.
1348 * [15:8] -- Base value of freq ratio of bucket 1.
1349 * [20:16] -- +ve delta of number of active cores of bucket 2.
1350 * i.e. active cores of bucket 2 =
1351 * active cores of bucket 1 + delta
1352 * [23:21] -- Negative delta of freq ratio of bucket 2.
1353 * i.e. freq ratio of bucket 2 =
1354 * freq ratio of bucket 1 - delta
1355 * [28:24]-- +ve delta of number of active cores of bucket 3.
1356 * [31:29]-- -ve delta of freq ratio of bucket 3.
1357 * [36:32]-- +ve delta of number of active cores of bucket 4.
1358 * [39:37]-- -ve delta of freq ratio of bucket 4.
1359 * [44:40]-- +ve delta of number of active cores of bucket 5.
1360 * [47:45]-- -ve delta of freq ratio of bucket 5.
1361 * [52:48]-- +ve delta of number of active cores of bucket 6.
1362 * [55:53]-- -ve delta of freq ratio of bucket 6.
1363 * [60:56]-- +ve delta of number of active cores of bucket 7.
1364 * [63:61]-- -ve delta of freq ratio of bucket 7.
1365 */
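/*
 * Worked example (hypothetical MSR value): if the low 16 bits were
 * 0x1A10, bucket 1 covers (0x10 >> 1) = 8 active cores at ratio
 * 0x1A = 26, i.e. 26 * 100 = 2600 MHz with a 100 MHz bclk; each
 * following 8-bit field then adds cores to and subtracts ratio from
 * the previous bucket.
 */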
1366
1367 b_nr = 0;
1368 cores[b_nr] = (msr & 0xFF) >> 1;
1369 ratio[b_nr] = (msr >> 8) & 0xFF;
1370
1371 for (i = 16; i < 64; i += 8) {
1372 delta_cores = (msr >> i) & 0x1F;
1373 delta_ratio = (msr >> (i + 5)) & 0x7;
1374
1375 cores[b_nr + 1] = cores[b_nr] + delta_cores;
1376 ratio[b_nr + 1] = ratio[b_nr] - delta_ratio;
1377 b_nr++;
1378 }
1379
1380 for (i = buckets_no - 1; i >= 0; i--)
1381 if (i > 0 ? ratio[i] != ratio[i - 1] : 1)
1382 fprintf(stderr,
1383 "%d * %.0f = %.0f MHz max turbo %d active cores\n",
1384 ratio[i], bclk, ratio[i] * bclk, cores[i]);
1385 }
1386
1387 static void
1388 dump_nhm_cst_cfg(void)
1389 {
1390 unsigned long long msr;
1391
1392 get_msr(base_cpu, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr);
1393
1394 #define SNB_C1_AUTO_UNDEMOTE (1UL << 27)
1395 #define SNB_C3_AUTO_UNDEMOTE (1UL << 28)
1396
1397 fprintf(stderr, "cpu%d: MSR_NHM_SNB_PKG_CST_CFG_CTL: 0x%08llx", base_cpu, msr);
1398
1399 fprintf(stderr, " (%s%s%s%s%slocked: pkg-cstate-limit=%d: %s)\n",
1400 (msr & SNB_C3_AUTO_UNDEMOTE) ? "UNdemote-C3, " : "",
1401 (msr & SNB_C1_AUTO_UNDEMOTE) ? "UNdemote-C1, " : "",
1402 (msr & NHM_C3_AUTO_DEMOTE) ? "demote-C3, " : "",
1403 (msr & NHM_C1_AUTO_DEMOTE) ? "demote-C1, " : "",
1404 (msr & (1 << 15)) ? "" : "UN",
1405 (unsigned int)msr & 7,
1406 pkg_cstate_limit_strings[pkg_cstate_limit]);
1407 return;
1408 }
1409
1410 static void
1411 dump_config_tdp(void)
1412 {
1413 unsigned long long msr;
1414
1415 get_msr(base_cpu, MSR_CONFIG_TDP_NOMINAL, &msr);
1416 fprintf(stderr, "cpu%d: MSR_CONFIG_TDP_NOMINAL: 0x%08llx", base_cpu, msr);
1417 fprintf(stderr, " (base_ratio=%d)\n", (unsigned int)msr & 0xEF);
1418
1419 get_msr(base_cpu, MSR_CONFIG_TDP_LEVEL_1, &msr);
1420 fprintf(stderr, "cpu%d: MSR_CONFIG_TDP_LEVEL_1: 0x%08llx (", base_cpu, msr);
1421 if (msr) {
1422 fprintf(stderr, "PKG_MIN_PWR_LVL1=%d ", (unsigned int)(msr >> 48) & 0xEFFF);
1423 fprintf(stderr, "PKG_MAX_PWR_LVL1=%d ", (unsigned int)(msr >> 32) & 0xEFFF);
1424 fprintf(stderr, "LVL1_RATIO=%d ", (unsigned int)(msr >> 16) & 0xEF);
1425 fprintf(stderr, "PKG_TDP_LVL1=%d", (unsigned int)(msr) & 0xEFFF);
1426 }
1427 fprintf(stderr, ")\n");
1428
1429 get_msr(base_cpu, MSR_CONFIG_TDP_LEVEL_2, &msr);
1430 fprintf(stderr, "cpu%d: MSR_CONFIG_TDP_LEVEL_2: 0x%08llx (", base_cpu, msr);
1431 if (msr) {
1432 fprintf(stderr, "PKG_MIN_PWR_LVL2=%d ", (unsigned int)(msr >> 48) & 0xEFFF);
1433 fprintf(stderr, "PKG_MAX_PWR_LVL2=%d ", (unsigned int)(msr >> 32) & 0xEFFF);
1434 fprintf(stderr, "LVL2_RATIO=%d ", (unsigned int)(msr >> 16) & 0xEF);
1435 fprintf(stderr, "PKG_TDP_LVL2=%d", (unsigned int)(msr) & 0xEFFF);
1436 }
1437 fprintf(stderr, ")\n");
1438
1439 get_msr(base_cpu, MSR_CONFIG_TDP_CONTROL, &msr);
1440 fprintf(stderr, "cpu%d: MSR_CONFIG_TDP_CONTROL: 0x%08llx (", base_cpu, msr);
1441 if ((msr) & 0x3)
1442 fprintf(stderr, "TDP_LEVEL=%d ", (unsigned int)(msr) & 0x3);
1443 fprintf(stderr, " lock=%d", (unsigned int)(msr >> 31) & 1);
1444 fprintf(stderr, ")\n");
1445
1446 get_msr(base_cpu, MSR_TURBO_ACTIVATION_RATIO, &msr);
1447 fprintf(stderr, "cpu%d: MSR_TURBO_ACTIVATION_RATIO: 0x%08llx (", base_cpu, msr);
1448 fprintf(stderr, "MAX_NON_TURBO_RATIO=%d", (unsigned int)(msr) & 0x7F);
1449 fprintf(stderr, " lock=%d", (unsigned int)(msr >> 31) & 1);
1450 fprintf(stderr, ")\n");
1451 }
1452
1453 void free_all_buffers(void)
1454 {
1455 CPU_FREE(cpu_present_set);
1456 cpu_present_set = NULL;
1457 	cpu_present_setsize = 0;
1458
1459 CPU_FREE(cpu_affinity_set);
1460 cpu_affinity_set = NULL;
1461 cpu_affinity_setsize = 0;
1462
1463 free(thread_even);
1464 free(core_even);
1465 free(package_even);
1466
1467 thread_even = NULL;
1468 core_even = NULL;
1469 package_even = NULL;
1470
1471 free(thread_odd);
1472 free(core_odd);
1473 free(package_odd);
1474
1475 thread_odd = NULL;
1476 core_odd = NULL;
1477 package_odd = NULL;
1478
1479 free(output_buffer);
1480 output_buffer = NULL;
1481 outp = NULL;
1482 }
1483
1484 /*
1485 * Open a file, and exit on failure
1486 */
1487 FILE *fopen_or_die(const char *path, const char *mode)
1488 {
1489 	FILE *filep = fopen(path, mode);
1490 if (!filep)
1491 err(1, "%s: open failed", path);
1492 return filep;
1493 }
1494
1495 /*
1496 * Parse a file containing a single int.
1497 */
1498 int parse_int_file(const char *fmt, ...)
1499 {
1500 va_list args;
1501 char path[PATH_MAX];
1502 FILE *filep;
1503 int value;
1504
1505 va_start(args, fmt);
1506 vsnprintf(path, sizeof(path), fmt, args);
1507 va_end(args);
1508 filep = fopen_or_die(path, "r");
1509 if (fscanf(filep, "%d", &value) != 1)
1510 err(1, "%s: failed to parse number from file", path);
1511 fclose(filep);
1512 return value;
1513 }
1514
1515 /*
1516 * get_cpu_position_in_core(cpu)
1517 * return the position of the CPU among its HT siblings in the core
1518 * return -1 if the sibling is not in list
1519 */
1520 int get_cpu_position_in_core(int cpu)
1521 {
1522 char path[64];
1523 FILE *filep;
1524 int this_cpu;
1525 char character;
1526 int i;
1527
1528 sprintf(path,
1529 "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list",
1530 cpu);
1531 filep = fopen(path, "r");
1532 if (filep == NULL) {
1533 perror(path);
1534 exit(1);
1535 }
1536
1537 for (i = 0; i < topo.num_threads_per_core; i++) {
1538 fscanf(filep, "%d", &this_cpu);
1539 if (this_cpu == cpu) {
1540 fclose(filep);
1541 return i;
1542 }
1543
1544 		/* Account for no separator after last thread */
1545 if (i != (topo.num_threads_per_core - 1))
1546 fscanf(filep, "%c", &character);
1547 }
1548
1549 fclose(filep);
1550 return -1;
1551 }
1552
1553 /*
1554 * cpu_is_first_core_in_package(cpu)
1555 * return 1 if given CPU is 1st core in package
1556 */
1557 int cpu_is_first_core_in_package(int cpu)
1558 {
1559 return cpu == parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_siblings_list", cpu);
1560 }
1561
1562 int get_physical_package_id(int cpu)
1563 {
1564 return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu);
1565 }
1566
1567 int get_core_id(int cpu)
1568 {
1569 return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_id", cpu);
1570 }
1571
1572 int get_num_ht_siblings(int cpu)
1573 {
1574 char path[80];
1575 FILE *filep;
1576 int sib1;
1577 int matches = 0;
1578 char character;
1579 char str[100];
1580 char *ch;
1581
1582 sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu);
1583 filep = fopen_or_die(path, "r");
1584
1585 /*
1586 * file format:
1587 * A ',' separated or '-' separated set of numbers
1588 * (eg 1-2 or 1,3,4,5)
1589 */
1590 fscanf(filep, "%d%c\n", &sib1, &character);
1591 fseek(filep, 0, SEEK_SET);
1592 fgets(str, 100, filep);
1593 ch = strchr(str, character);
1594 while (ch != NULL) {
1595 matches++;
1596 ch = strchr(ch+1, character);
1597 }
1598
1599 fclose(filep);
1600 return matches+1;
1601 }
1602
1603 /*
1604 * run func(thread, core, package) in topology order
1605 * skip non-present cpus
1606 */
1607
1608 int for_all_cpus_2(int (func)(struct thread_data *, struct core_data *,
1609 struct pkg_data *, struct thread_data *, struct core_data *,
1610 struct pkg_data *), struct thread_data *thread_base,
1611 struct core_data *core_base, struct pkg_data *pkg_base,
1612 struct thread_data *thread_base2, struct core_data *core_base2,
1613 struct pkg_data *pkg_base2)
1614 {
1615 int retval, pkg_no, core_no, thread_no;
1616
1617 for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) {
1618 for (core_no = 0; core_no < topo.num_cores_per_pkg; ++core_no) {
1619 for (thread_no = 0; thread_no <
1620 topo.num_threads_per_core; ++thread_no) {
1621 struct thread_data *t, *t2;
1622 struct core_data *c, *c2;
1623 struct pkg_data *p, *p2;
1624
1625 t = GET_THREAD(thread_base, thread_no, core_no, pkg_no);
1626
1627 if (cpu_is_not_present(t->cpu_id))
1628 continue;
1629
1630 t2 = GET_THREAD(thread_base2, thread_no, core_no, pkg_no);
1631
1632 c = GET_CORE(core_base, core_no, pkg_no);
1633 c2 = GET_CORE(core_base2, core_no, pkg_no);
1634
1635 p = GET_PKG(pkg_base, pkg_no);
1636 p2 = GET_PKG(pkg_base2, pkg_no);
1637
1638 retval = func(t, c, p, t2, c2, p2);
1639 if (retval)
1640 return retval;
1641 }
1642 }
1643 }
1644 return 0;
1645 }
1646
1647 /*
1648 * run func(cpu) on every cpu in /proc/stat
1649 * return max_cpu number
1650 */
1651 int for_all_proc_cpus(int (func)(int))
1652 {
1653 FILE *fp;
1654 int cpu_num;
1655 int retval;
1656
1657 fp = fopen_or_die(proc_stat, "r");
1658
1659 retval = fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n");
1660 if (retval != 0)
1661 err(1, "%s: failed to parse format", proc_stat);
1662
1663 while (1) {
1664 retval = fscanf(fp, "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", &cpu_num);
1665 if (retval != 1)
1666 break;
1667
1668 retval = func(cpu_num);
1669 if (retval) {
1670 fclose(fp);
1671 return(retval);
1672 }
1673 }
1674 fclose(fp);
1675 return 0;
1676 }
1677
1678 void re_initialize(void)
1679 {
1680 free_all_buffers();
1681 setup_all_buffers();
1682 printf("turbostat: re-initialized with num_cpus %d\n", topo.num_cpus);
1683 }
1684
1685
1686 /*
1687 * count_cpus()
1688 * remember the last one seen, it will be the max
1689 */
1690 int count_cpus(int cpu)
1691 {
1692 if (topo.max_cpu_num < cpu)
1693 topo.max_cpu_num = cpu;
1694
1695 topo.num_cpus += 1;
1696 return 0;
1697 }
1698 int mark_cpu_present(int cpu)
1699 {
1700 CPU_SET_S(cpu, cpu_present_setsize, cpu_present_set);
1701 return 0;
1702 }
1703
1704 void turbostat_loop()
1705 {
1706 int retval;
1707 int restarted = 0;
1708
1709 restart:
1710 restarted++;
1711
1712 retval = for_all_cpus(get_counters, EVEN_COUNTERS);
1713 if (retval < -1) {
1714 exit(retval);
1715 } else if (retval == -1) {
1716 if (restarted > 1) {
1717 exit(retval);
1718 }
1719 re_initialize();
1720 goto restart;
1721 }
1722 restarted = 0;
1723 gettimeofday(&tv_even, (struct timezone *)NULL);
1724
1725 while (1) {
1726 if (for_all_proc_cpus(cpu_is_not_present)) {
1727 re_initialize();
1728 goto restart;
1729 }
1730 nanosleep(&interval_ts, NULL);
1731 retval = for_all_cpus(get_counters, ODD_COUNTERS);
1732 if (retval < -1) {
1733 exit(retval);
1734 } else if (retval == -1) {
1735 re_initialize();
1736 goto restart;
1737 }
1738 gettimeofday(&tv_odd, (struct timezone *)NULL);
1739 timersub(&tv_odd, &tv_even, &tv_delta);
1740 for_all_cpus_2(delta_cpu, ODD_COUNTERS, EVEN_COUNTERS);
1741 compute_average(EVEN_COUNTERS);
1742 format_all_counters(EVEN_COUNTERS);
1743 flush_stdout();
1744 nanosleep(&interval_ts, NULL);
1745 retval = for_all_cpus(get_counters, EVEN_COUNTERS);
1746 if (retval < -1) {
1747 exit(retval);
1748 } else if (retval == -1) {
1749 re_initialize();
1750 goto restart;
1751 }
1752 gettimeofday(&tv_even, (struct timezone *)NULL);
1753 timersub(&tv_even, &tv_odd, &tv_delta);
1754 for_all_cpus_2(delta_cpu, EVEN_COUNTERS, ODD_COUNTERS);
1755 compute_average(ODD_COUNTERS);
1756 format_all_counters(ODD_COUNTERS);
1757 flush_stdout();
1758 }
1759 }
1760
1761 void check_dev_msr()
1762 {
1763 struct stat sb;
1764 char pathname[32];
1765
1766 sprintf(pathname, "/dev/cpu/%d/msr", base_cpu);
1767 if (stat(pathname, &sb))
1768 if (system("/sbin/modprobe msr > /dev/null 2>&1"))
1769 err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" ");
1770 }
1771
1772 void check_permissions()
1773 {
1774 struct __user_cap_header_struct cap_header_data;
1775 cap_user_header_t cap_header = &cap_header_data;
1776 struct __user_cap_data_struct cap_data_data;
1777 cap_user_data_t cap_data = &cap_data_data;
1778 extern int capget(cap_user_header_t hdrp, cap_user_data_t datap);
1779 int do_exit = 0;
1780 char pathname[32];
1781
1782 /* check for CAP_SYS_RAWIO */
1783 cap_header->pid = getpid();
1784 cap_header->version = _LINUX_CAPABILITY_VERSION;
1785 if (capget(cap_header, cap_data) < 0)
1786 err(-6, "capget(2) failed");
1787
1788 if ((cap_data->effective & (1 << CAP_SYS_RAWIO)) == 0) {
1789 do_exit++;
1790 warnx("capget(CAP_SYS_RAWIO) failed,"
1791 " try \"# setcap cap_sys_rawio=ep %s\"", progname);
1792 }
1793
1794 /* test file permissions */
1795 sprintf(pathname, "/dev/cpu/%d/msr", base_cpu);
1796 if (euidaccess(pathname, R_OK)) {
1797 do_exit++;
1798 warn("/dev/cpu/0/msr open failed, try chown or chmod +r /dev/cpu/*/msr");
1799 }
1800
1801 	/* if all else fails, tell them to be root */
1802 if (do_exit)
1803 if (getuid() != 0)
1804 warnx("... or simply run as root");
1805
1806 if (do_exit)
1807 exit(-6);
1808 }
1809
1810 /*
1811 * NHM adds support for additional MSRs:
1812 *
1813 * MSR_SMI_COUNT 0x00000034
1814 *
1815 * MSR_PLATFORM_INFO 0x000000ce
1816 * MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2
1817 *
1818 * MSR_PKG_C3_RESIDENCY 0x000003f8
1819 * MSR_PKG_C6_RESIDENCY 0x000003f9
1820 * MSR_CORE_C3_RESIDENCY 0x000003fc
1821 * MSR_CORE_C6_RESIDENCY 0x000003fd
1822 *
1823 * Side effect:
1824 * sets global pkg_cstate_limit to decode MSR_NHM_SNB_PKG_CST_CFG_CTL
1825 */
1826 int probe_nhm_msrs(unsigned int family, unsigned int model)
1827 {
1828 unsigned long long msr;
1829 unsigned int base_ratio;
1830 int *pkg_cstate_limits;
1831
1832 if (!genuine_intel)
1833 return 0;
1834
1835 if (family != 6)
1836 return 0;
1837
1838 bclk = discover_bclk(family, model);
1839
1840 switch (model) {
1841 	case 0x1A:	/* Core i7, Xeon 5500 series - Bloomfield, Gainestown NHM-EP */
1842 case 0x1E: /* Core i7 and i5 Processor - Clarksfield, Lynnfield, Jasper Forest */
1843 case 0x1F: /* Core i7 and i5 Processor - Nehalem */
1844 case 0x25: /* Westmere Client - Clarkdale, Arrandale */
1845 case 0x2C: /* Westmere EP - Gulftown */
1846 case 0x2E: /* Nehalem-EX Xeon - Beckton */
1847 case 0x2F: /* Westmere-EX Xeon - Eagleton */
1848 pkg_cstate_limits = nhm_pkg_cstate_limits;
1849 break;
1850 case 0x2A: /* SNB */
1851 case 0x2D: /* SNB Xeon */
1852 case 0x3A: /* IVB */
1853 case 0x3E: /* IVB Xeon */
1854 pkg_cstate_limits = snb_pkg_cstate_limits;
1855 break;
1856 case 0x3C: /* HSW */
1857 case 0x3F: /* HSX */
1858 case 0x45: /* HSW */
1859 case 0x46: /* HSW */
1860 case 0x3D: /* BDW */
1861 case 0x47: /* BDW */
1862 case 0x4F: /* BDX */
1863 case 0x56: /* BDX-DE */
1864 case 0x4E: /* SKL */
1865 case 0x5E: /* SKL */
1866 pkg_cstate_limits = hsw_pkg_cstate_limits;
1867 break;
1868 case 0x37: /* BYT */
1869 case 0x4D: /* AVN */
1870 pkg_cstate_limits = slv_pkg_cstate_limits;
1871 break;
1872 case 0x4C: /* AMT */
1873 pkg_cstate_limits = amt_pkg_cstate_limits;
1874 break;
1875 case 0x57: /* PHI */
1876 pkg_cstate_limits = phi_pkg_cstate_limits;
1877 break;
1878 default:
1879 return 0;
1880 }
1881 get_msr(base_cpu, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr);
1882 pkg_cstate_limit = pkg_cstate_limits[msr & 0xF];
1883
1884 get_msr(base_cpu, MSR_PLATFORM_INFO, &msr);
1885 base_ratio = (msr >> 8) & 0xFF;
1886
1887 base_hz = base_ratio * bclk * 1000000;
1888 has_base_hz = 1;
1889 return 1;
1890 }
1891 int has_nhm_turbo_ratio_limit(unsigned int family, unsigned int model)
1892 {
1893 switch (model) {
1894 /* Nehalem compatible, but do not include turbo-ratio limit support */
1895 case 0x2E: /* Nehalem-EX Xeon - Beckton */
1896 case 0x2F: /* Westmere-EX Xeon - Eagleton */
1897 case 0x57: /* PHI - Knights Landing (different MSR definition) */
1898 return 0;
1899 default:
1900 return 1;
1901 }
1902 }
1903 int has_ivt_turbo_ratio_limit(unsigned int family, unsigned int model)
1904 {
1905 if (!genuine_intel)
1906 return 0;
1907
1908 if (family != 6)
1909 return 0;
1910
1911 switch (model) {
1912 case 0x3E: /* IVB Xeon */
1913 case 0x3F: /* HSW Xeon */
1914 return 1;
1915 default:
1916 return 0;
1917 }
1918 }
1919 int has_hsw_turbo_ratio_limit(unsigned int family, unsigned int model)
1920 {
1921 if (!genuine_intel)
1922 return 0;
1923
1924 if (family != 6)
1925 return 0;
1926
1927 switch (model) {
1928 case 0x3F: /* HSW Xeon */
1929 return 1;
1930 default:
1931 return 0;
1932 }
1933 }
1934
1935 int has_knl_turbo_ratio_limit(unsigned int family, unsigned int model)
1936 {
1937 if (!genuine_intel)
1938 return 0;
1939
1940 if (family != 6)
1941 return 0;
1942
1943 switch (model) {
1944 case 0x57: /* Knights Landing */
1945 return 1;
1946 default:
1947 return 0;
1948 }
1949 }
1950 int has_config_tdp(unsigned int family, unsigned int model)
1951 {
1952 if (!genuine_intel)
1953 return 0;
1954
1955 if (family != 6)
1956 return 0;
1957
1958 switch (model) {
1959 case 0x3A: /* IVB */
1960 case 0x3C: /* HSW */
1961 case 0x3F: /* HSX */
1962 case 0x45: /* HSW */
1963 case 0x46: /* HSW */
1964 case 0x3D: /* BDW */
1965 case 0x47: /* BDW */
1966 case 0x4F: /* BDX */
1967 case 0x56: /* BDX-DE */
1968 case 0x4E: /* SKL */
1969 case 0x5E: /* SKL */
1970
1971 case 0x57: /* Knights Landing */
1972 return 1;
1973 default:
1974 return 0;
1975 }
1976 }
1977
1978 static void
1979 dump_cstate_pstate_config_info(unsigned int family, unsigned int model)
1980 {
1981 if (!do_nhm_platform_info)
1982 return;
1983
1984 dump_nhm_platform_info();
1985
1986 if (has_hsw_turbo_ratio_limit(family, model))
1987 dump_hsw_turbo_ratio_limits();
1988
1989 if (has_ivt_turbo_ratio_limit(family, model))
1990 dump_ivt_turbo_ratio_limits();
1991
1992 if (has_nhm_turbo_ratio_limit(family, model))
1993 dump_nhm_turbo_ratio_limits();
1994
1995 if (has_knl_turbo_ratio_limit(family, model))
1996 dump_knl_turbo_ratio_limits();
1997
1998 if (has_config_tdp(family, model))
1999 dump_config_tdp();
2000
2001 dump_nhm_cst_cfg();
2002 }
2003
2004
2005 /*
2006 * print_epb()
2007 * Decode the ENERGY_PERF_BIAS MSR
2008 */
2009 int print_epb(struct thread_data *t, struct core_data *c, struct pkg_data *p)
2010 {
2011 unsigned long long msr;
2012 char *epb_string;
2013 int cpu;
2014
2015 if (!has_epb)
2016 return 0;
2017
2018 cpu = t->cpu_id;
2019
2020 /* EPB is per-package */
2021 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
2022 return 0;
2023
2024 if (cpu_migrate(cpu)) {
2025 fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
2026 return -1;
2027 }
2028
2029 if (get_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS, &msr))
2030 return 0;
2031
2032 switch (msr & 0xF) {
2033 case ENERGY_PERF_BIAS_PERFORMANCE:
2034 epb_string = "performance";
2035 break;
2036 case ENERGY_PERF_BIAS_NORMAL:
2037 epb_string = "balanced";
2038 break;
2039 case ENERGY_PERF_BIAS_POWERSAVE:
2040 epb_string = "powersave";
2041 break;
2042 default:
2043 epb_string = "custom";
2044 break;
2045 }
2046 fprintf(stderr, "cpu%d: MSR_IA32_ENERGY_PERF_BIAS: 0x%08llx (%s)\n", cpu, msr, epb_string);
2047
2048 return 0;
2049 }
2050 /*
2051 * print_hwp()
2052 * Decode the MSR_HWP_CAPABILITIES
2053 */
2054 int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p)
2055 {
2056 unsigned long long msr;
2057 int cpu;
2058
2059 if (!has_hwp)
2060 return 0;
2061
2062 cpu = t->cpu_id;
2063
2064 /* MSR_HWP_CAPABILITIES is per-package */
2065 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
2066 return 0;
2067
2068 if (cpu_migrate(cpu)) {
2069 fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
2070 return -1;
2071 }
2072
2073 if (get_msr(cpu, MSR_PM_ENABLE, &msr))
2074 return 0;
2075
2076 fprintf(stderr, "cpu%d: MSR_PM_ENABLE: 0x%08llx (%sHWP)\n",
2077 cpu, msr, (msr & (1 << 0)) ? "" : "No-");
2078
2079 /* MSR_PM_ENABLE[0] == 1 if HWP is enabled and MSRs visible */
2080 if ((msr & (1 << 0)) == 0)
2081 return 0;
2082
2083 if (get_msr(cpu, MSR_HWP_CAPABILITIES, &msr))
2084 return 0;
2085
2086 fprintf(stderr, "cpu%d: MSR_HWP_CAPABILITIES: 0x%08llx "
2087 "(high 0x%x guar 0x%x eff 0x%x low 0x%x)\n",
2088 cpu, msr,
2089 (unsigned int)HWP_HIGHEST_PERF(msr),
2090 (unsigned int)HWP_GUARANTEED_PERF(msr),
2091 (unsigned int)HWP_MOSTEFFICIENT_PERF(msr),
2092 (unsigned int)HWP_LOWEST_PERF(msr));
2093
2094 if (get_msr(cpu, MSR_HWP_REQUEST, &msr))
2095 return 0;
2096
2097 fprintf(stderr, "cpu%d: MSR_HWP_REQUEST: 0x%08llx "
2098 "(min 0x%x max 0x%x des 0x%x epp 0x%x window 0x%x pkg 0x%x)\n",
2099 cpu, msr,
2100 (unsigned int)(((msr) >> 0) & 0xff),
2101 (unsigned int)(((msr) >> 8) & 0xff),
2102 (unsigned int)(((msr) >> 16) & 0xff),
2103 (unsigned int)(((msr) >> 24) & 0xff),
2104 (unsigned int)(((msr) >> 32) & 0x3ff),
2105 (unsigned int)(((msr) >> 42) & 0x1));
2106
2107 if (has_hwp_pkg) {
2108 if (get_msr(cpu, MSR_HWP_REQUEST_PKG, &msr))
2109 return 0;
2110
2111 fprintf(stderr, "cpu%d: MSR_HWP_REQUEST_PKG: 0x%08llx "
2112 "(min 0x%x max 0x%x des 0x%x epp 0x%x window 0x%x)\n",
2113 cpu, msr,
2114 (unsigned int)(((msr) >> 0) & 0xff),
2115 (unsigned int)(((msr) >> 8) & 0xff),
2116 (unsigned int)(((msr) >> 16) & 0xff),
2117 (unsigned int)(((msr) >> 24) & 0xff),
2118 (unsigned int)(((msr) >> 32) & 0x3ff));
2119 }
2120 if (has_hwp_notify) {
2121 if (get_msr(cpu, MSR_HWP_INTERRUPT, &msr))
2122 return 0;
2123
2124 fprintf(stderr, "cpu%d: MSR_HWP_INTERRUPT: 0x%08llx "
2125 "(%s_Guaranteed_Perf_Change, %s_Excursion_Min)\n",
2126 cpu, msr,
2127 ((msr) & 0x1) ? "EN" : "Dis",
2128 ((msr) & 0x2) ? "EN" : "Dis");
2129 }
2130 if (get_msr(cpu, MSR_HWP_STATUS, &msr))
2131 return 0;
2132
2133 fprintf(stderr, "cpu%d: MSR_HWP_STATUS: 0x%08llx "
2134 "(%sGuaranteed_Perf_Change, %sExcursion_Min)\n",
2135 cpu, msr,
2136 ((msr) & 0x1) ? "" : "No-",
2137 ((msr) & 0x2) ? "" : "No-");
2138
2139 return 0;
2140 }
2141
2142 /*
2143 * print_perf_limit()
2144 */
2145 int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data *p)
2146 {
2147 unsigned long long msr;
2148 int cpu;
2149
2150 cpu = t->cpu_id;
2151
2152 /* per-package */
2153 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
2154 return 0;
2155
2156 if (cpu_migrate(cpu)) {
2157 fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
2158 return -1;
2159 }
2160
2161 if (do_core_perf_limit_reasons) {
2162 get_msr(cpu, MSR_CORE_PERF_LIMIT_REASONS, &msr);
2163 fprintf(stderr, "cpu%d: MSR_CORE_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr);
2164 fprintf(stderr, " (Active: %s%s%s%s%s%s%s%s%s%s%s%s%s%s)",
2165 (msr & 1 << 15) ? "bit15, " : "",
2166 (msr & 1 << 14) ? "bit14, " : "",
2167 (msr & 1 << 13) ? "Transitions, " : "",
2168 (msr & 1 << 12) ? "MultiCoreTurbo, " : "",
2169 (msr & 1 << 11) ? "PkgPwrL2, " : "",
2170 (msr & 1 << 10) ? "PkgPwrL1, " : "",
2171 (msr & 1 << 9) ? "CorePwr, " : "",
2172 (msr & 1 << 8) ? "Amps, " : "",
2173 (msr & 1 << 6) ? "VR-Therm, " : "",
2174 (msr & 1 << 5) ? "Auto-HWP, " : "",
2175 (msr & 1 << 4) ? "Graphics, " : "",
2176 (msr & 1 << 2) ? "bit2, " : "",
2177 (msr & 1 << 1) ? "ThermStatus, " : "",
2178 (msr & 1 << 0) ? "PROCHOT, " : "");
2179 fprintf(stderr, " (Logged: %s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n",
2180 (msr & 1 << 31) ? "bit31, " : "",
2181 (msr & 1 << 30) ? "bit30, " : "",
2182 (msr & 1 << 29) ? "Transitions, " : "",
2183 (msr & 1 << 28) ? "MultiCoreTurbo, " : "",
2184 (msr & 1 << 27) ? "PkgPwrL2, " : "",
2185 (msr & 1 << 26) ? "PkgPwrL1, " : "",
2186 (msr & 1 << 25) ? "CorePwr, " : "",
2187 (msr & 1 << 24) ? "Amps, " : "",
2188 (msr & 1 << 22) ? "VR-Therm, " : "",
2189 (msr & 1 << 21) ? "Auto-HWP, " : "",
2190 (msr & 1 << 20) ? "Graphics, " : "",
2191 (msr & 1 << 18) ? "bit18, " : "",
2192 (msr & 1 << 17) ? "ThermStatus, " : "",
2193 (msr & 1 << 16) ? "PROCHOT, " : "");
2194
2195 }
2196 if (do_gfx_perf_limit_reasons) {
2197 get_msr(cpu, MSR_GFX_PERF_LIMIT_REASONS, &msr);
2198 fprintf(stderr, "cpu%d: MSR_GFX_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr);
2199 fprintf(stderr, " (Active: %s%s%s%s%s%s%s%s)",
2200 (msr & 1 << 0) ? "PROCHOT, " : "",
2201 (msr & 1 << 1) ? "ThermStatus, " : "",
2202 (msr & 1 << 4) ? "Graphics, " : "",
2203 (msr & 1 << 6) ? "VR-Therm, " : "",
2204 (msr & 1 << 8) ? "Amps, " : "",
2205 (msr & 1 << 9) ? "GFXPwr, " : "",
2206 (msr & 1 << 10) ? "PkgPwrL1, " : "",
2207 (msr & 1 << 11) ? "PkgPwrL2, " : "");
2208 fprintf(stderr, " (Logged: %s%s%s%s%s%s%s%s)\n",
2209 (msr & 1 << 16) ? "PROCHOT, " : "",
2210 (msr & 1 << 17) ? "ThermStatus, " : "",
2211 (msr & 1 << 20) ? "Graphics, " : "",
2212 (msr & 1 << 22) ? "VR-Therm, " : "",
2213 (msr & 1 << 24) ? "Amps, " : "",
2214 (msr & 1 << 25) ? "GFXPwr, " : "",
2215 (msr & 1 << 26) ? "PkgPwrL1, " : "",
2216 (msr & 1 << 27) ? "PkgPwrL2, " : "");
2217 }
2218 if (do_ring_perf_limit_reasons) {
2219 get_msr(cpu, MSR_RING_PERF_LIMIT_REASONS, &msr);
2220 fprintf(stderr, "cpu%d: MSR_RING_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr);
2221 fprintf(stderr, " (Active: %s%s%s%s%s%s)",
2222 (msr & 1 << 0) ? "PROCHOT, " : "",
2223 (msr & 1 << 1) ? "ThermStatus, " : "",
2224 (msr & 1 << 6) ? "VR-Therm, " : "",
2225 (msr & 1 << 8) ? "Amps, " : "",
2226 (msr & 1 << 10) ? "PkgPwrL1, " : "",
2227 (msr & 1 << 11) ? "PkgPwrL2, " : "");
2228 fprintf(stderr, " (Logged: %s%s%s%s%s%s)\n",
2229 (msr & 1 << 16) ? "PROCHOT, " : "",
2230 (msr & 1 << 17) ? "ThermStatus, " : "",
2231 (msr & 1 << 22) ? "VR-Therm, " : "",
2232 (msr & 1 << 24) ? "Amps, " : "",
2233 (msr & 1 << 26) ? "PkgPwrL1, " : "",
2234 (msr & 1 << 27) ? "PkgPwrL2, " : "");
2235 }
2236 return 0;
2237 }
2238
2239 #define RAPL_POWER_GRANULARITY 0x7FFF /* 15 bit power granularity */
2240 #define RAPL_TIME_GRANULARITY 0x3F /* 6 bit time granularity */
2241
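/*
 * get_tdp()
 * Return the package Thermal Design Power in Watts,
 * read from MSR_PKG_POWER_INFO when available,
 * otherwise a per-model default.
 */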
2242 double get_tdp(unsigned int model)
2243 {
2244 unsigned long long msr;
2245
2246 if (do_rapl & RAPL_PKG_POWER_INFO)
2247 if (!get_msr(base_cpu, MSR_PKG_POWER_INFO, &msr))
2248 return ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units;
2249
2250 switch (model) {
2251 case 0x37:
2252 case 0x4D:
2253 return 30.0;
2254 default:
2255 return 135.0;
2256 }
2257 }
2258
2259 /*
2260 * rapl_dram_energy_units_probe()
2261 * Energy units are either hard-coded, or come from RAPL Energy Unit MSR.
2262 */
2263 static double
2264 rapl_dram_energy_units_probe(int model, double rapl_energy_units)
2265 {
2266 /* only called for genuine_intel, family 6 */
2267
2268 switch (model) {
2269 case 0x3F: /* HSX */
2270 case 0x4F: /* BDX */
2271 case 0x56: /* BDX-DE */
2272 case 0x57: /* KNL */
2273 return (rapl_dram_energy_units = 15.3 / 1000000);
2274 default:
2275 return (rapl_energy_units);
2276 }
2277 }
2278
2279
2280 /*
2281 * rapl_probe()
2282 *
2283 * sets do_rapl, rapl_power_units, rapl_energy_units, rapl_time_units
2284 */
2285 void rapl_probe(unsigned int family, unsigned int model)
2286 {
2287 unsigned long long msr;
2288 unsigned int time_unit;
2289 double tdp;
2290
2291 if (!genuine_intel)
2292 return;
2293
2294 if (family != 6)
2295 return;
2296
2297 switch (model) {
2298 case 0x2A:
2299 case 0x3A:
2300 case 0x3C: /* HSW */
2301 case 0x45: /* HSW */
2302 case 0x46: /* HSW */
2303 case 0x3D: /* BDW */
2304 case 0x47: /* BDW */
2305 do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_GFX | RAPL_PKG_POWER_INFO;
2306 break;
2307 case 0x4E: /* SKL */
2308 case 0x5E: /* SKL */
2309 do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO;
2310 break;
2311 case 0x3F: /* HSX */
2312 case 0x4F: /* BDX */
2313 case 0x56: /* BDX-DE */
2314 case 0x57: /* KNL */
2315 do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO;
2316 break;
2317 case 0x2D:
2318 case 0x3E:
2319 do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_PKG_PERF_STATUS | RAPL_DRAM_PERF_STATUS | RAPL_PKG_POWER_INFO;
2320 break;
2321 case 0x37: /* BYT */
2322 case 0x4D: /* AVN */
2323 do_rapl = RAPL_PKG | RAPL_CORES ;
2324 break;
2325 default:
2326 return;
2327 }
2328
2329 /* units on package 0, verify later other packages match */
2330 if (get_msr(base_cpu, MSR_RAPL_POWER_UNIT, &msr))
2331 return;
2332
2333 rapl_power_units = 1.0 / (1 << (msr & 0xF));
2334 if (model == 0x37)
2335 rapl_energy_units = 1.0 * (1 << (msr >> 8 & 0x1F)) / 1000000;
2336 else
2337 rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F));
2338
2339 rapl_dram_energy_units = rapl_dram_energy_units_probe(model, rapl_energy_units);
2340
2341 time_unit = msr >> 16 & 0xF;
2342 if (time_unit == 0)
2343 time_unit = 0xA;
2344
2345 rapl_time_units = 1.0 / (1 << (time_unit));
2346
2347 tdp = get_tdp(model);
2348
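/*
 * Estimate how long the 32-bit RAPL energy counter takes to wrap
 * when running at TDP. For example (assumed typical values), with
 * ~15.3 uJ energy units and a 130 W TDP the range is roughly 500 sec.
 */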
2349 rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp;
2350 if (debug)
2351 fprintf(stderr, "RAPL: %.0f sec. Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp);
2352
2353 return;
2354 }
2355
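/*
 * perf_limit_reasons_probe()
 * Enable decoding of the PERF_LIMIT_REASONS MSRs on Haswell parts.
 * The HSW client cases fall through, apparently intentionally, so
 * that the core and ring registers are enabled there as well.
 */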
2356 void perf_limit_reasons_probe(unsigned int family, unsigned int model)
2357 {
2358 if (!genuine_intel)
2359 return;
2360
2361 if (family != 6)
2362 return;
2363
2364 switch (model) {
2365 case 0x3C: /* HSW */
2366 case 0x45: /* HSW */
2367 case 0x46: /* HSW */
2368 do_gfx_perf_limit_reasons = 1;
2369 case 0x3F: /* HSX */
2370 do_core_perf_limit_reasons = 1;
2371 do_ring_perf_limit_reasons = 1;
2372 default:
2373 return;
2374 }
2375 }
2376
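/*
 * print_thermal()
 * Decode MSR_IA32_PACKAGE_THERM_STATUS and MSR_IA32_THERM_STATUS;
 * the DTS readings are degrees below TCC activation, so they are
 * converted to degrees C using tcc_activation_temp.
 */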
2377 int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p)
2378 {
2379 unsigned long long msr;
2380 unsigned int dts;
2381 int cpu;
2382
2383 if (!(do_dts || do_ptm))
2384 return 0;
2385
2386 cpu = t->cpu_id;
2387
2388 /* DTS is per-core, no need to print for each thread */
2389 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
2390 return 0;
2391
2392 if (cpu_migrate(cpu)) {
2393 fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
2394 return -1;
2395 }
2396
2397 if (do_ptm && (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) {
2398 if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr))
2399 return 0;
2400
2401 dts = (msr >> 16) & 0x7F;
2402 fprintf(stderr, "cpu%d: MSR_IA32_PACKAGE_THERM_STATUS: 0x%08llx (%d C)\n",
2403 cpu, msr, tcc_activation_temp - dts);
2404
2405 #ifdef THERM_DEBUG
2406 if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &msr))
2407 return 0;
2408
2409 dts = (msr >> 16) & 0x7F;
2410 dts2 = (msr >> 8) & 0x7F;
2411 fprintf(stderr, "cpu%d: MSR_IA32_PACKAGE_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n",
2412 cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2);
2413 #endif
2414 }
2415
2416
2417 if (do_dts) {
2418 unsigned int resolution;
2419
2420 if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr))
2421 return 0;
2422
2423 dts = (msr >> 16) & 0x7F;
2424 resolution = (msr >> 27) & 0xF;
2425 fprintf(stderr, "cpu%d: MSR_IA32_THERM_STATUS: 0x%08llx (%d C +/- %d)\n",
2426 cpu, msr, tcc_activation_temp - dts, resolution);
2427
2428 #ifdef THERM_DEBUG
2429 if (get_msr(cpu, MSR_IA32_THERM_INTERRUPT, &msr))
2430 return 0;
2431
2432 dts = (msr >> 16) & 0x7F;
2433 dts2 = (msr >> 8) & 0x7F;
2434 fprintf(stderr, "cpu%d: MSR_IA32_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n",
2435 cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2);
2436 #endif
2437 }
2438
2439 return 0;
2440 }
2441
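/*
 * print_power_limit_msr()
 * Decode the enable, clamp, power-limit and time-window fields common
 * to the RAPL power-limit MSRs. The time window is
 * (1 + Z/4) * 2^Y * time-units, with Y in bits 21:17 and Z in bits 23:22.
 */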
2442 void print_power_limit_msr(int cpu, unsigned long long msr, char *label)
2443 {
2444 fprintf(stderr, "cpu%d: %s: %sabled (%f Watts, %f sec, clamp %sabled)\n",
2445 cpu, label,
2446 ((msr >> 15) & 1) ? "EN" : "DIS",
2447 ((msr >> 0) & 0x7FFF) * rapl_power_units,
2448 (1.0 + (((msr >> 22) & 0x3)/4.0)) * (1 << ((msr >> 17) & 0x1F)) * rapl_time_units,
2449 (((msr >> 16) & 1) ? "EN" : "DIS"));
2450
2451 return;
2452 }
2453
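/*
 * print_rapl()
 * Dump the RAPL unit, power-info and power-limit MSRs selected
 * by the do_rapl feature mask, for the first thread in each package.
 */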
2454 int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
2455 {
2456 unsigned long long msr;
2457 int cpu;
2458
2459 if (!do_rapl)
2460 return 0;
2461
2462 /* RAPL counters are per package, so print only for 1st thread/package */
2463 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
2464 return 0;
2465
2466 cpu = t->cpu_id;
2467 if (cpu_migrate(cpu)) {
2468 fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
2469 return -1;
2470 }
2471
2472 if (get_msr(cpu, MSR_RAPL_POWER_UNIT, &msr))
2473 return -1;
2474
2475 if (debug) {
2476 fprintf(stderr, "cpu%d: MSR_RAPL_POWER_UNIT: 0x%08llx "
2477 "(%f Watts, %f Joules, %f sec.)\n", cpu, msr,
2478 rapl_power_units, rapl_energy_units, rapl_time_units);
2479 }
2480 if (do_rapl & RAPL_PKG_POWER_INFO) {
2481
2482 if (get_msr(cpu, MSR_PKG_POWER_INFO, &msr))
2483 return -5;
2484
2485
2486 fprintf(stderr, "cpu%d: MSR_PKG_POWER_INFO: 0x%08llx (%.0f W TDP, RAPL %.0f - %.0f W, %f sec.)\n",
2487 cpu, msr,
2488 ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units,
2489 ((msr >> 16) & RAPL_POWER_GRANULARITY) * rapl_power_units,
2490 ((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units,
2491 ((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units);
2492
2493 }
2494 if (do_rapl & RAPL_PKG) {
2495
2496 if (get_msr(cpu, MSR_PKG_POWER_LIMIT, &msr))
2497 return -9;
2498
2499 fprintf(stderr, "cpu%d: MSR_PKG_POWER_LIMIT: 0x%08llx (%slocked)\n",
2500 cpu, msr, (msr >> 63) & 1 ? "": "UN");
2501
2502 print_power_limit_msr(cpu, msr, "PKG Limit #1");
2503 fprintf(stderr, "cpu%d: PKG Limit #2: %sabled (%f Watts, %f* sec, clamp %sabled)\n",
2504 cpu,
2505 ((msr >> 47) & 1) ? "EN" : "DIS",
2506 ((msr >> 32) & 0x7FFF) * rapl_power_units,
2507 (1.0 + (((msr >> 54) & 0x3)/4.0)) * (1 << ((msr >> 49) & 0x1F)) * rapl_time_units,
2508 ((msr >> 48) & 1) ? "EN" : "DIS");
2509 }
2510
2511 if (do_rapl & RAPL_DRAM_POWER_INFO) {
2512 if (get_msr(cpu, MSR_DRAM_POWER_INFO, &msr))
2513 return -6;
2514
2515 fprintf(stderr, "cpu%d: MSR_DRAM_POWER_INFO: 0x%08llx (%.0f W TDP, RAPL %.0f - %.0f W, %f sec.)\n",
2516 cpu, msr,
2517 ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units,
2518 ((msr >> 16) & RAPL_POWER_GRANULARITY) * rapl_power_units,
2519 ((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units,
2520 ((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units);
2521 }
2522 if (do_rapl & RAPL_DRAM) {
2523 if (get_msr(cpu, MSR_DRAM_POWER_LIMIT, &msr))
2524 return -9;
2525 fprintf(stderr, "cpu%d: MSR_DRAM_POWER_LIMIT: 0x%08llx (%slocked)\n",
2526 cpu, msr, (msr >> 31) & 1 ? "": "UN");
2527
2528 print_power_limit_msr(cpu, msr, "DRAM Limit");
2529 }
2530 if (do_rapl & RAPL_CORE_POLICY) {
2531 if (debug) {
2532 if (get_msr(cpu, MSR_PP0_POLICY, &msr))
2533 return -7;
2534
2535 fprintf(stderr, "cpu%d: MSR_PP0_POLICY: %lld\n", cpu, msr & 0xF);
2536 }
2537 }
2538 if (do_rapl & RAPL_CORES) {
2539 if (debug) {
2540
2541 if (get_msr(cpu, MSR_PP0_POWER_LIMIT, &msr))
2542 return -9;
2543 fprintf(stderr, "cpu%d: MSR_PP0_POWER_LIMIT: 0x%08llx (%slocked)\n",
2544 cpu, msr, (msr >> 31) & 1 ? "": "UN");
2545 print_power_limit_msr(cpu, msr, "Cores Limit");
2546 }
2547 }
2548 if (do_rapl & RAPL_GFX) {
2549 if (debug) {
2550 if (get_msr(cpu, MSR_PP1_POLICY, &msr))
2551 return -8;
2552
2553 fprintf(stderr, "cpu%d: MSR_PP1_POLICY: %lld\n", cpu, msr & 0xF);
2554
2555 if (get_msr(cpu, MSR_PP1_POWER_LIMIT, &msr))
2556 return -9;
2557 fprintf(stderr, "cpu%d: MSR_PP1_POWER_LIMIT: 0x%08llx (%slocked)\n",
2558 cpu, msr, (msr >> 31) & 1 ? "": "UN");
2559 print_power_limit_msr(cpu, msr, "GFX Limit");
2560 }
2561 }
2562 return 0;
2563 }
2564
2565 /*
2566 * SNB adds support for additional MSRs:
2567 *
2568 * MSR_PKG_C7_RESIDENCY 0x000003fa
2569 * MSR_CORE_C7_RESIDENCY 0x000003fe
2570 * MSR_PKG_C2_RESIDENCY 0x0000060d
2571 */
2572
2573 int has_snb_msrs(unsigned int family, unsigned int model)
2574 {
2575 if (!genuine_intel)
2576 return 0;
2577
2578 switch (model) {
2579 case 0x2A:
2580 case 0x2D:
2581 case 0x3A: /* IVB */
2582 case 0x3E: /* IVB Xeon */
2583 case 0x3C: /* HSW */
2584 case 0x3F: /* HSW */
2585 case 0x45: /* HSW */
2586 case 0x46: /* HSW */
2587 case 0x3D: /* BDW */
2588 case 0x47: /* BDW */
2589 case 0x4F: /* BDX */
2590 case 0x56: /* BDX-DE */
2591 case 0x4E: /* SKL */
2592 case 0x5E: /* SKL */
2593 return 1;
2594 }
2595 return 0;
2596 }
2597
2598 /*
2599 * HSW adds support for additional MSRs:
2600 *
2601 * MSR_PKG_C8_RESIDENCY 0x00000630
2602 * MSR_PKG_C9_RESIDENCY 0x00000631
2603 * MSR_PKG_C10_RESIDENCY 0x00000632
2604 */
2605 int has_hsw_msrs(unsigned int family, unsigned int model)
2606 {
2607 if (!genuine_intel)
2608 return 0;
2609
2610 switch (model) {
2611 case 0x45: /* HSW */
2612 case 0x3D: /* BDW */
2613 case 0x4E: /* SKL */
2614 case 0x5E: /* SKL */
2615 return 1;
2616 }
2617 return 0;
2618 }
2619
2620 /*
2621 * SKL adds support for additional MSRS:
2622 *
2623 * MSR_PKG_WEIGHTED_CORE_C0_RES 0x00000658
2624 * MSR_PKG_ANY_CORE_C0_RES 0x00000659
2625 * MSR_PKG_ANY_GFXE_C0_RES 0x0000065A
2626 * MSR_PKG_BOTH_CORE_GFXE_C0_RES 0x0000065B
2627 */
2628 int has_skl_msrs(unsigned int family, unsigned int model)
2629 {
2630 if (!genuine_intel)
2631 return 0;
2632
2633 switch (model) {
2634 case 0x4E: /* SKL */
2635 case 0x5E: /* SKL */
2636 return 1;
2637 }
2638 return 0;
2639 }
2640
2641
2642
2643 int is_slm(unsigned int family, unsigned int model)
2644 {
2645 if (!genuine_intel)
2646 return 0;
2647 switch (model) {
2648 case 0x37: /* BYT */
2649 case 0x4D: /* AVN */
2650 return 1;
2651 }
2652 return 0;
2653 }
2654
2655 int is_knl(unsigned int family, unsigned int model)
2656 {
2657 if (!genuine_intel)
2658 return 0;
2659 switch (model) {
2660 case 0x57: /* KNL */
2661 return 1;
2662 }
2663 return 0;
2664 }
2665
2666 unsigned int get_aperf_mperf_multiplier(unsigned int family, unsigned int model)
2667 {
2668 if (is_knl(family, model))
2669 return 1024;
2670 return 1;
2671 }
2672
2673 #define SLM_BCLK_FREQS 5
2674 double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 80.0};
2675
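/*
 * slm_bclk()
 * Silvermont BCLK is not a fixed 100 MHz;
 * look the frequency up in MSR_FSB_FREQ.
 */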
2676 double slm_bclk(void)
2677 {
2678 unsigned long long msr = 3;
2679 unsigned int i;
2680 double freq;
2681
2682 if (get_msr(base_cpu, MSR_FSB_FREQ, &msr))
2683 fprintf(stderr, "SLM BCLK: unknown\n");
2684
2685 i = msr & 0xf;
2686 if (i >= SLM_BCLK_FREQS) {
2687 fprintf(stderr, "SLM BCLK[%d] invalid\n", i);
2688 msr = 3;
2689 }
2690 freq = slm_freq_table[i];
2691
2692 fprintf(stderr, "SLM BCLK: %.1f MHz\n", freq);
2693
2694 return freq;
2695 }
2696
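/*
 * discover_bclk()
 * Return the bus clock in MHz: 100 on SNB and later (and KNL),
 * table-driven on Silvermont, 133.33 on earlier Nehalem/Westmere parts.
 */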
2697 double discover_bclk(unsigned int family, unsigned int model)
2698 {
2699 if (has_snb_msrs(family, model) || is_knl(family, model))
2700 return 100.00;
2701 else if (is_slm(family, model))
2702 return slm_bclk();
2703 else
2704 return 133.33;
2705 }
2706
2707 /*
2708 * MSR_IA32_TEMPERATURE_TARGET indicates the temperature where
2709 * the Thermal Control Circuit (TCC) activates.
2710 * This is usually equal to tjMax.
2711 *
2712 * Older processors do not have this MSR, so there we guess,
2713 * but also allow cmdline over-ride with -T.
2714 *
2715 * Several MSR temperature values are in units of degrees-C
2716 * below this value, including the Digital Thermal Sensor (DTS),
2717 * Package Thermal Management Sensor (PTM), and thermal event thresholds.
2718 */
2719 int set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p)
2720 {
2721 unsigned long long msr;
2722 unsigned int target_c_local;
2723 int cpu;
2724
2725 /* tcc_activation_temp is used only for dts or ptm */
2726 if (!(do_dts || do_ptm))
2727 return 0;
2728
2729 /* this is a per-package concept */
2730 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
2731 return 0;
2732
2733 cpu = t->cpu_id;
2734 if (cpu_migrate(cpu)) {
2735 fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
2736 return -1;
2737 }
2738
2739 if (tcc_activation_temp_override != 0) {
2740 tcc_activation_temp = tcc_activation_temp_override;
2741 fprintf(stderr, "cpu%d: Using cmdline TCC Target (%d C)\n",
2742 cpu, tcc_activation_temp);
2743 return 0;
2744 }
2745
2746 /* Temperature Target MSR is Nehalem and newer only */
2747 if (!do_nhm_platform_info)
2748 goto guess;
2749
2750 if (get_msr(base_cpu, MSR_IA32_TEMPERATURE_TARGET, &msr))
2751 goto guess;
2752
2753 target_c_local = (msr >> 16) & 0xFF;
2754
2755 if (debug)
2756 fprintf(stderr, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C)\n",
2757 cpu, msr, target_c_local);
2758
2759 if (!target_c_local)
2760 goto guess;
2761
2762 tcc_activation_temp = target_c_local;
2763
2764 return 0;
2765
2766 guess:
2767 tcc_activation_temp = TJMAX_DEFAULT;
2768 fprintf(stderr, "cpu%d: Guessing tjMax %d C, please use -T to specify\n",
2769 cpu, tcc_activation_temp);
2770
2771 return 0;
2772 }
2773
2774 void decode_misc_enable_msr(void)
2775 {
2776 unsigned long long msr;
2777
2778 if (!get_msr(base_cpu, MSR_IA32_MISC_ENABLE, &msr))
2779 fprintf(stderr, "cpu%d: MSR_IA32_MISC_ENABLE: 0x%08llx (%s %s %s)\n",
2780 base_cpu, msr,
2781 msr & (1 << 3) ? "TCC" : "",
2782 msr & (1 << 16) ? "EIST" : "",
2783 msr & (1 << 18) ? "MONITOR" : "");
2784 }
2785
2786 /*
2787 * Decode MSR_MISC_PWR_MGMT
2788 *
2789 * Decode the bits according to the Nehalem documentation
2790 * bit[0] seems to continue to have the same meaning going forward
2791 * bit[1] less so...
2792 */
2793 void decode_misc_pwr_mgmt_msr(void)
2794 {
2795 unsigned long long msr;
2796
2797 if (!do_nhm_platform_info)
2798 return;
2799
2800 if (!get_msr(base_cpu, MSR_MISC_PWR_MGMT, &msr))
2801 fprintf(stderr, "cpu%d: MSR_MISC_PWR_MGMT: 0x%08llx (%sable-EIST_Coordination %sable-EPB)\n",
2802 base_cpu, msr,
2803 msr & (1 << 0) ? "DIS" : "EN",
2804 msr & (1 << 1) ? "EN" : "DIS");
2805 }
2806
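/*
 * process_cpuid()
 * Identify the processor via CPUID and set the global feature flags
 * that select which MSRs and counters turbostat will access.
 */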
2807 void process_cpuid()
2808 {
2809 unsigned int eax, ebx, ecx, edx, max_level, max_extended_level;
2810 unsigned int fms, family, model, stepping;
2811
2812 eax = ebx = ecx = edx = 0;
2813
2814 __get_cpuid(0, &max_level, &ebx, &ecx, &edx);
2815
2816 if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e)
2817 genuine_intel = 1;
2818
2819 if (debug)
2820 fprintf(stderr, "CPUID(0): %.4s%.4s%.4s ",
2821 (char *)&ebx, (char *)&edx, (char *)&ecx);
2822
2823 __get_cpuid(1, &fms, &ebx, &ecx, &edx);
2824 family = (fms >> 8) & 0xf;
2825 model = (fms >> 4) & 0xf;
2826 stepping = fms & 0xf;
2827 if (family == 6 || family == 0xf)
2828 model += ((fms >> 16) & 0xf) << 4;
2829
2830 if (debug) {
2831 fprintf(stderr, "%d CPUID levels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n",
2832 max_level, family, model, stepping, family, model, stepping);
2833 fprintf(stderr, "CPUID(1): %s %s %s %s %s %s %s %s\n",
2834 ecx & (1 << 0) ? "SSE3" : "-",
2835 ecx & (1 << 3) ? "MONITOR" : "-",
2836 ecx & (1 << 7) ? "EIST" : "-",
2837 ecx & (1 << 8) ? "TM2" : "-",
2838 edx & (1 << 4) ? "TSC" : "-",
2839 edx & (1 << 5) ? "MSR" : "-",
2840 edx & (1 << 22) ? "ACPI-TM" : "-",
2841 edx & (1 << 29) ? "TM" : "-");
2842 }
2843
2844 if (!(edx & (1 << 5)))
2845 errx(1, "CPUID: no MSR");
2846
2847 /*
2848 * check max extended function levels of CPUID.
2849 * This is needed to check for invariant TSC.
2850 * This check is valid for both Intel and AMD.
2851 */
2852 ebx = ecx = edx = 0;
2853 __get_cpuid(0x80000000, &max_extended_level, &ebx, &ecx, &edx);
2854
2855 if (max_extended_level >= 0x80000007) {
2856
2857 /*
2858 * Non-Stop TSC is advertised by CPUID.EAX=0x80000007: EDX.bit8
2859 * this check is valid for both Intel and AMD
2860 */
2861 __get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
2862 has_invariant_tsc = edx & (1 << 8);
2863 }
2864
2865 /*
2866 * APERF/MPERF is advertised by CPUID.EAX=0x6: ECX.bit0
2867 * this check is valid for both Intel and AMD
2868 */
2869
2870 __get_cpuid(0x6, &eax, &ebx, &ecx, &edx);
2871 has_aperf = ecx & (1 << 0);
2872 do_dts = eax & (1 << 0);
2873 do_ptm = eax & (1 << 6);
2874 has_hwp = eax & (1 << 7);
2875 has_hwp_notify = eax & (1 << 8);
2876 has_hwp_activity_window = eax & (1 << 9);
2877 has_hwp_epp = eax & (1 << 10);
2878 has_hwp_pkg = eax & (1 << 11);
2879 has_epb = ecx & (1 << 3);
2880
2881 if (debug)
2882 fprintf(stderr, "CPUID(6): %sAPERF, %sDTS, %sPTM, %sHWP, "
2883 "%sHWPnotify, %sHWPwindow, %sHWPepp, %sHWPpkg, %sEPB\n",
2884 has_aperf ? "" : "No-",
2885 do_dts ? "" : "No-",
2886 do_ptm ? "" : "No-",
2887 has_hwp ? "" : "No-",
2888 has_hwp_notify ? "" : "No-",
2889 has_hwp_activity_window ? "" : "No-",
2890 has_hwp_epp ? "" : "No-",
2891 has_hwp_pkg ? "" : "No-",
2892 has_epb ? "" : "No-");
2893
2894 if (debug)
2895 decode_misc_enable_msr();
2896
2897 if (max_level >= 0x15) {
2898 unsigned int eax_crystal;
2899 unsigned int ebx_tsc;
2900
2901 /*
2902 * CPUID 15H TSC/Crystal ratio, possibly Crystal Hz
2903 */
2904 eax_crystal = ebx_tsc = crystal_hz = edx = 0;
2905 __get_cpuid(0x15, &eax_crystal, &ebx_tsc, &crystal_hz, &edx);
2906
2907 if (ebx_tsc != 0) {
2908
2909 if (debug && (ebx_tsc != 0))
2910 fprintf(stderr, "CPUID(0x15): eax_crystal: %d ebx_tsc: %d ecx_crystal_hz: %d\n",
2911 eax_crystal, ebx_tsc, crystal_hz);
2912
2913 if (crystal_hz == 0)
2914 switch(model) {
2915 case 0x4E: /* SKL */
2916 case 0x5E: /* SKL */
2917 crystal_hz = 24000000; /* 24 MHz */
2918 break;
2919 default:
2920 crystal_hz = 0;
2921 }
2922
2923 if (crystal_hz) {
2924 tsc_hz = (unsigned long long) crystal_hz * ebx_tsc / eax_crystal;
2925 if (debug)
2926 fprintf(stderr, "TSC: %lld MHz (%d Hz * %d / %d / 1000000)\n",
2927 tsc_hz / 1000000, crystal_hz, ebx_tsc, eax_crystal);
2928 }
2929 }
2930 }
2931 if (max_level >= 0x16) {
2932 unsigned int base_mhz, max_mhz, bus_mhz, edx;
2933
2934 /*
2935 * CPUID 16H Base MHz, Max MHz, Bus MHz
2936 */
2937 base_mhz = max_mhz = bus_mhz = edx = 0;
2938
2939 __get_cpuid(0x16, &base_mhz, &max_mhz, &bus_mhz, &edx);
2940 if (debug)
2941 fprintf(stderr, "CPUID(0x16): base_mhz: %d max_mhz: %d bus_mhz: %d\n",
2942 base_mhz, max_mhz, bus_mhz);
2943 }
2944
2945 if (has_aperf)
2946 aperf_mperf_multiplier = get_aperf_mperf_multiplier(family, model);
2947
2948 do_nhm_platform_info = do_nhm_cstates = do_smi = probe_nhm_msrs(family, model);
2949 do_snb_cstates = has_snb_msrs(family, model);
2950 do_pc2 = do_snb_cstates && (pkg_cstate_limit >= PCL__2);
2951 do_pc3 = (pkg_cstate_limit >= PCL__3);
2952 do_pc6 = (pkg_cstate_limit >= PCL__6);
2953 do_pc7 = do_snb_cstates && (pkg_cstate_limit >= PCL__7);
2954 do_c8_c9_c10 = has_hsw_msrs(family, model);
2955 do_skl_residency = has_skl_msrs(family, model);
2956 do_slm_cstates = is_slm(family, model);
2957 do_knl_cstates = is_knl(family, model);
2958
2959 if (debug)
2960 decode_misc_pwr_mgmt_msr();
2961
2962 rapl_probe(family, model);
2963 perf_limit_reasons_probe(family, model);
2964
2965 if (debug)
2966 dump_cstate_pstate_config_info(family, model);
2967
2968 if (has_skl_msrs(family, model))
2969 calculate_tsc_tweak();
2970
2971 return;
2972 }
2973
2974 void help()
2975 {
2976 fprintf(stderr,
2977 "Usage: turbostat [OPTIONS][(--interval seconds) | COMMAND ...]\n"
2978 "\n"
2979 "Turbostat forks the specified COMMAND and prints statistics\n"
2980 "when COMMAND completes.\n"
2981 "If no COMMAND is specified, turbostat wakes every 5-seconds\n"
2982 "to print statistics, until interrupted.\n"
2983 "--debug run in \"debug\" mode\n"
2984 "--interval sec Override default 5-second measurement interval\n"
2985 "--help print this help message\n"
2986 "--counter msr print 32-bit counter at address \"msr\"\n"
2987 "--Counter msr print 64-bit Counter at address \"msr\"\n"
2988 "--msr msr print 32-bit value at address \"msr\"\n"
2989 "--MSR msr print 64-bit Value at address \"msr\"\n"
2990 "--version print version information\n"
2991 "\n"
2992 "For more help, run \"man turbostat\"\n");
2993 }
2994
2995
2996 /*
2997 * in /dev/cpu/ return success for names that are numbers
2998 * i.e. filter out ".", "..", "microcode".
2999 */
3000 int dir_filter(const struct dirent *dirp)
3001 {
3002 if (isdigit(dirp->d_name[0]))
3003 return 1;
3004 else
3005 return 0;
3006 }
3007
3008 int open_dev_cpu_msr(int dummy1)
3009 {
3010 return 0;
3011 }
3012
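/*
 * topology_probe()
 * Count the present CPUs and determine the package/core/thread
 * topology used to size the counter arrays.
 */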
3013 void topology_probe()
3014 {
3015 int i;
3016 int max_core_id = 0;
3017 int max_package_id = 0;
3018 int max_siblings = 0;
3019 struct cpu_topology {
3020 int core_id;
3021 int physical_package_id;
3022 } *cpus;
3023
3024 /* Initialize num_cpus, max_cpu_num */
3025 topo.num_cpus = 0;
3026 topo.max_cpu_num = 0;
3027 for_all_proc_cpus(count_cpus);
3028 if (!summary_only && topo.num_cpus > 1)
3029 show_cpu = 1;
3030
3031 if (debug > 1)
3032 fprintf(stderr, "num_cpus %d max_cpu_num %d\n", topo.num_cpus, topo.max_cpu_num);
3033
3034 cpus = calloc(1, (topo.max_cpu_num + 1) * sizeof(struct cpu_topology));
3035 if (cpus == NULL)
3036 err(1, "calloc cpus");
3037
3038 /*
3039 * Allocate and initialize cpu_present_set
3040 */
3041 cpu_present_set = CPU_ALLOC((topo.max_cpu_num + 1));
3042 if (cpu_present_set == NULL)
3043 err(3, "CPU_ALLOC");
3044 cpu_present_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
3045 CPU_ZERO_S(cpu_present_setsize, cpu_present_set);
3046 for_all_proc_cpus(mark_cpu_present);
3047
3048 /*
3049 * Allocate and initialize cpu_affinity_set
3050 */
3051 cpu_affinity_set = CPU_ALLOC((topo.max_cpu_num + 1));
3052 if (cpu_affinity_set == NULL)
3053 err(3, "CPU_ALLOC");
3054 cpu_affinity_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
3055 CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);
3056
3057
3058 /*
3059 * For online cpus
3060 * find max_core_id, max_package_id
3061 */
3062 for (i = 0; i <= topo.max_cpu_num; ++i) {
3063 int siblings;
3064
3065 if (cpu_is_not_present(i)) {
3066 if (debug > 1)
3067 fprintf(stderr, "cpu%d NOT PRESENT\n", i);
3068 continue;
3069 }
3070 cpus[i].core_id = get_core_id(i);
3071 if (cpus[i].core_id > max_core_id)
3072 max_core_id = cpus[i].core_id;
3073
3074 cpus[i].physical_package_id = get_physical_package_id(i);
3075 if (cpus[i].physical_package_id > max_package_id)
3076 max_package_id = cpus[i].physical_package_id;
3077
3078 siblings = get_num_ht_siblings(i);
3079 if (siblings > max_siblings)
3080 max_siblings = siblings;
3081 if (debug > 1)
3082 fprintf(stderr, "cpu %d pkg %d core %d\n",
3083 i, cpus[i].physical_package_id, cpus[i].core_id);
3084 }
3085 topo.num_cores_per_pkg = max_core_id + 1;
3086 if (debug > 1)
3087 fprintf(stderr, "max_core_id %d, sizing for %d cores per package\n",
3088 max_core_id, topo.num_cores_per_pkg);
3089 if (debug && !summary_only && topo.num_cores_per_pkg > 1)
3090 show_core = 1;
3091
3092 topo.num_packages = max_package_id + 1;
3093 if (debug > 1)
3094 fprintf(stderr, "max_package_id %d, sizing for %d packages\n",
3095 max_package_id, topo.num_packages);
3096 if (debug && !summary_only && topo.num_packages > 1)
3097 show_pkg = 1;
3098
3099 topo.num_threads_per_core = max_siblings;
3100 if (debug > 1)
3101 fprintf(stderr, "max_siblings %d\n", max_siblings);
3102
3103 free(cpus);
3104 }
3105
3106 void
3107 allocate_counters(struct thread_data **t, struct core_data **c, struct pkg_data **p)
3108 {
3109 int i;
3110
3111 *t = calloc(topo.num_threads_per_core * topo.num_cores_per_pkg *
3112 topo.num_packages, sizeof(struct thread_data));
3113 if (*t == NULL)
3114 goto error;
3115
3116 for (i = 0; i < topo.num_threads_per_core *
3117 topo.num_cores_per_pkg * topo.num_packages; i++)
3118 (*t)[i].cpu_id = -1;
3119
3120 *c = calloc(topo.num_cores_per_pkg * topo.num_packages,
3121 sizeof(struct core_data));
3122 if (*c == NULL)
3123 goto error;
3124
3125 for (i = 0; i < topo.num_cores_per_pkg * topo.num_packages; i++)
3126 (*c)[i].core_id = -1;
3127
3128 *p = calloc(topo.num_packages, sizeof(struct pkg_data));
3129 if (*p == NULL)
3130 goto error;
3131
3132 for (i = 0; i < topo.num_packages; i++)
3133 (*p)[i].package_id = i;
3134
3135 return;
3136 error:
3137 err(1, "calloc counters");
3138 }
3139 /*
3140 * init_counter()
3141 *
3142 * set cpu_id, core_num, pkg_num
3143 * set FIRST_THREAD_IN_CORE and FIRST_CORE_IN_PACKAGE
3144 *
3145 * increment topo.num_cores when 1st core in pkg seen
3146 */
3147 void init_counter(struct thread_data *thread_base, struct core_data *core_base,
3148 struct pkg_data *pkg_base, int thread_num, int core_num,
3149 int pkg_num, int cpu_id)
3150 {
3151 struct thread_data *t;
3152 struct core_data *c;
3153 struct pkg_data *p;
3154
3155 t = GET_THREAD(thread_base, thread_num, core_num, pkg_num);
3156 c = GET_CORE(core_base, core_num, pkg_num);
3157 p = GET_PKG(pkg_base, pkg_num);
3158
3159 t->cpu_id = cpu_id;
3160 if (thread_num == 0) {
3161 t->flags |= CPU_IS_FIRST_THREAD_IN_CORE;
3162 if (cpu_is_first_core_in_package(cpu_id))
3163 t->flags |= CPU_IS_FIRST_CORE_IN_PACKAGE;
3164 }
3165
3166 c->core_id = core_num;
3167 p->package_id = pkg_num;
3168 }
3169
3170
3171 int initialize_counters(int cpu_id)
3172 {
3173 int my_thread_id, my_core_id, my_package_id;
3174
3175 my_package_id = get_physical_package_id(cpu_id);
3176 my_core_id = get_core_id(cpu_id);
3177 my_thread_id = get_cpu_position_in_core(cpu_id);
3178 if (!my_thread_id)
3179 topo.num_cores++;
3180
3181 init_counter(EVEN_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id);
3182 init_counter(ODD_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id);
3183 return 0;
3184 }
3185
3186 void allocate_output_buffer()
3187 {
3188 output_buffer = calloc(1, (1 + topo.num_cpus) * 1024);
3189 outp = output_buffer;
3190 if (outp == NULL)
3191 err(-1, "calloc output buffer");
3192 }
3193
3194 void setup_all_buffers(void)
3195 {
3196 topology_probe();
3197 allocate_counters(&thread_even, &core_even, &package_even);
3198 allocate_counters(&thread_odd, &core_odd, &package_odd);
3199 allocate_output_buffer();
3200 for_all_proc_cpus(initialize_counters);
3201 }
3202
3203 void set_base_cpu(void)
3204 {
3205 base_cpu = sched_getcpu();
3206 if (base_cpu < 0)
3207 err(-ENODEV, "No valid cpus found");
3208
3209 if (debug > 1)
3210 fprintf(stderr, "base_cpu = %d\n", base_cpu);
3211 }
3212
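/*
 * turbostat_init()
 * Probe the system, verify MSR access, and in --debug mode
 * print the one-time configuration dumps.
 */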
3213 void turbostat_init()
3214 {
3215 setup_all_buffers();
3216 set_base_cpu();
3217 check_dev_msr();
3218 check_permissions();
3219 process_cpuid();
3220
3221
3222 if (debug)
3223 for_all_cpus(print_hwp, ODD_COUNTERS);
3224
3225 if (debug)
3226 for_all_cpus(print_epb, ODD_COUNTERS);
3227
3228 if (debug)
3229 for_all_cpus(print_perf_limit, ODD_COUNTERS);
3230
3231 if (debug)
3232 for_all_cpus(print_rapl, ODD_COUNTERS);
3233
3234 for_all_cpus(set_temperature_target, ODD_COUNTERS);
3235
3236 if (debug)
3237 for_all_cpus(print_thermal, ODD_COUNTERS);
3238 }
3239
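/*
 * fork_it()
 * Snapshot counters, fork and exec COMMAND, wait for it to exit,
 * snapshot again, and print one set of statistics for the interval.
 */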
3240 int fork_it(char **argv)
3241 {
3242 pid_t child_pid;
3243 int status;
3244
3245 status = for_all_cpus(get_counters, EVEN_COUNTERS);
3246 if (status)
3247 exit(status);
3248 /* clear affinity side-effect of get_counters() */
3249 sched_setaffinity(0, cpu_present_setsize, cpu_present_set);
3250 gettimeofday(&tv_even, (struct timezone *)NULL);
3251
3252 child_pid = fork();
3253 if (!child_pid) {
3254 /* child */
3255 execvp(argv[0], argv);
3256 } else {
3257
3258 /* parent */
3259 if (child_pid == -1)
3260 err(1, "fork");
3261
3262 signal(SIGINT, SIG_IGN);
3263 signal(SIGQUIT, SIG_IGN);
3264 if (waitpid(child_pid, &status, 0) == -1)
3265 err(status, "waitpid");
3266 }
3267 /*
3268 * n.b. fork_it() does not check for errors from for_all_cpus()
3269 * because re-starting is problematic when forking
3270 */
3271 for_all_cpus(get_counters, ODD_COUNTERS);
3272 gettimeofday(&tv_odd, (struct timezone *)NULL);
3273 timersub(&tv_odd, &tv_even, &tv_delta);
3274 for_all_cpus_2(delta_cpu, ODD_COUNTERS, EVEN_COUNTERS);
3275 compute_average(EVEN_COUNTERS);
3276 format_all_counters(EVEN_COUNTERS);
3277 flush_stderr();
3278
3279 fprintf(stderr, "%.6f sec\n", tv_delta.tv_sec + tv_delta.tv_usec/1000000.0);
3280
3281 return status;
3282 }
3283
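/*
 * get_and_dump_counters()
 * Read the counters once and print their raw values (--Dump mode).
 */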
3284 int get_and_dump_counters(void)
3285 {
3286 int status;
3287
3288 status = for_all_cpus(get_counters, ODD_COUNTERS);
3289 if (status)
3290 return status;
3291
3292 status = for_all_cpus(dump_counters, ODD_COUNTERS);
3293 if (status)
3294 return status;
3295
3296 flush_stdout();
3297
3298 return status;
3299 }
3300
3301 void print_version() {
3302 fprintf(stderr, "turbostat version 4.10 10 Dec, 2015"
3303 " - Len Brown <lenb@kernel.org>\n");
3304 }
3305
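/*
 * cmdline()
 * Parse command-line options into the corresponding globals.
 */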
3306 void cmdline(int argc, char **argv)
3307 {
3308 int opt;
3309 int option_index = 0;
3310 static struct option long_options[] = {
3311 {"Counter", required_argument, 0, 'C'},
3312 {"counter", required_argument, 0, 'c'},
3313 {"Dump", no_argument, 0, 'D'},
3314 {"debug", no_argument, 0, 'd'},
3315 {"interval", required_argument, 0, 'i'},
3316 {"help", no_argument, 0, 'h'},
3317 {"Joules", no_argument, 0, 'J'},
3318 {"MSR", required_argument, 0, 'M'},
3319 {"msr", required_argument, 0, 'm'},
3320 {"Package", no_argument, 0, 'p'},
3321 {"processor", no_argument, 0, 'p'},
3322 {"Summary", no_argument, 0, 'S'},
3323 {"TCC", required_argument, 0, 'T'},
3324 {"version", no_argument, 0, 'v' },
3325 {0, 0, 0, 0 }
3326 };
3327
3328 progname = argv[0];
3329
3330 while ((opt = getopt_long_only(argc, argv, "+C:c:Ddhi:JM:m:PpST:v",
3331 long_options, &option_index)) != -1) {
3332 switch (opt) {
3333 case 'C':
3334 sscanf(optarg, "%x", &extra_delta_offset64);
3335 break;
3336 case 'c':
3337 sscanf(optarg, "%x", &extra_delta_offset32);
3338 break;
3339 case 'D':
3340 dump_only++;
3341 break;
3342 case 'd':
3343 debug++;
3344 break;
3345 case 'h':
3346 default:
3347 help();
3348 exit(1);
3349 case 'i':
3350 {
3351 double interval = strtod(optarg, NULL);
3352
3353 if (interval < 0.001) {
3354 fprintf(stderr, "interval %f seconds is too small\n",
3355 interval);
3356 exit(2);
3357 }
3358
3359 interval_ts.tv_sec = interval;
3360 interval_ts.tv_nsec = (interval - interval_ts.tv_sec) * 1000000000;
3361 }
3362 break;
3363 case 'J':
3364 rapl_joules++;
3365 break;
3366 case 'M':
3367 sscanf(optarg, "%x", &extra_msr_offset64);
3368 break;
3369 case 'm':
3370 sscanf(optarg, "%x", &extra_msr_offset32);
3371 break;
3372 case 'P':
3373 show_pkg_only++;
3374 break;
3375 case 'p':
3376 show_core_only++;
3377 break;
3378 case 'S':
3379 summary_only++;
3380 break;
3381 case 'T':
3382 tcc_activation_temp_override = atoi(optarg);
3383 break;
3384 case 'v':
3385 print_version();
3386 exit(0);
3387 break;
3388 }
3389 }
3390 }
3391
3392 int main(int argc, char **argv)
3393 {
3394 cmdline(argc, argv);
3395
3396 if (debug)
3397 print_version();
3398
3399 turbostat_init();
3400
3401 /* dump counters and exit */
3402 if (dump_only)
3403 return get_and_dump_counters();
3404
3405 /*
3406 * if any params left, it must be a command to fork
3407 */
3408 if (argc - optind)
3409 return fork_it(argv + optind);
3410 else
3411 turbostat_loop();
3412
3413 return 0;
3414 }