1 /*
2 * turbostat -- show CPU frequency and C-state residency
3 * on modern Intel turbo-capable processors.
4 *
5 * Copyright (c) 2013 Intel Corporation.
6 * Len Brown <len.brown@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
20 */
21
22 #define _GNU_SOURCE
23 #include MSRHEADER
24 #include INTEL_FAMILY_HEADER
25 #include <stdarg.h>
26 #include <stdio.h>
27 #include <err.h>
28 #include <unistd.h>
29 #include <sys/types.h>
30 #include <sys/wait.h>
31 #include <sys/stat.h>
32 #include <sys/resource.h>
33 #include <fcntl.h>
34 #include <signal.h>
35 #include <sys/time.h>
36 #include <stdlib.h>
37 #include <getopt.h>
38 #include <dirent.h>
39 #include <string.h>
40 #include <ctype.h>
41 #include <sched.h>
42 #include <time.h>
43 #include <cpuid.h>
44 #include <linux/capability.h>
45 #include <errno.h>
46
47 char *proc_stat = "/proc/stat";
48 FILE *outf;
49 int *fd_percpu;
50 struct timespec interval_ts = {5, 0};
51 unsigned int debug;
52 unsigned int quiet;
53 unsigned int sums_need_wide_columns;
54 unsigned int rapl_joules;
55 unsigned int summary_only;
56 unsigned int list_header_only;
57 unsigned int dump_only;
58 unsigned int do_snb_cstates;
59 unsigned int do_knl_cstates;
60 unsigned int do_skl_residency;
61 unsigned int do_slm_cstates;
62 unsigned int use_c1_residency_msr;
63 unsigned int has_aperf;
64 unsigned int has_epb;
65 unsigned int do_irtl_snb;
66 unsigned int do_irtl_hsw;
67 unsigned int units = 1000000; /* MHz etc */
68 unsigned int genuine_intel;
69 unsigned int has_invariant_tsc;
70 unsigned int do_nhm_platform_info;
71 unsigned int no_MSR_MISC_PWR_MGMT;
72 unsigned int aperf_mperf_multiplier = 1;
73 double bclk;
74 double base_hz;
75 unsigned int has_base_hz;
76 double tsc_tweak = 1.0;
77 unsigned int show_pkg_only;
78 unsigned int show_core_only;
79 char *output_buffer, *outp;
80 unsigned int do_rapl;
81 unsigned int do_dts;
82 unsigned int do_ptm;
83 unsigned long long gfx_cur_rc6_ms;
84 unsigned int gfx_cur_mhz;
85 unsigned int tcc_activation_temp;
86 unsigned int tcc_activation_temp_override;
87 double rapl_power_units, rapl_time_units;
88 double rapl_dram_energy_units, rapl_energy_units;
89 double rapl_joule_counter_range;
90 unsigned int do_core_perf_limit_reasons;
91 unsigned int do_gfx_perf_limit_reasons;
92 unsigned int do_ring_perf_limit_reasons;
93 unsigned int crystal_hz;
94 unsigned long long tsc_hz;
95 int base_cpu;
96 double discover_bclk(unsigned int family, unsigned int model);
97 unsigned int has_hwp; /* IA32_PM_ENABLE, IA32_HWP_CAPABILITIES */
98 /* IA32_HWP_REQUEST, IA32_HWP_STATUS */
99 unsigned int has_hwp_notify; /* IA32_HWP_INTERRUPT */
100 unsigned int has_hwp_activity_window; /* IA32_HWP_REQUEST[bits 41:32] */
101 unsigned int has_hwp_epp; /* IA32_HWP_REQUEST[bits 31:24] */
102 unsigned int has_hwp_pkg; /* IA32_HWP_REQUEST_PKG */
103 unsigned int has_misc_feature_control;
104
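/*
 * do_rapl is a bitmask built from the RAPL_* flags below; it records which
 * RAPL MSR groups the running platform supports, and get_counters() reads an
 * energy or perf-status MSR only when the corresponding group bit is set.
 */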
105 #define RAPL_PKG (1 << 0)
106 /* 0x610 MSR_PKG_POWER_LIMIT */
107 /* 0x611 MSR_PKG_ENERGY_STATUS */
108 #define RAPL_PKG_PERF_STATUS (1 << 1)
109 /* 0x613 MSR_PKG_PERF_STATUS */
110 #define RAPL_PKG_POWER_INFO (1 << 2)
111 /* 0x614 MSR_PKG_POWER_INFO */
112
113 #define RAPL_DRAM (1 << 3)
114 /* 0x618 MSR_DRAM_POWER_LIMIT */
115 /* 0x619 MSR_DRAM_ENERGY_STATUS */
116 #define RAPL_DRAM_PERF_STATUS (1 << 4)
117 /* 0x61b MSR_DRAM_PERF_STATUS */
118 #define RAPL_DRAM_POWER_INFO (1 << 5)
119 /* 0x61c MSR_DRAM_POWER_INFO */
120
121 #define RAPL_CORES_POWER_LIMIT (1 << 6)
122 /* 0x638 MSR_PP0_POWER_LIMIT */
123 #define RAPL_CORE_POLICY (1 << 7)
124 /* 0x63a MSR_PP0_POLICY */
125
126 #define RAPL_GFX (1 << 8)
127 /* 0x640 MSR_PP1_POWER_LIMIT */
128 /* 0x641 MSR_PP1_ENERGY_STATUS */
129 /* 0x642 MSR_PP1_POLICY */
130
131 #define RAPL_CORES_ENERGY_STATUS (1 << 9)
132 /* 0x639 MSR_PP0_ENERGY_STATUS */
133 #define RAPL_CORES (RAPL_CORES_ENERGY_STATUS | RAPL_CORES_POWER_LIMIT)
134 #define TJMAX_DEFAULT 100
135
136 #define MAX(a, b) ((a) > (b) ? (a) : (b))
137
138 /*
139 * buffer size used by sscanf() for added column names
140 * Usually truncated to 7 characters, but also handles 18-character-wide columns for raw 64-bit counters
141 */
142 #define NAME_BYTES 20
143 #define PATH_BYTES 128
144
145 int backwards_count;
146 char *progname;
147
148 #define CPU_SUBSET_MAXCPUS 1024 /* need to use before probe... */
149 cpu_set_t *cpu_present_set, *cpu_affinity_set, *cpu_subset;
150 size_t cpu_present_setsize, cpu_affinity_setsize, cpu_subset_size;
151 #define MAX_ADDED_COUNTERS 16
152
153 struct thread_data {
154 unsigned long long tsc;
155 unsigned long long aperf;
156 unsigned long long mperf;
157 unsigned long long c1;
158 unsigned long long irq_count;
159 unsigned int smi_count;
160 unsigned int cpu_id;
161 unsigned int flags;
162 #define CPU_IS_FIRST_THREAD_IN_CORE 0x2
163 #define CPU_IS_FIRST_CORE_IN_PACKAGE 0x4
164 unsigned long long counter[MAX_ADDED_COUNTERS];
165 } *thread_even, *thread_odd;
166
167 struct core_data {
168 unsigned long long c3;
169 unsigned long long c6;
170 unsigned long long c7;
171 unsigned long long mc6_us; /* duplicate as per-core for now, even though per module */
172 unsigned int core_temp_c;
173 unsigned int core_id;
174 unsigned long long counter[MAX_ADDED_COUNTERS];
175 } *core_even, *core_odd;
176
177 struct pkg_data {
178 unsigned long long pc2;
179 unsigned long long pc3;
180 unsigned long long pc6;
181 unsigned long long pc7;
182 unsigned long long pc8;
183 unsigned long long pc9;
184 unsigned long long pc10;
185 unsigned long long pkg_wtd_core_c0;
186 unsigned long long pkg_any_core_c0;
187 unsigned long long pkg_any_gfxe_c0;
188 unsigned long long pkg_both_core_gfxe_c0;
189 long long gfx_rc6_ms;
190 unsigned int gfx_mhz;
191 unsigned int package_id;
192 unsigned int energy_pkg; /* MSR_PKG_ENERGY_STATUS */
193 unsigned int energy_dram; /* MSR_DRAM_ENERGY_STATUS */
194 unsigned int energy_cores; /* MSR_PP0_ENERGY_STATUS */
195 unsigned int energy_gfx; /* MSR_PP1_ENERGY_STATUS */
196 unsigned int rapl_pkg_perf_status; /* MSR_PKG_PERF_STATUS */
197 unsigned int rapl_dram_perf_status; /* MSR_DRAM_PERF_STATUS */
198 unsigned int pkg_temp_c;
199 unsigned long long counter[MAX_ADDED_COUNTERS];
200 } *package_even, *package_odd;
201
202 #define ODD_COUNTERS thread_odd, core_odd, package_odd
203 #define EVEN_COUNTERS thread_even, core_even, package_even
204
205 #define GET_THREAD(thread_base, thread_no, core_no, pkg_no) \
206 (thread_base + (pkg_no) * topo.num_cores_per_pkg * \
207 topo.num_threads_per_core + \
208 (core_no) * topo.num_threads_per_core + (thread_no))
209 #define GET_CORE(core_base, core_no, pkg_no) \
210 (core_base + (pkg_no) * topo.num_cores_per_pkg + (core_no))
211 #define GET_PKG(pkg_base, pkg_no) (pkg_base + pkg_no)
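/*
 * Counters live in flat arrays indexed package-major, then core, then thread.
 * Illustrative example (hypothetical topology): with 2 packages, 4 cores per
 * package and 2 threads per core, GET_THREAD(base, thread_no = 1, core_no = 2,
 * pkg_no = 1) resolves to base + 1*(4*2) + 2*2 + 1 = base + 13.
 */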
212
213 enum counter_scope {SCOPE_CPU, SCOPE_CORE, SCOPE_PACKAGE};
214 enum counter_type {COUNTER_ITEMS, COUNTER_CYCLES, COUNTER_SECONDS, COUNTER_USEC};
215 enum counter_format {FORMAT_RAW, FORMAT_DELTA, FORMAT_PERCENT};
216
217 struct msr_counter {
218 unsigned int msr_num;
219 char name[NAME_BYTES];
220 char path[PATH_BYTES];
221 unsigned int width;
222 enum counter_type type;
223 enum counter_format format;
224 struct msr_counter *next;
225 unsigned int flags;
226 #define FLAGS_HIDE (1 << 0)
227 #define FLAGS_SHOW (1 << 1)
228 #define SYSFS_PERCPU (1 << 1)
229 };
230
231 struct sys_counters {
232 unsigned int added_thread_counters;
233 unsigned int added_core_counters;
234 unsigned int added_package_counters;
235 struct msr_counter *tp;
236 struct msr_counter *cp;
237 struct msr_counter *pp;
238 } sys;
239
240 struct system_summary {
241 struct thread_data threads;
242 struct core_data cores;
243 struct pkg_data packages;
244 } average;
245
246
247 struct topo_params {
248 int num_packages;
249 int num_cpus;
250 int num_cores;
251 int max_cpu_num;
252 int num_cores_per_pkg;
253 int num_threads_per_core;
254 } topo;
255
256 struct timeval tv_even, tv_odd, tv_delta;
257
258 int *irq_column_2_cpu; /* /proc/interrupts column numbers */
259 int *irqs_per_cpu; /* indexed by cpu_num */
260
261 void setup_all_buffers(void);
262
263 int cpu_is_not_present(int cpu)
264 {
265 return !CPU_ISSET_S(cpu, cpu_present_setsize, cpu_present_set);
266 }
267 /*
268 * run func(thread, core, package) in topology order
269 * skip non-present cpus
270 */
271
272 int for_all_cpus(int (func)(struct thread_data *, struct core_data *, struct pkg_data *),
273 struct thread_data *thread_base, struct core_data *core_base, struct pkg_data *pkg_base)
274 {
275 int retval, pkg_no, core_no, thread_no;
276
277 for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) {
278 for (core_no = 0; core_no < topo.num_cores_per_pkg; ++core_no) {
279 for (thread_no = 0; thread_no <
280 topo.num_threads_per_core; ++thread_no) {
281 struct thread_data *t;
282 struct core_data *c;
283 struct pkg_data *p;
284
285 t = GET_THREAD(thread_base, thread_no, core_no, pkg_no);
286
287 if (cpu_is_not_present(t->cpu_id))
288 continue;
289
290 c = GET_CORE(core_base, core_no, pkg_no);
291 p = GET_PKG(pkg_base, pkg_no);
292
293 retval = func(t, c, p);
294 if (retval)
295 return retval;
296 }
297 }
298 }
299 return 0;
300 }
301
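/*
 * Pin the calling thread to the target CPU with sched_setaffinity(), so that
 * the rdtsc() and MSR reads that follow in get_counters() execute locally on
 * the CPU being measured.
 */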
302 int cpu_migrate(int cpu)
303 {
304 CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);
305 CPU_SET_S(cpu, cpu_affinity_setsize, cpu_affinity_set);
306 if (sched_setaffinity(0, cpu_affinity_setsize, cpu_affinity_set) == -1)
307 return -1;
308 else
309 return 0;
310 }
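/*
 * MSR access goes through the msr driver: each CPU exposes /dev/cpu/<N>/msr,
 * a register is read with pread() at an offset equal to the MSR address, and
 * the file descriptors are cached in fd_percpu[] after the first open.
 */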
311 int get_msr_fd(int cpu)
312 {
313 char pathname[32];
314 int fd;
315
316 fd = fd_percpu[cpu];
317
318 if (fd)
319 return fd;
320
321 sprintf(pathname, "/dev/cpu/%d/msr", cpu);
322 fd = open(pathname, O_RDONLY);
323 if (fd < 0)
324 err(-1, "%s open failed, try chown or chmod +r /dev/cpu/*/msr, or run as root", pathname);
325
326 fd_percpu[cpu] = fd;
327
328 return fd;
329 }
330
331 int get_msr(int cpu, off_t offset, unsigned long long *msr)
332 {
333 ssize_t retval;
334
335 retval = pread(get_msr_fd(cpu), msr, sizeof(*msr), offset);
336
337 if (retval != sizeof *msr)
338 err(-1, "cpu%d: msr offset 0x%llx read failed", cpu, (unsigned long long)offset);
339
340 return 0;
341 }
342
343 /*
344 * Each string in this array is compared in --show and --hide cmdline.
345 * Thus, strings that are proper sub-sets must follow their more specific peers.
346 */
347 struct msr_counter bic[] = {
348 { 0x0, "Package" },
349 { 0x0, "Avg_MHz" },
350 { 0x0, "Bzy_MHz" },
351 { 0x0, "TSC_MHz" },
352 { 0x0, "IRQ" },
353 { 0x0, "SMI", "", 32, 0, FORMAT_DELTA, NULL},
354 { 0x0, "Busy%" },
355 { 0x0, "CPU%c1" },
356 { 0x0, "CPU%c3" },
357 { 0x0, "CPU%c6" },
358 { 0x0, "CPU%c7" },
359 { 0x0, "ThreadC" },
360 { 0x0, "CoreTmp" },
361 { 0x0, "CoreCnt" },
362 { 0x0, "PkgTmp" },
363 { 0x0, "GFX%rc6" },
364 { 0x0, "GFXMHz" },
365 { 0x0, "Pkg%pc2" },
366 { 0x0, "Pkg%pc3" },
367 { 0x0, "Pkg%pc6" },
368 { 0x0, "Pkg%pc7" },
369 { 0x0, "Pkg%pc8" },
370 { 0x0, "Pkg%pc9" },
371 { 0x0, "Pkg%pc10" },
372 { 0x0, "PkgWatt" },
373 { 0x0, "CorWatt" },
374 { 0x0, "GFXWatt" },
375 { 0x0, "PkgCnt" },
376 { 0x0, "RAMWatt" },
377 { 0x0, "PKG_%" },
378 { 0x0, "RAM_%" },
379 { 0x0, "Pkg_J" },
380 { 0x0, "Cor_J" },
381 { 0x0, "GFX_J" },
382 { 0x0, "RAM_J" },
383 { 0x0, "Core" },
384 { 0x0, "CPU" },
385 { 0x0, "Mod%c6" },
386 { 0x0, "sysfs" },
387 };
388
389 #define MAX_BIC (sizeof(bic) / sizeof(struct msr_counter))
390 #define BIC_Package (1ULL << 0)
391 #define BIC_Avg_MHz (1ULL << 1)
392 #define BIC_Bzy_MHz (1ULL << 2)
393 #define BIC_TSC_MHz (1ULL << 3)
394 #define BIC_IRQ (1ULL << 4)
395 #define BIC_SMI (1ULL << 5)
396 #define BIC_Busy (1ULL << 6)
397 #define BIC_CPU_c1 (1ULL << 7)
398 #define BIC_CPU_c3 (1ULL << 8)
399 #define BIC_CPU_c6 (1ULL << 9)
400 #define BIC_CPU_c7 (1ULL << 10)
401 #define BIC_ThreadC (1ULL << 11)
402 #define BIC_CoreTmp (1ULL << 12)
403 #define BIC_CoreCnt (1ULL << 13)
404 #define BIC_PkgTmp (1ULL << 14)
405 #define BIC_GFX_rc6 (1ULL << 15)
406 #define BIC_GFXMHz (1ULL << 16)
407 #define BIC_Pkgpc2 (1ULL << 17)
408 #define BIC_Pkgpc3 (1ULL << 18)
409 #define BIC_Pkgpc6 (1ULL << 19)
410 #define BIC_Pkgpc7 (1ULL << 20)
411 #define BIC_Pkgpc8 (1ULL << 21)
412 #define BIC_Pkgpc9 (1ULL << 22)
413 #define BIC_Pkgpc10 (1ULL << 23)
414 #define BIC_PkgWatt (1ULL << 24)
415 #define BIC_CorWatt (1ULL << 25)
416 #define BIC_GFXWatt (1ULL << 26)
417 #define BIC_PkgCnt (1ULL << 27)
418 #define BIC_RAMWatt (1ULL << 28)
419 #define BIC_PKG__ (1ULL << 29)
420 #define BIC_RAM__ (1ULL << 30)
421 #define BIC_Pkg_J (1ULL << 31)
422 #define BIC_Cor_J (1ULL << 32)
423 #define BIC_GFX_J (1ULL << 33)
424 #define BIC_RAM_J (1ULL << 34)
425 #define BIC_Core (1ULL << 35)
426 #define BIC_CPU (1ULL << 36)
427 #define BIC_Mod_c6 (1ULL << 37)
428 #define BIC_sysfs (1ULL << 38)
429
430 unsigned long long bic_enabled = 0xFFFFFFFFFFFFFFFFULL;
431 unsigned long long bic_present = BIC_sysfs;
432
433 #define DO_BIC(COUNTER_NAME) (bic_enabled & bic_present & COUNTER_NAME)
434 #define BIC_PRESENT(COUNTER_BIT) (bic_present |= COUNTER_BIT)
435 #define BIC_NOT_PRESENT(COUNTER_BIT) (bic_present &= ~COUNTER_BIT)
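/*
 * A column is emitted only when its bit is set in both bitmaps: bic_enabled
 * starts with every bit set and is narrowed by the --show/--hide handling,
 * while bic_present records which counters probing found on this platform
 * (via BIC_PRESENT/BIC_NOT_PRESENT).
 */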
436
437 #define MAX_DEFERRED 16
438 char *deferred_skip_names[MAX_DEFERRED];
439 int deferred_skip_index;
440
441 /*
442 * HIDE_LIST - hide this list of counters, show the rest [default]
443 * SHOW_LIST - show this list of counters, hide the rest
444 */
445 enum show_hide_mode { SHOW_LIST, HIDE_LIST } global_show_hide_mode = HIDE_LIST;
446
447 void help(void)
448 {
449 fprintf(outf,
450 "Usage: turbostat [OPTIONS][(--interval seconds) | COMMAND ...]\n"
451 "\n"
452 "Turbostat forks the specified COMMAND and prints statistics\n"
453 "when COMMAND completes.\n"
454 "If no COMMAND is specified, turbostat wakes every 5-seconds\n"
455 "to print statistics, until interrupted.\n"
456 "--add add a counter\n"
457 " eg. --add msr0x10,u64,cpu,delta,MY_TSC\n"
458 "--cpu cpu-set limit output to summary plus cpu-set:\n"
459 " {core | package | j,k,l..m,n-p }\n"
460 "--quiet skip decoding system configuration header\n"
461 "--interval sec Override default 5-second measurement interval\n"
462 "--help print this help message\n"
463 "--list list column headers only\n"
464 "--out file create or truncate \"file\" for all output\n"
465 "--version print version information\n"
466 "\n"
467 "For more help, run \"man turbostat\"\n");
468 }
469
470 /*
471 * bic_lookup
472 * for all the strings in the comma-separated name_list,
473 * set the appropriate bit in the return value.
474 */
475 unsigned long long bic_lookup(char *name_list, enum show_hide_mode mode)
476 {
477 int i;
478 unsigned long long retval = 0;
479
480 while (name_list) {
481 char *comma;
482
483 comma = strchr(name_list, ',');
484
485 if (comma)
486 *comma = '\0';
487
488 for (i = 0; i < MAX_BIC; ++i) {
489 if (!strcmp(name_list, bic[i].name)) {
490 retval |= (1ULL << i);
491 break;
492 }
493 }
494 if (i == MAX_BIC) {
495 if (mode == SHOW_LIST) {
496 fprintf(stderr, "Invalid counter name: %s\n", name_list);
497 exit(-1);
498 }
499 deferred_skip_names[deferred_skip_index++] = name_list;
500 if (debug)
501 fprintf(stderr, "deferred \"%s\"\n", name_list);
502 if (deferred_skip_index >= MAX_DEFERRED) {
503 fprintf(stderr, "More than max %d un-recognized --skip options '%s'\n",
504 MAX_DEFERRED, name_list);
505 help();
506 exit(1);
507 }
508 }
509
510 name_list = comma;
511 if (name_list)
512 name_list++;
513
514 }
515 return retval;
516 }
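/*
 * Illustrative example: bic_lookup("Avg_MHz,IRQ", SHOW_LIST) returns
 * BIC_Avg_MHz | BIC_IRQ, since the BIC_* bit positions mirror the order of
 * the bic[] table above.
 */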
517
518
519 void print_header(char *delim)
520 {
521 struct msr_counter *mp;
522 int printed = 0;
523
524 if (DO_BIC(BIC_Package))
525 outp += sprintf(outp, "%sPackage", (printed++ ? delim : ""));
526 if (DO_BIC(BIC_Core))
527 outp += sprintf(outp, "%sCore", (printed++ ? delim : ""));
528 if (DO_BIC(BIC_CPU))
529 outp += sprintf(outp, "%sCPU", (printed++ ? delim : ""));
530 if (DO_BIC(BIC_Avg_MHz))
531 outp += sprintf(outp, "%sAvg_MHz", (printed++ ? delim : ""));
532 if (DO_BIC(BIC_Busy))
533 outp += sprintf(outp, "%sBusy%%", (printed++ ? delim : ""));
534 if (DO_BIC(BIC_Bzy_MHz))
535 outp += sprintf(outp, "%sBzy_MHz", (printed++ ? delim : ""));
536 if (DO_BIC(BIC_TSC_MHz))
537 outp += sprintf(outp, "%sTSC_MHz", (printed++ ? delim : ""));
538
539 if (DO_BIC(BIC_IRQ)) {
540 if (sums_need_wide_columns)
541 outp += sprintf(outp, "%s IRQ", (printed++ ? delim : ""));
542 else
543 outp += sprintf(outp, "%sIRQ", (printed++ ? delim : ""));
544 }
545
546 if (DO_BIC(BIC_SMI))
547 outp += sprintf(outp, "%sSMI", (printed++ ? delim : ""));
548
549 for (mp = sys.tp; mp; mp = mp->next) {
550
551 if (mp->format == FORMAT_RAW) {
552 if (mp->width == 64)
553 outp += sprintf(outp, "%s%18.18s", (printed++ ? delim : ""), mp->name);
554 else
555 outp += sprintf(outp, "%s%10.10s", (printed++ ? delim : ""), mp->name);
556 } else {
557 if ((mp->type == COUNTER_ITEMS) && sums_need_wide_columns)
558 outp += sprintf(outp, "%s%8s", (printed++ ? delim : ""), mp->name);
559 else
560 outp += sprintf(outp, "%s%s", (printed++ ? delim : ""), mp->name);
561 }
562 }
563
564 if (DO_BIC(BIC_CPU_c1))
565 outp += sprintf(outp, "%sCPU%%c1", (printed++ ? delim : ""));
566 if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates)
567 outp += sprintf(outp, "%sCPU%%c3", (printed++ ? delim : ""));
568 if (DO_BIC(BIC_CPU_c6))
569 outp += sprintf(outp, "%sCPU%%c6", (printed++ ? delim : ""));
570 if (DO_BIC(BIC_CPU_c7))
571 outp += sprintf(outp, "%sCPU%%c7", (printed++ ? delim : ""));
572
573 if (DO_BIC(BIC_Mod_c6))
574 outp += sprintf(outp, "%sMod%%c6", (printed++ ? delim : ""));
575
576 if (DO_BIC(BIC_CoreTmp))
577 outp += sprintf(outp, "%sCoreTmp", (printed++ ? delim : ""));
578
579 for (mp = sys.cp; mp; mp = mp->next) {
580 if (mp->format == FORMAT_RAW) {
581 if (mp->width == 64)
582 outp += sprintf(outp, "%s%18.18s", delim, mp->name);
583 else
584 outp += sprintf(outp, "%s%10.10s", delim, mp->name);
585 } else {
586 if ((mp->type == COUNTER_ITEMS) && sums_need_wide_columns)
587 outp += sprintf(outp, "%s%8s", delim, mp->name);
588 else
589 outp += sprintf(outp, "%s%s", delim, mp->name);
590 }
591 }
592
593 if (DO_BIC(BIC_PkgTmp))
594 outp += sprintf(outp, "%sPkgTmp", (printed++ ? delim : ""));
595
596 if (DO_BIC(BIC_GFX_rc6))
597 outp += sprintf(outp, "%sGFX%%rc6", (printed++ ? delim : ""));
598
599 if (DO_BIC(BIC_GFXMHz))
600 outp += sprintf(outp, "%sGFXMHz", (printed++ ? delim : ""));
601
602 if (do_skl_residency) {
603 outp += sprintf(outp, "%sTotl%%C0", (printed++ ? delim : ""));
604 outp += sprintf(outp, "%sAny%%C0", (printed++ ? delim : ""));
605 outp += sprintf(outp, "%sGFX%%C0", (printed++ ? delim : ""));
606 outp += sprintf(outp, "%sCPUGFX%%", (printed++ ? delim : ""));
607 }
608
609 if (DO_BIC(BIC_Pkgpc2))
610 outp += sprintf(outp, "%sPkg%%pc2", (printed++ ? delim : ""));
611 if (DO_BIC(BIC_Pkgpc3))
612 outp += sprintf(outp, "%sPkg%%pc3", (printed++ ? delim : ""));
613 if (DO_BIC(BIC_Pkgpc6))
614 outp += sprintf(outp, "%sPkg%%pc6", (printed++ ? delim : ""));
615 if (DO_BIC(BIC_Pkgpc7))
616 outp += sprintf(outp, "%sPkg%%pc7", (printed++ ? delim : ""));
617 if (DO_BIC(BIC_Pkgpc8))
618 outp += sprintf(outp, "%sPkg%%pc8", (printed++ ? delim : ""));
619 if (DO_BIC(BIC_Pkgpc9))
620 outp += sprintf(outp, "%sPkg%%pc9", (printed++ ? delim : ""));
621 if (DO_BIC(BIC_Pkgpc10))
622 outp += sprintf(outp, "%sPk%%pc10", (printed++ ? delim : ""));
623
624 if (do_rapl && !rapl_joules) {
625 if (DO_BIC(BIC_PkgWatt))
626 outp += sprintf(outp, "%sPkgWatt", (printed++ ? delim : ""));
627 if (DO_BIC(BIC_CorWatt))
628 outp += sprintf(outp, "%sCorWatt", (printed++ ? delim : ""));
629 if (DO_BIC(BIC_GFXWatt))
630 outp += sprintf(outp, "%sGFXWatt", (printed++ ? delim : ""));
631 if (DO_BIC(BIC_RAMWatt))
632 outp += sprintf(outp, "%sRAMWatt", (printed++ ? delim : ""));
633 if (DO_BIC(BIC_PKG__))
634 outp += sprintf(outp, "%sPKG_%%", (printed++ ? delim : ""));
635 if (DO_BIC(BIC_RAM__))
636 outp += sprintf(outp, "%sRAM_%%", (printed++ ? delim : ""));
637 } else if (do_rapl && rapl_joules) {
638 if (DO_BIC(BIC_Pkg_J))
639 outp += sprintf(outp, "%sPkg_J", (printed++ ? delim : ""));
640 if (DO_BIC(BIC_Cor_J))
641 outp += sprintf(outp, "%sCor_J", (printed++ ? delim : ""));
642 if (DO_BIC(BIC_GFX_J))
643 outp += sprintf(outp, "%sGFX_J", (printed++ ? delim : ""));
644 if (DO_BIC(BIC_RAM_J))
645 outp += sprintf(outp, "%sRAM_J", (printed++ ? delim : ""));
646 if (DO_BIC(BIC_PKG__))
647 outp += sprintf(outp, "%sPKG_%%", (printed++ ? delim : ""));
648 if (DO_BIC(BIC_RAM__))
649 outp += sprintf(outp, "%sRAM_%%", (printed++ ? delim : ""));
650 }
651 for (mp = sys.pp; mp; mp = mp->next) {
652 if (mp->format == FORMAT_RAW) {
653 if (mp->width == 64)
654 outp += sprintf(outp, "%s%18.18s", delim, mp->name);
655 else
656 outp += sprintf(outp, "%s%10.10s", delim, mp->name);
657 } else {
658 if ((mp->type == COUNTER_ITEMS) && sums_need_wide_columns)
659 outp += sprintf(outp, "%s%8s", delim, mp->name);
660 else
661 outp += sprintf(outp, "%s%s", delim, mp->name);
662 }
663 }
664
665 outp += sprintf(outp, "\n");
666 }
667
668 int dump_counters(struct thread_data *t, struct core_data *c,
669 struct pkg_data *p)
670 {
671 int i;
672 struct msr_counter *mp;
673
674 outp += sprintf(outp, "t %p, c %p, p %p\n", t, c, p);
675
676 if (t) {
677 outp += sprintf(outp, "CPU: %d flags 0x%x\n",
678 t->cpu_id, t->flags);
679 outp += sprintf(outp, "TSC: %016llX\n", t->tsc);
680 outp += sprintf(outp, "aperf: %016llX\n", t->aperf);
681 outp += sprintf(outp, "mperf: %016llX\n", t->mperf);
682 outp += sprintf(outp, "c1: %016llX\n", t->c1);
683
684 if (DO_BIC(BIC_IRQ))
685 outp += sprintf(outp, "IRQ: %lld\n", t->irq_count);
686 if (DO_BIC(BIC_SMI))
687 outp += sprintf(outp, "SMI: %d\n", t->smi_count);
688
689 for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) {
690 outp += sprintf(outp, "tADDED [%d] msr0x%x: %08llX\n",
691 i, mp->msr_num, t->counter[i]);
692 }
693 }
694
695 if (c) {
696 outp += sprintf(outp, "core: %d\n", c->core_id);
697 outp += sprintf(outp, "c3: %016llX\n", c->c3);
698 outp += sprintf(outp, "c6: %016llX\n", c->c6);
699 outp += sprintf(outp, "c7: %016llX\n", c->c7);
700 outp += sprintf(outp, "DTS: %dC\n", c->core_temp_c);
701
702 for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
703 outp += sprintf(outp, "cADDED [%d] msr0x%x: %08llX\n",
704 i, mp->msr_num, c->counter[i]);
705 }
706 outp += sprintf(outp, "mc6_us: %016llX\n", c->mc6_us);
707 }
708
709 if (p) {
710 outp += sprintf(outp, "package: %d\n", p->package_id);
711
712 outp += sprintf(outp, "Weighted cores: %016llX\n", p->pkg_wtd_core_c0);
713 outp += sprintf(outp, "Any cores: %016llX\n", p->pkg_any_core_c0);
714 outp += sprintf(outp, "Any GFX: %016llX\n", p->pkg_any_gfxe_c0);
715 outp += sprintf(outp, "CPU + GFX: %016llX\n", p->pkg_both_core_gfxe_c0);
716
717 outp += sprintf(outp, "pc2: %016llX\n", p->pc2);
718 if (DO_BIC(BIC_Pkgpc3))
719 outp += sprintf(outp, "pc3: %016llX\n", p->pc3);
720 if (DO_BIC(BIC_Pkgpc6))
721 outp += sprintf(outp, "pc6: %016llX\n", p->pc6);
722 if (DO_BIC(BIC_Pkgpc7))
723 outp += sprintf(outp, "pc7: %016llX\n", p->pc7);
724 outp += sprintf(outp, "pc8: %016llX\n", p->pc8);
725 outp += sprintf(outp, "pc9: %016llX\n", p->pc9);
726 outp += sprintf(outp, "pc10: %016llX\n", p->pc10);
727 outp += sprintf(outp, "Joules PKG: %0X\n", p->energy_pkg);
728 outp += sprintf(outp, "Joules COR: %0X\n", p->energy_cores);
729 outp += sprintf(outp, "Joules GFX: %0X\n", p->energy_gfx);
730 outp += sprintf(outp, "Joules RAM: %0X\n", p->energy_dram);
731 outp += sprintf(outp, "Throttle PKG: %0X\n",
732 p->rapl_pkg_perf_status);
733 outp += sprintf(outp, "Throttle RAM: %0X\n",
734 p->rapl_dram_perf_status);
735 outp += sprintf(outp, "PTM: %dC\n", p->pkg_temp_c);
736
737 for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) {
738 outp += sprintf(outp, "pADDED [%d] msr0x%x: %08llX\n",
739 i, mp->msr_num, p->counter[i]);
740 }
741 }
742
743 outp += sprintf(outp, "\n");
744
745 return 0;
746 }
747
748 /*
749 * column formatting convention & formats
750 */
751 int format_counters(struct thread_data *t, struct core_data *c,
752 struct pkg_data *p)
753 {
754 double interval_float, tsc;
755 char *fmt8;
756 int i;
757 struct msr_counter *mp;
758 char *delim = "\t";
759 int printed = 0;
760
761 /* if showing only 1st thread in core and this isn't one, bail out */
762 if (show_core_only && !(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
763 return 0;
764
765 /* if showing only 1st thread in pkg and this isn't one, bail out */
766 if (show_pkg_only && !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
767 return 0;
768
769 /* if not summary line and --cpu is used */
770 if ((t != &average.threads) &&
771 (cpu_subset && !CPU_ISSET_S(t->cpu_id, cpu_subset_size, cpu_subset)))
772 return 0;
773
774 interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0;
775
776 tsc = t->tsc * tsc_tweak;
777
778 /* topo columns, print blanks on 1st (average) line */
779 if (t == &average.threads) {
780 if (DO_BIC(BIC_Package))
781 outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
782 if (DO_BIC(BIC_Core))
783 outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
784 if (DO_BIC(BIC_CPU))
785 outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
786 } else {
787 if (DO_BIC(BIC_Package)) {
788 if (p)
789 outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), p->package_id);
790 else
791 outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
792 }
793 if (DO_BIC(BIC_Core)) {
794 if (c)
795 outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), c->core_id);
796 else
797 outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
798 }
799 if (DO_BIC(BIC_CPU))
800 outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), t->cpu_id);
801 }
802
803 if (DO_BIC(BIC_Avg_MHz))
804 outp += sprintf(outp, "%s%.0f", (printed++ ? delim : ""),
805 1.0 / units * t->aperf / interval_float);
806
807 if (DO_BIC(BIC_Busy))
808 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * t->mperf/tsc);
809
810 if (DO_BIC(BIC_Bzy_MHz)) {
811 if (has_base_hz)
812 outp += sprintf(outp, "%s%.0f", (printed++ ? delim : ""), base_hz / units * t->aperf / t->mperf);
813 else
814 outp += sprintf(outp, "%s%.0f", (printed++ ? delim : ""),
815 tsc / units * t->aperf / t->mperf / interval_float);
816 }
817
818 if (DO_BIC(BIC_TSC_MHz))
819 outp += sprintf(outp, "%s%.0f", (printed++ ? delim : ""), 1.0 * t->tsc/units/interval_float);
820
821 /* IRQ */
822 if (DO_BIC(BIC_IRQ)) {
823 if (sums_need_wide_columns)
824 outp += sprintf(outp, "%s%8lld", (printed++ ? delim : ""), t->irq_count);
825 else
826 outp += sprintf(outp, "%s%lld", (printed++ ? delim : ""), t->irq_count);
827 }
828
829 /* SMI */
830 if (DO_BIC(BIC_SMI))
831 outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), t->smi_count);
832
833 /* Added counters */
834 for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) {
835 if (mp->format == FORMAT_RAW) {
836 if (mp->width == 32)
837 outp += sprintf(outp, "%s0x%08x", (printed++ ? delim : ""), (unsigned int) t->counter[i]);
838 else
839 outp += sprintf(outp, "%s0x%016llx", (printed++ ? delim : ""), t->counter[i]);
840 } else if (mp->format == FORMAT_DELTA) {
841 if ((mp->type == COUNTER_ITEMS) && sums_need_wide_columns)
842 outp += sprintf(outp, "%s%8lld", (printed++ ? delim : ""), t->counter[i]);
843 else
844 outp += sprintf(outp, "%s%lld", (printed++ ? delim : ""), t->counter[i]);
845 } else if (mp->format == FORMAT_PERCENT) {
846 if (mp->type == COUNTER_USEC)
847 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), t->counter[i]/interval_float/10000);
848 else
849 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * t->counter[i]/tsc);
850 }
851 }
852
853 /* C1 */
854 if (DO_BIC(BIC_CPU_c1))
855 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * t->c1/tsc);
856
857
858 /* print per-core data only for 1st thread in core */
859 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
860 goto done;
861
862 if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates)
863 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c3/tsc);
864 if (DO_BIC(BIC_CPU_c6))
865 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c6/tsc);
866 if (DO_BIC(BIC_CPU_c7))
867 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c7/tsc);
868
869 /* Mod%c6 */
870 if (DO_BIC(BIC_Mod_c6))
871 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->mc6_us / tsc);
872
873 if (DO_BIC(BIC_CoreTmp))
874 outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), c->core_temp_c);
875
876 for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
877 if (mp->format == FORMAT_RAW) {
878 if (mp->width == 32)
879 outp += sprintf(outp, "%s0x%08x", (printed++ ? delim : ""), (unsigned int) c->counter[i]);
880 else
881 outp += sprintf(outp, "%s0x%016llx", (printed++ ? delim : ""), c->counter[i]);
882 } else if (mp->format == FORMAT_DELTA) {
883 if ((mp->type == COUNTER_ITEMS) && sums_need_wide_columns)
884 outp += sprintf(outp, "%s%8lld", (printed++ ? delim : ""), c->counter[i]);
885 else
886 outp += sprintf(outp, "%s%lld", (printed++ ? delim : ""), c->counter[i]);
887 } else if (mp->format == FORMAT_PERCENT) {
888 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->counter[i]/tsc);
889 }
890 }
891
892 /* print per-package data only for 1st core in package */
893 if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
894 goto done;
895
896 /* PkgTmp */
897 if (DO_BIC(BIC_PkgTmp))
898 outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), p->pkg_temp_c);
899
900 /* GFXrc6 */
901 if (DO_BIC(BIC_GFX_rc6)) {
902 if (p->gfx_rc6_ms == -1) { /* detect GFX counter reset */
903 outp += sprintf(outp, "%s**.**", (printed++ ? delim : ""));
904 } else {
905 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""),
906 p->gfx_rc6_ms / 10.0 / interval_float);
907 }
908 }
909
910 /* GFXMHz */
911 if (DO_BIC(BIC_GFXMHz))
912 outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), p->gfx_mhz);
913
914 /* Totl%C0, Any%C0, GFX%C0, CPUGFX% */
915 if (do_skl_residency) {
916 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pkg_wtd_core_c0/tsc);
917 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pkg_any_core_c0/tsc);
918 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pkg_any_gfxe_c0/tsc);
919 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pkg_both_core_gfxe_c0/tsc);
920 }
921
922 if (DO_BIC(BIC_Pkgpc2))
923 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc2/tsc);
924 if (DO_BIC(BIC_Pkgpc3))
925 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc3/tsc);
926 if (DO_BIC(BIC_Pkgpc6))
927 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc6/tsc);
928 if (DO_BIC(BIC_Pkgpc7))
929 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc7/tsc);
930 if (DO_BIC(BIC_Pkgpc8))
931 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc8/tsc);
932 if (DO_BIC(BIC_Pkgpc9))
933 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc9/tsc);
934 if (DO_BIC(BIC_Pkgpc10))
935 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc10/tsc);
936
937 /*
938 * If measurement interval exceeds minimum RAPL Joule Counter range,
939 * indicate that results are suspect by printing "**" in fraction place.
940 */
941 if (interval_float < rapl_joule_counter_range)
942 fmt8 = "%s%.2f";
943 else
944 fmt8 = "%6.0f**";
945
946 if (DO_BIC(BIC_PkgWatt))
947 outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_pkg * rapl_energy_units / interval_float);
948 if (DO_BIC(BIC_CorWatt))
949 outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_cores * rapl_energy_units / interval_float);
950 if (DO_BIC(BIC_GFXWatt))
951 outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_gfx * rapl_energy_units / interval_float);
952 if (DO_BIC(BIC_RAMWatt))
953 outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_dram * rapl_dram_energy_units / interval_float);
954 if (DO_BIC(BIC_Pkg_J))
955 outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_pkg * rapl_energy_units);
956 if (DO_BIC(BIC_Cor_J))
957 outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_cores * rapl_energy_units);
958 if (DO_BIC(BIC_GFX_J))
959 outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_gfx * rapl_energy_units);
960 if (DO_BIC(BIC_RAM_J))
961 outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_dram * rapl_dram_energy_units);
962 if (DO_BIC(BIC_PKG__))
963 outp += sprintf(outp, fmt8, (printed++ ? delim : ""), 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float);
964 if (DO_BIC(BIC_RAM__))
965 outp += sprintf(outp, fmt8, (printed++ ? delim : ""), 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float);
966
967 for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) {
968 if (mp->format == FORMAT_RAW) {
969 if (mp->width == 32)
970 outp += sprintf(outp, "%s0x%08x", (printed++ ? delim : ""), (unsigned int) p->counter[i]);
971 else
972 outp += sprintf(outp, "%s0x%016llx", (printed++ ? delim : ""), p->counter[i]);
973 } else if (mp->format == FORMAT_DELTA) {
974 if ((mp->type == COUNTER_ITEMS) && sums_need_wide_columns)
975 outp += sprintf(outp, "%s%8lld", (printed++ ? delim : ""), p->counter[i]);
976 else
977 outp += sprintf(outp, "%s%lld", (printed++ ? delim : ""), p->counter[i]);
978 } else if (mp->format == FORMAT_PERCENT) {
979 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->counter[i]/tsc);
980 }
981 }
982
983 done:
984 outp += sprintf(outp, "\n");
985
986 return 0;
987 }
988
989 void flush_output_stdout(void)
990 {
991 FILE *filep;
992
993 if (outf == stderr)
994 filep = stdout;
995 else
996 filep = outf;
997
998 fputs(output_buffer, filep);
999 fflush(filep);
1000
1001 outp = output_buffer;
1002 }
1003 void flush_output_stderr(void)
1004 {
1005 fputs(output_buffer, outf);
1006 fflush(outf);
1007 outp = output_buffer;
1008 }
1009 void format_all_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
1010 {
1011 static int printed;
1012
1013 if (!printed || !summary_only)
1014 print_header("\t");
1015
1016 if (topo.num_cpus > 1)
1017 format_counters(&average.threads, &average.cores,
1018 &average.packages);
1019
1020 printed = 1;
1021
1022 if (summary_only)
1023 return;
1024
1025 for_all_cpus(format_counters, t, c, p);
1026 }
1027
1028 #define DELTA_WRAP32(new, old) \
1029 if (new > old) { \
1030 old = new - old; \
1031 } else { \
1032 old = 0x100000000 + new - old; \
1033 }
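/*
 * Illustrative example: the energy-status counters are 32 bits wide and wrap,
 * so for old = 0xFFFFFFF0 and new = 0x10 the macro above yields
 * 0x100000000 + 0x10 - 0xFFFFFFF0 = 0x20.
 */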
1034
1035 int
1036 delta_package(struct pkg_data *new, struct pkg_data *old)
1037 {
1038 int i;
1039 struct msr_counter *mp;
1040
1041 if (do_skl_residency) {
1042 old->pkg_wtd_core_c0 = new->pkg_wtd_core_c0 - old->pkg_wtd_core_c0;
1043 old->pkg_any_core_c0 = new->pkg_any_core_c0 - old->pkg_any_core_c0;
1044 old->pkg_any_gfxe_c0 = new->pkg_any_gfxe_c0 - old->pkg_any_gfxe_c0;
1045 old->pkg_both_core_gfxe_c0 = new->pkg_both_core_gfxe_c0 - old->pkg_both_core_gfxe_c0;
1046 }
1047 old->pc2 = new->pc2 - old->pc2;
1048 if (DO_BIC(BIC_Pkgpc3))
1049 old->pc3 = new->pc3 - old->pc3;
1050 if (DO_BIC(BIC_Pkgpc6))
1051 old->pc6 = new->pc6 - old->pc6;
1052 if (DO_BIC(BIC_Pkgpc7))
1053 old->pc7 = new->pc7 - old->pc7;
1054 old->pc8 = new->pc8 - old->pc8;
1055 old->pc9 = new->pc9 - old->pc9;
1056 old->pc10 = new->pc10 - old->pc10;
1057 old->pkg_temp_c = new->pkg_temp_c;
1058
1059 /* flag an error when rc6 counter resets/wraps */
1060 if (old->gfx_rc6_ms > new->gfx_rc6_ms)
1061 old->gfx_rc6_ms = -1;
1062 else
1063 old->gfx_rc6_ms = new->gfx_rc6_ms - old->gfx_rc6_ms;
1064
1065 old->gfx_mhz = new->gfx_mhz;
1066
1067 DELTA_WRAP32(new->energy_pkg, old->energy_pkg);
1068 DELTA_WRAP32(new->energy_cores, old->energy_cores);
1069 DELTA_WRAP32(new->energy_gfx, old->energy_gfx);
1070 DELTA_WRAP32(new->energy_dram, old->energy_dram);
1071 DELTA_WRAP32(new->rapl_pkg_perf_status, old->rapl_pkg_perf_status);
1072 DELTA_WRAP32(new->rapl_dram_perf_status, old->rapl_dram_perf_status);
1073
1074 for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) {
1075 if (mp->format == FORMAT_RAW)
1076 old->counter[i] = new->counter[i];
1077 else
1078 old->counter[i] = new->counter[i] - old->counter[i];
1079 }
1080
1081 return 0;
1082 }
1083
1084 void
1085 delta_core(struct core_data *new, struct core_data *old)
1086 {
1087 int i;
1088 struct msr_counter *mp;
1089
1090 old->c3 = new->c3 - old->c3;
1091 old->c6 = new->c6 - old->c6;
1092 old->c7 = new->c7 - old->c7;
1093 old->core_temp_c = new->core_temp_c;
1094 old->mc6_us = new->mc6_us - old->mc6_us;
1095
1096 for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
1097 if (mp->format == FORMAT_RAW)
1098 old->counter[i] = new->counter[i];
1099 else
1100 old->counter[i] = new->counter[i] - old->counter[i];
1101 }
1102 }
1103
1104 /*
1105 * old = new - old
1106 */
1107 int
1108 delta_thread(struct thread_data *new, struct thread_data *old,
1109 struct core_data *core_delta)
1110 {
1111 int i;
1112 struct msr_counter *mp;
1113
1114 old->tsc = new->tsc - old->tsc;
1115
1116 /* check for TSC < 1 Mcycles over interval */
1117 if (old->tsc < (1000 * 1000))
1118 errx(-3, "Insanely slow TSC rate, TSC stops in idle?\n"
1119 "You can disable all c-states by booting with \"idle=poll\"\n"
1120 "or just the deep ones with \"processor.max_cstate=1\"");
1121
1122 old->c1 = new->c1 - old->c1;
1123
1124 if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz)) {
1125 if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) {
1126 old->aperf = new->aperf - old->aperf;
1127 old->mperf = new->mperf - old->mperf;
1128 } else {
1129 return -1;
1130 }
1131 }
1132
1133
1134 if (use_c1_residency_msr) {
1135 /*
1136 * Some models have a dedicated C1 residency MSR,
1137 * which should be more accurate than the derivation below.
1138 */
1139 } else {
1140 /*
1141 * As counter collection is not atomic,
1142 * it is possible for mperf's non-halted cycles + idle states
1143 * to exceed TSC's all cycles: show c1 = 0% in that case.
1144 */
1145 if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > old->tsc)
1146 old->c1 = 0;
1147 else {
1148 /* normal case, derive c1 */
1149 old->c1 = (old->tsc * tsc_tweak) - old->mperf - core_delta->c3
1150 - core_delta->c6 - core_delta->c7;
1151 }
1152 }
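/*
 * Illustrative example of the derivation above: with tsc_tweak = 1.0, a TSC
 * delta of 1,000,000, mperf = 300,000 and c3+c6+c7 = 500,000 gives
 * c1 = 1,000,000 - 300,000 - 500,000 = 200,000 cycles.
 */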
1153
1154 if (old->mperf == 0) {
1155 if (debug > 1)
1156 fprintf(outf, "cpu%d MPERF 0!\n", old->cpu_id);
1157 old->mperf = 1; /* divide by 0 protection */
1158 }
1159
1160 if (DO_BIC(BIC_IRQ))
1161 old->irq_count = new->irq_count - old->irq_count;
1162
1163 if (DO_BIC(BIC_SMI))
1164 old->smi_count = new->smi_count - old->smi_count;
1165
1166 for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) {
1167 if (mp->format == FORMAT_RAW)
1168 old->counter[i] = new->counter[i];
1169 else
1170 old->counter[i] = new->counter[i] - old->counter[i];
1171 }
1172 return 0;
1173 }
1174
1175 int delta_cpu(struct thread_data *t, struct core_data *c,
1176 struct pkg_data *p, struct thread_data *t2,
1177 struct core_data *c2, struct pkg_data *p2)
1178 {
1179 int retval = 0;
1180
1181 /* calculate core delta only for 1st thread in core */
1182 if (t->flags & CPU_IS_FIRST_THREAD_IN_CORE)
1183 delta_core(c, c2);
1184
1185 /* always calculate thread delta */
1186 retval = delta_thread(t, t2, c2); /* c2 is core delta */
1187 if (retval)
1188 return retval;
1189
1190 /* calculate package delta only for 1st core in package */
1191 if (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)
1192 retval = delta_package(p, p2);
1193
1194 return retval;
1195 }
1196
1197 void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
1198 {
1199 int i;
1200 struct msr_counter *mp;
1201
1202 t->tsc = 0;
1203 t->aperf = 0;
1204 t->mperf = 0;
1205 t->c1 = 0;
1206
1207 t->irq_count = 0;
1208 t->smi_count = 0;
1209
1210 /* tells format_counters to dump all fields from this set */
1211 t->flags = CPU_IS_FIRST_THREAD_IN_CORE | CPU_IS_FIRST_CORE_IN_PACKAGE;
1212
1213 c->c3 = 0;
1214 c->c6 = 0;
1215 c->c7 = 0;
1216 c->mc6_us = 0;
1217 c->core_temp_c = 0;
1218
1219 p->pkg_wtd_core_c0 = 0;
1220 p->pkg_any_core_c0 = 0;
1221 p->pkg_any_gfxe_c0 = 0;
1222 p->pkg_both_core_gfxe_c0 = 0;
1223
1224 p->pc2 = 0;
1225 if (DO_BIC(BIC_Pkgpc3))
1226 p->pc3 = 0;
1227 if (DO_BIC(BIC_Pkgpc6))
1228 p->pc6 = 0;
1229 if (DO_BIC(BIC_Pkgpc7))
1230 p->pc7 = 0;
1231 p->pc8 = 0;
1232 p->pc9 = 0;
1233 p->pc10 = 0;
1234
1235 p->energy_pkg = 0;
1236 p->energy_dram = 0;
1237 p->energy_cores = 0;
1238 p->energy_gfx = 0;
1239 p->rapl_pkg_perf_status = 0;
1240 p->rapl_dram_perf_status = 0;
1241 p->pkg_temp_c = 0;
1242
1243 p->gfx_rc6_ms = 0;
1244 p->gfx_mhz = 0;
1245 for (i = 0, mp = sys.tp; mp; i++, mp = mp->next)
1246 t->counter[i] = 0;
1247
1248 for (i = 0, mp = sys.cp; mp; i++, mp = mp->next)
1249 c->counter[i] = 0;
1250
1251 for (i = 0, mp = sys.pp; mp; i++, mp = mp->next)
1252 p->counter[i] = 0;
1253 }
1254 int sum_counters(struct thread_data *t, struct core_data *c,
1255 struct pkg_data *p)
1256 {
1257 int i;
1258 struct msr_counter *mp;
1259
1260 average.threads.tsc += t->tsc;
1261 average.threads.aperf += t->aperf;
1262 average.threads.mperf += t->mperf;
1263 average.threads.c1 += t->c1;
1264
1265 average.threads.irq_count += t->irq_count;
1266 average.threads.smi_count += t->smi_count;
1267
1268 for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) {
1269 if (mp->format == FORMAT_RAW)
1270 continue;
1271 average.threads.counter[i] += t->counter[i];
1272 }
1273
1274 /* sum per-core values only for 1st thread in core */
1275 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
1276 return 0;
1277
1278 average.cores.c3 += c->c3;
1279 average.cores.c6 += c->c6;
1280 average.cores.c7 += c->c7;
1281 average.cores.mc6_us += c->mc6_us;
1282
1283 average.cores.core_temp_c = MAX(average.cores.core_temp_c, c->core_temp_c);
1284
1285 for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
1286 if (mp->format == FORMAT_RAW)
1287 continue;
1288 average.cores.counter[i] += c->counter[i];
1289 }
1290
1291 /* sum per-pkg values only for 1st core in pkg */
1292 if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
1293 return 0;
1294
1295 if (do_skl_residency) {
1296 average.packages.pkg_wtd_core_c0 += p->pkg_wtd_core_c0;
1297 average.packages.pkg_any_core_c0 += p->pkg_any_core_c0;
1298 average.packages.pkg_any_gfxe_c0 += p->pkg_any_gfxe_c0;
1299 average.packages.pkg_both_core_gfxe_c0 += p->pkg_both_core_gfxe_c0;
1300 }
1301
1302 average.packages.pc2 += p->pc2;
1303 if (DO_BIC(BIC_Pkgpc3))
1304 average.packages.pc3 += p->pc3;
1305 if (DO_BIC(BIC_Pkgpc6))
1306 average.packages.pc6 += p->pc6;
1307 if (DO_BIC(BIC_Pkgpc7))
1308 average.packages.pc7 += p->pc7;
1309 average.packages.pc8 += p->pc8;
1310 average.packages.pc9 += p->pc9;
1311 average.packages.pc10 += p->pc10;
1312
1313 average.packages.energy_pkg += p->energy_pkg;
1314 average.packages.energy_dram += p->energy_dram;
1315 average.packages.energy_cores += p->energy_cores;
1316 average.packages.energy_gfx += p->energy_gfx;
1317
1318 average.packages.gfx_rc6_ms = p->gfx_rc6_ms;
1319 average.packages.gfx_mhz = p->gfx_mhz;
1320
1321 average.packages.pkg_temp_c = MAX(average.packages.pkg_temp_c, p->pkg_temp_c);
1322
1323 average.packages.rapl_pkg_perf_status += p->rapl_pkg_perf_status;
1324 average.packages.rapl_dram_perf_status += p->rapl_dram_perf_status;
1325
1326 for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) {
1327 if (mp->format == FORMAT_RAW)
1328 continue;
1329 average.packages.counter[i] += p->counter[i];
1330 }
1331 return 0;
1332 }
1333 /*
1334 * sum the counters for all cpus in the system
1335 * compute the weighted average
1336 */
1337 void compute_average(struct thread_data *t, struct core_data *c,
1338 struct pkg_data *p)
1339 {
1340 int i;
1341 struct msr_counter *mp;
1342
1343 clear_counters(&average.threads, &average.cores, &average.packages);
1344
1345 for_all_cpus(sum_counters, t, c, p);
1346
1347 average.threads.tsc /= topo.num_cpus;
1348 average.threads.aperf /= topo.num_cpus;
1349 average.threads.mperf /= topo.num_cpus;
1350 average.threads.c1 /= topo.num_cpus;
1351
1352 if (average.threads.irq_count > 9999999)
1353 sums_need_wide_columns = 1;
1354
1355 average.cores.c3 /= topo.num_cores;
1356 average.cores.c6 /= topo.num_cores;
1357 average.cores.c7 /= topo.num_cores;
1358 average.cores.mc6_us /= topo.num_cores;
1359
1360 if (do_skl_residency) {
1361 average.packages.pkg_wtd_core_c0 /= topo.num_packages;
1362 average.packages.pkg_any_core_c0 /= topo.num_packages;
1363 average.packages.pkg_any_gfxe_c0 /= topo.num_packages;
1364 average.packages.pkg_both_core_gfxe_c0 /= topo.num_packages;
1365 }
1366
1367 average.packages.pc2 /= topo.num_packages;
1368 if (DO_BIC(BIC_Pkgpc3))
1369 average.packages.pc3 /= topo.num_packages;
1370 if (DO_BIC(BIC_Pkgpc6))
1371 average.packages.pc6 /= topo.num_packages;
1372 if (DO_BIC(BIC_Pkgpc7))
1373 average.packages.pc7 /= topo.num_packages;
1374
1375 average.packages.pc8 /= topo.num_packages;
1376 average.packages.pc9 /= topo.num_packages;
1377 average.packages.pc10 /= topo.num_packages;
1378
1379 for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) {
1380 if (mp->format == FORMAT_RAW)
1381 continue;
1382 if (mp->type == COUNTER_ITEMS) {
1383 if (average.threads.counter[i] > 9999999)
1384 sums_need_wide_columns = 1;
1385 continue;
1386 }
1387 average.threads.counter[i] /= topo.num_cpus;
1388 }
1389 for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
1390 if (mp->format == FORMAT_RAW)
1391 continue;
1392 if (mp->type == COUNTER_ITEMS) {
1393 if (average.cores.counter[i] > 9999999)
1394 sums_need_wide_columns = 1;
1395 }
1396 average.cores.counter[i] /= topo.num_cores;
1397 }
1398 for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) {
1399 if (mp->format == FORMAT_RAW)
1400 continue;
1401 if (mp->type == COUNTER_ITEMS) {
1402 if (average.packages.counter[i] > 9999999)
1403 sums_need_wide_columns = 1;
1404 }
1405 average.packages.counter[i] /= topo.num_packages;
1406 }
1407 }
1408
1409 static unsigned long long rdtsc(void)
1410 {
1411 unsigned int low, high;
1412
1413 asm volatile("rdtsc" : "=a" (low), "=d" (high));
1414
1415 return low | ((unsigned long long)high) << 32;
1416 }
1417
1418 /*
1419 * Open a file, and exit on failure
1420 */
1421 FILE *fopen_or_die(const char *path, const char *mode)
1422 {
1423 FILE *filep = fopen(path, mode);
1424
1425 if (!filep)
1426 err(1, "%s: open failed", path);
1427 return filep;
1428 }
1429 /*
1430 * snapshot_sysfs_counter()
1431 *
1432 * return snapshot of given counter
1433 */
1434 unsigned long long snapshot_sysfs_counter(char *path)
1435 {
1436 FILE *fp;
1437 int retval;
1438 unsigned long long counter;
1439
1440 fp = fopen_or_die(path, "r");
1441
1442 retval = fscanf(fp, "%llu", &counter);
1443 if (retval != 1)
1444 err(1, "snapshot_sysfs_counter(%s)", path);
1445
1446 fclose(fp);
1447
1448 return counter;
1449 }
1450
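/*
 * Snapshot one user-added counter: MSR-based counters (msr_num != 0) are read
 * via get_msr(), sysfs-based counters are read from mp->path, using the
 * per-CPU path form when SYSFS_PERCPU is set in mp->flags.
 */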
1451 int get_mp(int cpu, struct msr_counter *mp, unsigned long long *counterp)
1452 {
1453 if (mp->msr_num != 0) {
1454 if (get_msr(cpu, mp->msr_num, counterp))
1455 return -1;
1456 } else {
1457 char path[128];
1458
1459 if (mp->flags & SYSFS_PERCPU) {
1460 sprintf(path, "/sys/devices/system/cpu/cpu%d/%s",
1461 cpu, mp->path);
1462
1463 *counterp = snapshot_sysfs_counter(path);
1464 } else {
1465 *counterp = snapshot_sysfs_counter(mp->path);
1466 }
1467 }
1468
1469 return 0;
1470 }
1471
1472 /*
1473 * get_counters(...)
1474 * migrate to cpu
1475 * acquire and record local counters for that cpu
1476 */
1477 int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
1478 {
1479 int cpu = t->cpu_id;
1480 unsigned long long msr;
1481 int aperf_mperf_retry_count = 0;
1482 struct msr_counter *mp;
1483 int i;
1484
1485 if (cpu_migrate(cpu)) {
1486 fprintf(outf, "Could not migrate to CPU %d\n", cpu);
1487 return -1;
1488 }
1489
1490 retry:
1491 t->tsc = rdtsc(); /* we are running on local CPU of interest */
1492
1493 if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz)) {
1494 unsigned long long tsc_before, tsc_between, tsc_after, aperf_time, mperf_time;
1495
1496 /*
1497 * The TSC, APERF and MPERF must be read together for
1498 * APERF/MPERF and MPERF/TSC to give accurate results.
1499 *
1500 * Unfortunately, APERF and MPERF are read by
1501 * individual system calls, so delays may occur
1502 * between them. If the time to read them
1503 * varies by a large amount, we re-read them.
1504 */
1505
1506 /*
1507 * This initial dummy APERF read has been seen to
1508 * reduce jitter in the subsequent reads.
1509 */
1510
1511 if (get_msr(cpu, MSR_IA32_APERF, &t->aperf))
1512 return -3;
1513
1514 t->tsc = rdtsc(); /* re-read close to APERF */
1515
1516 tsc_before = t->tsc;
1517
1518 if (get_msr(cpu, MSR_IA32_APERF, &t->aperf))
1519 return -3;
1520
1521 tsc_between = rdtsc();
1522
1523 if (get_msr(cpu, MSR_IA32_MPERF, &t->mperf))
1524 return -4;
1525
1526 tsc_after = rdtsc();
1527
1528 aperf_time = tsc_between - tsc_before;
1529 mperf_time = tsc_after - tsc_between;
1530
1531 /*
1532 * If the system call latency to read APERF and MPERF
1533 * differ by more than 2x, then try again.
1534 */
1535 if ((aperf_time > (2 * mperf_time)) || (mperf_time > (2 * aperf_time))) {
1536 aperf_mperf_retry_count++;
1537 if (aperf_mperf_retry_count < 5)
1538 goto retry;
1539 else
1540 warnx("cpu%d jitter %lld %lld",
1541 cpu, aperf_time, mperf_time);
1542 }
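/*
 * Illustrative example: if the APERF read took 9,000 TSC cycles but the MPERF
 * read only 3,000, the 2x check above fails and the whole TSC/APERF/MPERF
 * sequence is re-read; once aperf_mperf_retry_count reaches 5, a jitter
 * warning is printed instead.
 */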
1543 aperf_mperf_retry_count = 0;
1544
1545 t->aperf = t->aperf * aperf_mperf_multiplier;
1546 t->mperf = t->mperf * aperf_mperf_multiplier;
1547 }
1548
1549 if (DO_BIC(BIC_IRQ))
1550 t->irq_count = irqs_per_cpu[cpu];
1551 if (DO_BIC(BIC_SMI)) {
1552 if (get_msr(cpu, MSR_SMI_COUNT, &msr))
1553 return -5;
1554 t->smi_count = msr & 0xFFFFFFFF;
1555 }
1556 if (DO_BIC(BIC_CPU_c1) && use_c1_residency_msr) {
1557 if (get_msr(cpu, MSR_CORE_C1_RES, &t->c1))
1558 return -6;
1559 }
1560
1561 for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) {
1562 if (get_mp(cpu, mp, &t->counter[i]))
1563 return -10;
1564 }
1565
1566 /* collect core counters only for 1st thread in core */
1567 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
1568 return 0;
1569
1570 if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates) {
1571 if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3))
1572 return -6;
1573 }
1574
1575 if (DO_BIC(BIC_CPU_c6) && !do_knl_cstates) {
1576 if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6))
1577 return -7;
1578 } else if (do_knl_cstates) {
1579 if (get_msr(cpu, MSR_KNL_CORE_C6_RESIDENCY, &c->c6))
1580 return -7;
1581 }
1582
1583 if (DO_BIC(BIC_CPU_c7))
1584 if (get_msr(cpu, MSR_CORE_C7_RESIDENCY, &c->c7))
1585 return -8;
1586
1587 if (DO_BIC(BIC_Mod_c6))
1588 if (get_msr(cpu, MSR_MODULE_C6_RES_MS, &c->mc6_us))
1589 return -8;
1590
1591 if (DO_BIC(BIC_CoreTmp)) {
1592 if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr))
1593 return -9;
1594 c->core_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F);
1595 }
1596
1597 for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
1598 if (get_mp(cpu, mp, &c->counter[i]))
1599 return -10;
1600 }
1601
1602 /* collect package counters only for 1st core in package */
1603 if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
1604 return 0;
1605
1606 if (do_skl_residency) {
1607 if (get_msr(cpu, MSR_PKG_WEIGHTED_CORE_C0_RES, &p->pkg_wtd_core_c0))
1608 return -10;
1609 if (get_msr(cpu, MSR_PKG_ANY_CORE_C0_RES, &p->pkg_any_core_c0))
1610 return -11;
1611 if (get_msr(cpu, MSR_PKG_ANY_GFXE_C0_RES, &p->pkg_any_gfxe_c0))
1612 return -12;
1613 if (get_msr(cpu, MSR_PKG_BOTH_CORE_GFXE_C0_RES, &p->pkg_both_core_gfxe_c0))
1614 return -13;
1615 }
1616 if (DO_BIC(BIC_Pkgpc3))
1617 if (get_msr(cpu, MSR_PKG_C3_RESIDENCY, &p->pc3))
1618 return -9;
1619 if (DO_BIC(BIC_Pkgpc6)) {
1620 if (do_slm_cstates) {
1621 if (get_msr(cpu, MSR_ATOM_PKG_C6_RESIDENCY, &p->pc6))
1622 return -10;
1623 } else {
1624 if (get_msr(cpu, MSR_PKG_C6_RESIDENCY, &p->pc6))
1625 return -10;
1626 }
1627 }
1628
1629 if (DO_BIC(BIC_Pkgpc2))
1630 if (get_msr(cpu, MSR_PKG_C2_RESIDENCY, &p->pc2))
1631 return -11;
1632 if (DO_BIC(BIC_Pkgpc7))
1633 if (get_msr(cpu, MSR_PKG_C7_RESIDENCY, &p->pc7))
1634 return -12;
1635 if (DO_BIC(BIC_Pkgpc8))
1636 if (get_msr(cpu, MSR_PKG_C8_RESIDENCY, &p->pc8))
1637 return -13;
1638 if (DO_BIC(BIC_Pkgpc9))
1639 if (get_msr(cpu, MSR_PKG_C9_RESIDENCY, &p->pc9))
1640 return -13;
1641 if (DO_BIC(BIC_Pkgpc10))
1642 if (get_msr(cpu, MSR_PKG_C10_RESIDENCY, &p->pc10))
1643 return -13;
1644
1645 if (do_rapl & RAPL_PKG) {
1646 if (get_msr(cpu, MSR_PKG_ENERGY_STATUS, &msr))
1647 return -13;
1648 p->energy_pkg = msr & 0xFFFFFFFF;
1649 }
1650 if (do_rapl & RAPL_CORES_ENERGY_STATUS) {
1651 if (get_msr(cpu, MSR_PP0_ENERGY_STATUS, &msr))
1652 return -14;
1653 p->energy_cores = msr & 0xFFFFFFFF;
1654 }
1655 if (do_rapl & RAPL_DRAM) {
1656 if (get_msr(cpu, MSR_DRAM_ENERGY_STATUS, &msr))
1657 return -15;
1658 p->energy_dram = msr & 0xFFFFFFFF;
1659 }
1660 if (do_rapl & RAPL_GFX) {
1661 if (get_msr(cpu, MSR_PP1_ENERGY_STATUS, &msr))
1662 return -16;
1663 p->energy_gfx = msr & 0xFFFFFFFF;
1664 }
1665 if (do_rapl & RAPL_PKG_PERF_STATUS) {
1666 if (get_msr(cpu, MSR_PKG_PERF_STATUS, &msr))
1667 return -16;
1668 p->rapl_pkg_perf_status = msr & 0xFFFFFFFF;
1669 }
1670 if (do_rapl & RAPL_DRAM_PERF_STATUS) {
1671 if (get_msr(cpu, MSR_DRAM_PERF_STATUS, &msr))
1672 return -16;
1673 p->rapl_dram_perf_status = msr & 0xFFFFFFFF;
1674 }
1675 if (DO_BIC(BIC_PkgTmp)) {
1676 if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr))
1677 return -17;
1678 p->pkg_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F);
1679 }
1680
1681 if (DO_BIC(BIC_GFX_rc6))
1682 p->gfx_rc6_ms = gfx_cur_rc6_ms;
1683
1684 if (DO_BIC(BIC_GFXMHz))
1685 p->gfx_mhz = gfx_cur_mhz;
1686
1687 for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) {
1688 if (get_mp(cpu, mp, &p->counter[i]))
1689 return -10;
1690 }
1691
1692 return 0;
1693 }
1694
1695 /*
1696 * MSR_PKG_CST_CONFIG_CONTROL decoding for pkg_cstate_limit:
1697 * If you change the values, note they are used both in comparisons
1698 * (>= PCL__7) and to index pkg_cstate_limit_strings[].
1699 */
1700
1701 #define PCLUKN 0 /* Unknown */
1702 #define PCLRSV 1 /* Reserved */
1703 #define PCL__0 2 /* PC0 */
1704 #define PCL__1 3 /* PC1 */
1705 #define PCL__2 4 /* PC2 */
1706 #define PCL__3 5 /* PC3 */
1707 #define PCL__4 6 /* PC4 */
1708 #define PCL__6 7 /* PC6 */
1709 #define PCL_6N 8 /* PC6 No Retention */
1710 #define PCL_6R 9 /* PC6 Retention */
1711 #define PCL__7 10 /* PC7 */
1712 #define PCL_7S 11 /* PC7 Shrink */
1713 #define PCL__8 12 /* PC8 */
1714 #define PCL__9 13 /* PC9 */
1715 #define PCLUNL 14 /* Unlimited */
1716
1717 int pkg_cstate_limit = PCLUKN;
1718 char *pkg_cstate_limit_strings[] = { "reserved", "unknown", "pc0", "pc1", "pc2",
1719 "pc3", "pc4", "pc6", "pc6n", "pc6r", "pc7", "pc7s", "pc8", "pc9", "unlimited"};
1720
1721 int nhm_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__3, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
1722 int snb_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCL__7, PCL_7S, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
1723 int hsw_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL__3, PCL__6, PCL__7, PCL_7S, PCL__8, PCL__9, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
1724 int slv_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCLRSV, PCLRSV, PCL__4, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7};
1725 int amt_pkg_cstate_limits[16] = {PCLUNL, PCL__1, PCL__2, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
1726 int phi_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
1727 int bxt_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
1728 int skx_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
1729
1730
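/*
 * On processors where base_hz differs from tsc_hz, tsc_tweak rescales the
 * TSC delta that format_counters() and delta_thread() use as their 100%
 * residency reference.
 */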
1731 static void
1732 calculate_tsc_tweak()
1733 {
1734 tsc_tweak = base_hz / tsc_hz;
1735 }
1736
1737 static void
1738 dump_nhm_platform_info(void)
1739 {
1740 unsigned long long msr;
1741 unsigned int ratio;
1742
1743 get_msr(base_cpu, MSR_PLATFORM_INFO, &msr);
1744
1745 fprintf(outf, "cpu%d: MSR_PLATFORM_INFO: 0x%08llx\n", base_cpu, msr);
1746
1747 ratio = (msr >> 40) & 0xFF;
1748 fprintf(outf, "%d * %.1f = %.1f MHz max efficiency frequency\n",
1749 ratio, bclk, ratio * bclk);
1750
1751 ratio = (msr >> 8) & 0xFF;
1752 fprintf(outf, "%d * %.1f = %.1f MHz base frequency\n",
1753 ratio, bclk, ratio * bclk);
1754
1755 get_msr(base_cpu, MSR_IA32_POWER_CTL, &msr);
1756 fprintf(outf, "cpu%d: MSR_IA32_POWER_CTL: 0x%08llx (C1E auto-promotion: %sabled)\n",
1757 base_cpu, msr, msr & 0x2 ? "EN" : "DIS");
1758
1759 return;
1760 }
1761
1762 static void
1763 dump_hsw_turbo_ratio_limits(void)
1764 {
1765 unsigned long long msr;
1766 unsigned int ratio;
1767
1768 get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT2, &msr);
1769
1770 fprintf(outf, "cpu%d: MSR_TURBO_RATIO_LIMIT2: 0x%08llx\n", base_cpu, msr);
1771
1772 ratio = (msr >> 8) & 0xFF;
1773 if (ratio)
1774 fprintf(outf, "%d * %.1f = %.1f MHz max turbo 18 active cores\n",
1775 ratio, bclk, ratio * bclk);
1776
1777 ratio = (msr >> 0) & 0xFF;
1778 if (ratio)
1779 fprintf(outf, "%d * %.1f = %.1f MHz max turbo 17 active cores\n",
1780 ratio, bclk, ratio * bclk);
1781 return;
1782 }
1783
1784 static void
1785 dump_ivt_turbo_ratio_limits(void)
1786 {
1787 unsigned long long msr;
1788 unsigned int ratio;
1789
1790 get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT1, &msr);
1791
1792 fprintf(outf, "cpu%d: MSR_TURBO_RATIO_LIMIT1: 0x%08llx\n", base_cpu, msr);
1793
1794 ratio = (msr >> 56) & 0xFF;
1795 if (ratio)
1796 fprintf(outf, "%d * %.1f = %.1f MHz max turbo 16 active cores\n",
1797 ratio, bclk, ratio * bclk);
1798
1799 ratio = (msr >> 48) & 0xFF;
1800 if (ratio)
1801 fprintf(outf, "%d * %.1f = %.1f MHz max turbo 15 active cores\n",
1802 ratio, bclk, ratio * bclk);
1803
1804 ratio = (msr >> 40) & 0xFF;
1805 if (ratio)
1806 fprintf(outf, "%d * %.1f = %.1f MHz max turbo 14 active cores\n",
1807 ratio, bclk, ratio * bclk);
1808
1809 ratio = (msr >> 32) & 0xFF;
1810 if (ratio)
1811 fprintf(outf, "%d * %.1f = %.1f MHz max turbo 13 active cores\n",
1812 ratio, bclk, ratio * bclk);
1813
1814 ratio = (msr >> 24) & 0xFF;
1815 if (ratio)
1816 fprintf(outf, "%d * %.1f = %.1f MHz max turbo 12 active cores\n",
1817 ratio, bclk, ratio * bclk);
1818
1819 ratio = (msr >> 16) & 0xFF;
1820 if (ratio)
1821 fprintf(outf, "%d * %.1f = %.1f MHz max turbo 11 active cores\n",
1822 ratio, bclk, ratio * bclk);
1823
1824 ratio = (msr >> 8) & 0xFF;
1825 if (ratio)
1826 fprintf(outf, "%d * %.1f = %.1f MHz max turbo 10 active cores\n",
1827 ratio, bclk, ratio * bclk);
1828
1829 ratio = (msr >> 0) & 0xFF;
1830 if (ratio)
1831 fprintf(outf, "%d * %.1f = %.1f MHz max turbo 9 active cores\n",
1832 ratio, bclk, ratio * bclk);
1833 return;
1834 }
1835 int has_turbo_ratio_group_limits(int family, int model)
1836 {
1837
1838 if (!genuine_intel)
1839 return 0;
1840
1841 switch (model) {
1842 case INTEL_FAM6_ATOM_GOLDMONT:
1843 case INTEL_FAM6_SKYLAKE_X:
1844 case INTEL_FAM6_ATOM_DENVERTON:
1845 return 1;
1846 }
1847 return 0;
1848 }
1849
1850 static void
1851 dump_turbo_ratio_limits(int family, int model)
1852 {
1853 unsigned long long msr, core_counts;
1854 unsigned int ratio, group_size;
1855
1856 get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT, &msr);
1857 fprintf(outf, "cpu%d: MSR_TURBO_RATIO_LIMIT: 0x%08llx\n", base_cpu, msr);
1858
1859 if (has_turbo_ratio_group_limits(family, model)) {
1860 get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT1, &core_counts);
1861 fprintf(outf, "cpu%d: MSR_TURBO_RATIO_LIMIT1: 0x%08llx\n", base_cpu, core_counts);
1862 } else {
1863 core_counts = 0x0807060504030201;
1864 }
1865
1866 ratio = (msr >> 56) & 0xFF;
1867 group_size = (core_counts >> 56) & 0xFF;
1868 if (ratio)
1869 fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n",
1870 ratio, bclk, ratio * bclk, group_size);
1871
1872 ratio = (msr >> 48) & 0xFF;
1873 group_size = (core_counts >> 48) & 0xFF;
1874 if (ratio)
1875 fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n",
1876 ratio, bclk, ratio * bclk, group_size);
1877
1878 ratio = (msr >> 40) & 0xFF;
1879 group_size = (core_counts >> 40) & 0xFF;
1880 if (ratio)
1881 fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n",
1882 ratio, bclk, ratio * bclk, group_size);
1883
1884 ratio = (msr >> 32) & 0xFF;
1885 group_size = (core_counts >> 32) & 0xFF;
1886 if (ratio)
1887 fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n",
1888 ratio, bclk, ratio * bclk, group_size);
1889
1890 ratio = (msr >> 24) & 0xFF;
1891 group_size = (core_counts >> 24) & 0xFF;
1892 if (ratio)
1893 fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n",
1894 ratio, bclk, ratio * bclk, group_size);
1895
1896 ratio = (msr >> 16) & 0xFF;
1897 group_size = (core_counts >> 16) & 0xFF;
1898 if (ratio)
1899 fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n",
1900 ratio, bclk, ratio * bclk, group_size);
1901
1902 ratio = (msr >> 8) & 0xFF;
1903 group_size = (core_counts >> 8) & 0xFF;
1904 if (ratio)
1905 fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n",
1906 ratio, bclk, ratio * bclk, group_size);
1907
1908 ratio = (msr >> 0) & 0xFF;
1909 group_size = (core_counts >> 0) & 0xFF;
1910 if (ratio)
1911 fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n",
1912 ratio, bclk, ratio * bclk, group_size);
1913 return;
1914 }
1915
1916 static void
1917 dump_atom_turbo_ratio_limits(void)
1918 {
1919 unsigned long long msr;
1920 unsigned int ratio;
1921
1922 get_msr(base_cpu, MSR_ATOM_CORE_RATIOS, &msr);
1923 fprintf(outf, "cpu%d: MSR_ATOM_CORE_RATIOS: 0x%08llx\n", base_cpu, msr & 0xFFFFFFFF);
1924
1925 ratio = (msr >> 0) & 0x3F;
1926 if (ratio)
1927 fprintf(outf, "%d * %.1f = %.1f MHz minimum operating frequency\n",
1928 ratio, bclk, ratio * bclk);
1929
1930 ratio = (msr >> 8) & 0x3F;
1931 if (ratio)
1932 fprintf(outf, "%d * %.1f = %.1f MHz low frequency mode (LFM)\n",
1933 ratio, bclk, ratio * bclk);
1934
1935 ratio = (msr >> 16) & 0x3F;
1936 if (ratio)
1937 fprintf(outf, "%d * %.1f = %.1f MHz base frequency\n",
1938 ratio, bclk, ratio * bclk);
1939
1940 get_msr(base_cpu, MSR_ATOM_CORE_TURBO_RATIOS, &msr);
1941 fprintf(outf, "cpu%d: MSR_ATOM_CORE_TURBO_RATIOS: 0x%08llx\n", base_cpu, msr & 0xFFFFFFFF);
1942
1943 ratio = (msr >> 24) & 0x3F;
1944 if (ratio)
1945 fprintf(outf, "%d * %.1f = %.1f MHz max turbo 4 active cores\n",
1946 ratio, bclk, ratio * bclk);
1947
1948 ratio = (msr >> 16) & 0x3F;
1949 if (ratio)
1950 fprintf(outf, "%d * %.1f = %.1f MHz max turbo 3 active cores\n",
1951 ratio, bclk, ratio * bclk);
1952
1953 ratio = (msr >> 8) & 0x3F;
1954 if (ratio)
1955 fprintf(outf, "%d * %.1f = %.1f MHz max turbo 2 active cores\n",
1956 ratio, bclk, ratio * bclk);
1957
1958 ratio = (msr >> 0) & 0x3F;
1959 if (ratio)
1960 fprintf(outf, "%d * %.1f = %.1f MHz max turbo 1 active core\n",
1961 ratio, bclk, ratio * bclk);
1962 }
1963
1964 static void
1965 dump_knl_turbo_ratio_limits(void)
1966 {
1967 const unsigned int buckets_no = 7;
1968
1969 unsigned long long msr;
1970 int delta_cores, delta_ratio;
1971 int i, b_nr;
1972 unsigned int cores[buckets_no];
1973 unsigned int ratio[buckets_no];
1974
1975 get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT, &msr);
1976
1977 fprintf(outf, "cpu%d: MSR_TURBO_RATIO_LIMIT: 0x%08llx\n",
1978 base_cpu, msr);
1979
1980 /**
1981 * Turbo encoding in KNL is as follows:
1982 * [0] -- Reserved
1983 * [7:1] -- Base value of number of active cores of bucket 1.
1984 * [15:8] -- Base value of freq ratio of bucket 1.
1985 * [20:16] -- +ve delta of number of active cores of bucket 2.
1986 * i.e. active cores of bucket 2 =
1987 * active cores of bucket 1 + delta
1988 * [23:21] -- Negative delta of freq ratio of bucket 2.
1989 * i.e. freq ratio of bucket 2 =
1990 * freq ratio of bucket 1 - delta
1991 * [28:24]-- +ve delta of number of active cores of bucket 3.
1992 * [31:29]-- -ve delta of freq ratio of bucket 3.
1993 * [36:32]-- +ve delta of number of active cores of bucket 4.
1994 * [39:37]-- -ve delta of freq ratio of bucket 4.
1995 * [44:40]-- +ve delta of number of active cores of bucket 5.
1996 * [47:45]-- -ve delta of freq ratio of bucket 5.
1997 * [52:48]-- +ve delta of number of active cores of bucket 6.
1998 * [55:53]-- -ve delta of freq ratio of bucket 6.
1999 * [60:56]-- +ve delta of number of active cores of bucket 7.
2000 * [63:61]-- -ve delta of freq ratio of bucket 7.
2001 */
2002
2003 b_nr = 0;
2004 cores[b_nr] = (msr & 0xFF) >> 1;
2005 ratio[b_nr] = (msr >> 8) & 0xFF;
2006
2007 for (i = 16; i < 64; i += 8) {
2008 delta_cores = (msr >> i) & 0x1F;
2009 delta_ratio = (msr >> (i + 5)) & 0x7;
2010
2011 cores[b_nr + 1] = cores[b_nr] + delta_cores;
2012 ratio[b_nr + 1] = ratio[b_nr] - delta_ratio;
2013 b_nr++;
2014 }
2015
2016 for (i = buckets_no - 1; i >= 0; i--)
2017 if (i > 0 ? ratio[i] != ratio[i - 1] : 1)
2018 fprintf(outf,
2019 "%d * %.1f = %.1f MHz max turbo %d active cores\n",
2020 ratio[i], bclk, ratio[i] * bclk, cores[i]);
2021 }
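/*
 * Worked decode of the KNL encoding described above, using a
 * hypothetical MSR value: if bits [7:1] = 2 and [15:8] = 30, bucket 1
 * is "up to 2 active cores at ratio 30".  If bits [20:16] = 4 and
 * [23:21] = 1, bucket 2 is "up to 2 + 4 = 6 active cores at ratio
 * 30 - 1 = 29", and each further bucket applies its deltas to the
 * previous one.
 */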
2022
2023 static void
2024 dump_nhm_cst_cfg(void)
2025 {
2026 unsigned long long msr;
2027
2028 get_msr(base_cpu, MSR_PKG_CST_CONFIG_CONTROL, &msr);
2029
2030 #define SNB_C1_AUTO_UNDEMOTE (1UL << 27)
2031 #define SNB_C3_AUTO_UNDEMOTE (1UL << 28)
2032
2033 fprintf(outf, "cpu%d: MSR_PKG_CST_CONFIG_CONTROL: 0x%08llx", base_cpu, msr);
2034
2035 fprintf(outf, " (%s%s%s%s%slocked: pkg-cstate-limit=%d: %s)\n",
2036 (msr & SNB_C3_AUTO_UNDEMOTE) ? "UNdemote-C3, " : "",
2037 (msr & SNB_C1_AUTO_UNDEMOTE) ? "UNdemote-C1, " : "",
2038 (msr & NHM_C3_AUTO_DEMOTE) ? "demote-C3, " : "",
2039 (msr & NHM_C1_AUTO_DEMOTE) ? "demote-C1, " : "",
2040 (msr & (1 << 15)) ? "" : "UN",
2041 (unsigned int)msr & 0xF,
2042 pkg_cstate_limit_strings[pkg_cstate_limit]);
2043 return;
2044 }
2045
2046 static void
2047 dump_config_tdp(void)
2048 {
2049 unsigned long long msr;
2050
2051 get_msr(base_cpu, MSR_CONFIG_TDP_NOMINAL, &msr);
2052 fprintf(outf, "cpu%d: MSR_CONFIG_TDP_NOMINAL: 0x%08llx", base_cpu, msr);
2053 fprintf(outf, " (base_ratio=%d)\n", (unsigned int)msr & 0xFF);
2054
2055 get_msr(base_cpu, MSR_CONFIG_TDP_LEVEL_1, &msr);
2056 fprintf(outf, "cpu%d: MSR_CONFIG_TDP_LEVEL_1: 0x%08llx (", base_cpu, msr);
2057 if (msr) {
2058 fprintf(outf, "PKG_MIN_PWR_LVL1=%d ", (unsigned int)(msr >> 48) & 0x7FFF);
2059 fprintf(outf, "PKG_MAX_PWR_LVL1=%d ", (unsigned int)(msr >> 32) & 0x7FFF);
2060 fprintf(outf, "LVL1_RATIO=%d ", (unsigned int)(msr >> 16) & 0xFF);
2061 fprintf(outf, "PKG_TDP_LVL1=%d", (unsigned int)(msr) & 0x7FFF);
2062 }
2063 fprintf(outf, ")\n");
2064
2065 get_msr(base_cpu, MSR_CONFIG_TDP_LEVEL_2, &msr);
2066 fprintf(outf, "cpu%d: MSR_CONFIG_TDP_LEVEL_2: 0x%08llx (", base_cpu, msr);
2067 if (msr) {
2068 fprintf(outf, "PKG_MIN_PWR_LVL2=%d ", (unsigned int)(msr >> 48) & 0x7FFF);
2069 fprintf(outf, "PKG_MAX_PWR_LVL2=%d ", (unsigned int)(msr >> 32) & 0x7FFF);
2070 fprintf(outf, "LVL2_RATIO=%d ", (unsigned int)(msr >> 16) & 0xFF);
2071 fprintf(outf, "PKG_TDP_LVL2=%d", (unsigned int)(msr) & 0x7FFF);
2072 }
2073 fprintf(outf, ")\n");
2074
2075 get_msr(base_cpu, MSR_CONFIG_TDP_CONTROL, &msr);
2076 fprintf(outf, "cpu%d: MSR_CONFIG_TDP_CONTROL: 0x%08llx (", base_cpu, msr);
2077 if ((msr) & 0x3)
2078 fprintf(outf, "TDP_LEVEL=%d ", (unsigned int)(msr) & 0x3);
2079 fprintf(outf, " lock=%d", (unsigned int)(msr >> 31) & 1);
2080 fprintf(outf, ")\n");
2081
2082 get_msr(base_cpu, MSR_TURBO_ACTIVATION_RATIO, &msr);
2083 fprintf(outf, "cpu%d: MSR_TURBO_ACTIVATION_RATIO: 0x%08llx (", base_cpu, msr);
2084 fprintf(outf, "MAX_NON_TURBO_RATIO=%d", (unsigned int)(msr) & 0xFF);
2085 fprintf(outf, " lock=%d", (unsigned int)(msr >> 31) & 1);
2086 fprintf(outf, ")\n");
2087 }
2088
2089 unsigned int irtl_time_units[] = {1, 32, 1024, 32768, 1048576, 33554432, 0, 0 };
2090
2091 void print_irtl(void)
2092 {
2093 unsigned long long msr;
2094
2095 get_msr(base_cpu, MSR_PKGC3_IRTL, &msr);
2096 fprintf(outf, "cpu%d: MSR_PKGC3_IRTL: 0x%08llx (", base_cpu, msr);
2097 fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
2098 (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
2099
2100 get_msr(base_cpu, MSR_PKGC6_IRTL, &msr);
2101 fprintf(outf, "cpu%d: MSR_PKGC6_IRTL: 0x%08llx (", base_cpu, msr);
2102 fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
2103 (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
2104
2105 get_msr(base_cpu, MSR_PKGC7_IRTL, &msr);
2106 fprintf(outf, "cpu%d: MSR_PKGC7_IRTL: 0x%08llx (", base_cpu, msr);
2107 fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
2108 (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
2109
2110 if (!do_irtl_hsw)
2111 return;
2112
2113 get_msr(base_cpu, MSR_PKGC8_IRTL, &msr);
2114 fprintf(outf, "cpu%d: MSR_PKGC8_IRTL: 0x%08llx (", base_cpu, msr);
2115 fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
2116 (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
2117
2118 get_msr(base_cpu, MSR_PKGC9_IRTL, &msr);
2119 fprintf(outf, "cpu%d: MSR_PKGC9_IRTL: 0x%08llx (", base_cpu, msr);
2120 fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
2121 (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
2122
2123 get_msr(base_cpu, MSR_PKGC10_IRTL, &msr);
2124 fprintf(outf, "cpu%d: MSR_PKGC10_IRTL: 0x%08llx (", base_cpu, msr);
2125 fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
2126 (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
2127
2128 }
2129 void free_fd_percpu(void)
2130 {
2131 int i;
2132
2133 for (i = 0; i < topo.max_cpu_num + 1; ++i) {
2134 if (fd_percpu[i] != 0)
2135 close(fd_percpu[i]);
2136 }
2137
2138 free(fd_percpu);
2139 }
2140
2141 void free_all_buffers(void)
2142 {
2143 CPU_FREE(cpu_present_set);
2144 cpu_present_set = NULL;
2145 cpu_present_setsize = 0;
2146
2147 CPU_FREE(cpu_affinity_set);
2148 cpu_affinity_set = NULL;
2149 cpu_affinity_setsize = 0;
2150
2151 free(thread_even);
2152 free(core_even);
2153 free(package_even);
2154
2155 thread_even = NULL;
2156 core_even = NULL;
2157 package_even = NULL;
2158
2159 free(thread_odd);
2160 free(core_odd);
2161 free(package_odd);
2162
2163 thread_odd = NULL;
2164 core_odd = NULL;
2165 package_odd = NULL;
2166
2167 free(output_buffer);
2168 output_buffer = NULL;
2169 outp = NULL;
2170
2171 free_fd_percpu();
2172
2173 free(irq_column_2_cpu);
2174 free(irqs_per_cpu);
2175 }
2176
2177
2178 /*
2179 * Parse a file containing a single int.
2180 */
2181 int parse_int_file(const char *fmt, ...)
2182 {
2183 va_list args;
2184 char path[PATH_MAX];
2185 FILE *filep;
2186 int value;
2187
2188 va_start(args, fmt);
2189 vsnprintf(path, sizeof(path), fmt, args);
2190 va_end(args);
2191 filep = fopen_or_die(path, "r");
2192 if (fscanf(filep, "%d", &value) != 1)
2193 err(1, "%s: failed to parse number from file", path);
2194 fclose(filep);
2195 return value;
2196 }
2197
2198 /*
2199 * get_cpu_position_in_core(cpu)
2200 * return the position of the CPU among its HT siblings in the core
2201 * return -1 if the sibling is not in list
2202 */
2203 int get_cpu_position_in_core(int cpu)
2204 {
2205 char path[64];
2206 FILE *filep;
2207 int this_cpu;
2208 char character;
2209 int i;
2210
2211 sprintf(path,
2212 "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list",
2213 cpu);
2214 filep = fopen(path, "r");
2215 if (filep == NULL) {
2216 perror(path);
2217 exit(1);
2218 }
2219
2220 for (i = 0; i < topo.num_threads_per_core; i++) {
2221 fscanf(filep, "%d", &this_cpu);
2222 if (this_cpu == cpu) {
2223 fclose(filep);
2224 return i;
2225 }
2226
2227 /* Account for no separator after last thread */
2228 if (i != (topo.num_threads_per_core - 1))
2229 fscanf(filep, "%c", &character);
2230 }
2231
2232 fclose(filep);
2233 return -1;
2234 }
2235
2236 /*
2237 * cpu_is_first_core_in_package(cpu)
2238 * return 1 if given CPU is 1st core in package
2239 */
2240 int cpu_is_first_core_in_package(int cpu)
2241 {
2242 return cpu == parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_siblings_list", cpu);
2243 }
2244
2245 int get_physical_package_id(int cpu)
2246 {
2247 return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu);
2248 }
2249
2250 int get_core_id(int cpu)
2251 {
2252 return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_id", cpu);
2253 }
2254
2255 int get_num_ht_siblings(int cpu)
2256 {
2257 char path[80];
2258 FILE *filep;
2259 int sib1;
2260 int matches = 0;
2261 char character;
2262 char str[100];
2263 char *ch;
2264
2265 sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu);
2266 filep = fopen_or_die(path, "r");
2267
2268 /*
2269 * file format:
2270 * A ',' separated or '-' separated set of numbers
2271 * (eg 1-2 or 1,3,4,5)
2272 */
2273 fscanf(filep, "%d%c\n", &sib1, &character);
2274 fseek(filep, 0, SEEK_SET);
2275 fgets(str, 100, filep);
2276 ch = strchr(str, character);
2277 while (ch != NULL) {
2278 matches++;
2279 ch = strchr(ch+1, character);
2280 }
2281
2282 fclose(filep);
2283 return matches+1;
2284 }
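/*
 * Worked example for get_num_ht_siblings(): if thread_siblings_list
 * contains "1,3,4,5", the initial fscanf() sets character = ',' and
 * three ','s are found in the line, so 3 + 1 = 4 siblings are
 * reported.  For "1-2" the separator is '-' and the result is 2.
 */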
2285
2286 /*
2287 * run func(thread, core, package) in topology order
2288 * skip non-present cpus
2289 */
2290
2291 int for_all_cpus_2(int (func)(struct thread_data *, struct core_data *,
2292 struct pkg_data *, struct thread_data *, struct core_data *,
2293 struct pkg_data *), struct thread_data *thread_base,
2294 struct core_data *core_base, struct pkg_data *pkg_base,
2295 struct thread_data *thread_base2, struct core_data *core_base2,
2296 struct pkg_data *pkg_base2)
2297 {
2298 int retval, pkg_no, core_no, thread_no;
2299
2300 for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) {
2301 for (core_no = 0; core_no < topo.num_cores_per_pkg; ++core_no) {
2302 for (thread_no = 0; thread_no <
2303 topo.num_threads_per_core; ++thread_no) {
2304 struct thread_data *t, *t2;
2305 struct core_data *c, *c2;
2306 struct pkg_data *p, *p2;
2307
2308 t = GET_THREAD(thread_base, thread_no, core_no, pkg_no);
2309
2310 if (cpu_is_not_present(t->cpu_id))
2311 continue;
2312
2313 t2 = GET_THREAD(thread_base2, thread_no, core_no, pkg_no);
2314
2315 c = GET_CORE(core_base, core_no, pkg_no);
2316 c2 = GET_CORE(core_base2, core_no, pkg_no);
2317
2318 p = GET_PKG(pkg_base, pkg_no);
2319 p2 = GET_PKG(pkg_base2, pkg_no);
2320
2321 retval = func(t, c, p, t2, c2, p2);
2322 if (retval)
2323 return retval;
2324 }
2325 }
2326 }
2327 return 0;
2328 }
2329
2330 /*
2331 * run func(cpu) on every cpu in /proc/stat
2332 * return max_cpu number
2333 */
2334 int for_all_proc_cpus(int (func)(int))
2335 {
2336 FILE *fp;
2337 int cpu_num;
2338 int retval;
2339
2340 fp = fopen_or_die(proc_stat, "r");
2341
2342 retval = fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n");
2343 if (retval != 0)
2344 err(1, "%s: failed to parse format", proc_stat);
2345
2346 while (1) {
2347 retval = fscanf(fp, "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", &cpu_num);
2348 if (retval != 1)
2349 break;
2350
2351 retval = func(cpu_num);
2352 if (retval) {
2353 fclose(fp);
2354 return(retval);
2355 }
2356 }
2357 fclose(fp);
2358 return 0;
2359 }
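/*
 * for_all_proc_cpus() assumes the standard /proc/stat layout
 * (illustrative excerpt):
 *
 *	cpu  152342 0 40385 1207448 ...
 *	cpu0 76171 0 20192 603724 ...
 *	cpu1 76171 0 20193 603724 ...
 *
 * Only the cpu number is captured; the per-state tick counts are
 * skipped with the "%*d" conversions.
 */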
2360
2361 void re_initialize(void)
2362 {
2363 free_all_buffers();
2364 setup_all_buffers();
2365 printf("turbostat: re-initialized with num_cpus %d\n", topo.num_cpus);
2366 }
2367
2368
2369 /*
2370 * count_cpus()
2371 * remember the last one seen, it will be the max
2372 */
2373 int count_cpus(int cpu)
2374 {
2375 if (topo.max_cpu_num < cpu)
2376 topo.max_cpu_num = cpu;
2377
2378 topo.num_cpus += 1;
2379 return 0;
2380 }
2381 int mark_cpu_present(int cpu)
2382 {
2383 CPU_SET_S(cpu, cpu_present_setsize, cpu_present_set);
2384 return 0;
2385 }
2386
2387 /*
2388 * snapshot_proc_interrupts()
2389 *
2390 * read and record summary of /proc/interrupts
2391 *
2392 * return 1 if config change requires a restart, else return 0
2393 */
2394 int snapshot_proc_interrupts(void)
2395 {
2396 static FILE *fp;
2397 int column, retval;
2398
2399 if (fp == NULL)
2400 fp = fopen_or_die("/proc/interrupts", "r");
2401 else
2402 rewind(fp);
2403
2404 /* read 1st line of /proc/interrupts to get cpu* name for each column */
2405 for (column = 0; column < topo.num_cpus; ++column) {
2406 int cpu_number;
2407
2408 retval = fscanf(fp, " CPU%d", &cpu_number);
2409 if (retval != 1)
2410 break;
2411
2412 if (cpu_number > topo.max_cpu_num) {
2413 warn("/proc/interrupts: cpu%d: > %d", cpu_number, topo.max_cpu_num);
2414 return 1;
2415 }
2416
2417 irq_column_2_cpu[column] = cpu_number;
2418 irqs_per_cpu[cpu_number] = 0;
2419 }
2420
2421 /* read /proc/interrupt count lines and sum up irqs per cpu */
2422 while (1) {
2423 int column;
2424 char buf[64];
2425
2426 retval = fscanf(fp, " %s:", buf); /* flush irq# "N:" */
2427 if (retval != 1)
2428 break;
2429
2430 /* read the count per cpu */
2431 for (column = 0; column < topo.num_cpus; ++column) {
2432
2433 int cpu_number, irq_count;
2434
2435 retval = fscanf(fp, " %d", &irq_count);
2436 if (retval != 1)
2437 break;
2438
2439 cpu_number = irq_column_2_cpu[column];
2440 irqs_per_cpu[cpu_number] += irq_count;
2441
2442 }
2443
2444 while (getc(fp) != '\n')
2445 ; /* flush interrupt description */
2446
2447 }
2448 return 0;
2449 }
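/*
 * snapshot_proc_interrupts() expects the usual /proc/interrupts layout
 * (illustrative excerpt):
 *
 *	           CPU0       CPU1
 *	  0:         42          0   IO-APIC   2-edge      timer
 *	NMI:          5          3   Non-maskable interrupts
 *
 * The header row maps each column to a cpu number, every following row
 * adds its per-column counts to irqs_per_cpu[], and the trailing
 * description text is discarded.
 */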
2450 /*
2451 * snapshot_gfx_rc6_ms()
2452 *
2453 * record snapshot of
2454 * /sys/class/drm/card0/power/rc6_residency_ms
2455 *
2456 * return 1 if config change requires a restart, else return 0
2457 */
2458 int snapshot_gfx_rc6_ms(void)
2459 {
2460 FILE *fp;
2461 int retval;
2462
2463 fp = fopen_or_die("/sys/class/drm/card0/power/rc6_residency_ms", "r");
2464
2465 retval = fscanf(fp, "%llu", &gfx_cur_rc6_ms);
2466 if (retval != 1)
2467 err(1, "GFX rc6");
2468
2469 fclose(fp);
2470
2471 return 0;
2472 }
2473 /*
2474 * snapshot_gfx_mhz()
2475 *
2476 * record snapshot of
2477 * /sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz
2478 *
2479 * return 1 if config change requires a restart, else return 0
2480 */
2481 int snapshot_gfx_mhz(void)
2482 {
2483 static FILE *fp;
2484 int retval;
2485
2486 if (fp == NULL)
2487 fp = fopen_or_die("/sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz", "r");
2488 else
2489 rewind(fp);
2490
2491 retval = fscanf(fp, "%d", &gfx_cur_mhz);
2492 if (retval != 1)
2493 err(1, "GFX MHz");
2494
2495 return 0;
2496 }
2497
2498 /*
2499 * snapshot /proc and /sys files
2500 *
2501 * return 1 if configuration restart needed, else return 0
2502 */
2503 int snapshot_proc_sysfs_files(void)
2504 {
2505 if (DO_BIC(BIC_IRQ))
2506 if (snapshot_proc_interrupts())
2507 return 1;
2508
2509 if (DO_BIC(BIC_GFX_rc6))
2510 snapshot_gfx_rc6_ms();
2511
2512 if (DO_BIC(BIC_GFXMHz))
2513 snapshot_gfx_mhz();
2514
2515 return 0;
2516 }
2517
2518 void turbostat_loop()
2519 {
2520 int retval;
2521 int restarted = 0;
2522
2523 restart:
2524 restarted++;
2525
2526 snapshot_proc_sysfs_files();
2527 retval = for_all_cpus(get_counters, EVEN_COUNTERS);
2528 if (retval < -1) {
2529 exit(retval);
2530 } else if (retval == -1) {
2531 if (restarted > 1) {
2532 exit(retval);
2533 }
2534 re_initialize();
2535 goto restart;
2536 }
2537 restarted = 0;
2538 gettimeofday(&tv_even, (struct timezone *)NULL);
2539
2540 while (1) {
2541 if (for_all_proc_cpus(cpu_is_not_present)) {
2542 re_initialize();
2543 goto restart;
2544 }
2545 nanosleep(&interval_ts, NULL);
2546 if (snapshot_proc_sysfs_files())
2547 goto restart;
2548 retval = for_all_cpus(get_counters, ODD_COUNTERS);
2549 if (retval < -1) {
2550 exit(retval);
2551 } else if (retval == -1) {
2552 re_initialize();
2553 goto restart;
2554 }
2555 gettimeofday(&tv_odd, (struct timezone *)NULL);
2556 timersub(&tv_odd, &tv_even, &tv_delta);
2557 if (for_all_cpus_2(delta_cpu, ODD_COUNTERS, EVEN_COUNTERS)) {
2558 re_initialize();
2559 goto restart;
2560 }
2561 compute_average(EVEN_COUNTERS);
2562 format_all_counters(EVEN_COUNTERS);
2563 flush_output_stdout();
2564 nanosleep(&interval_ts, NULL);
2565 if (snapshot_proc_sysfs_files())
2566 goto restart;
2567 retval = for_all_cpus(get_counters, EVEN_COUNTERS);
2568 if (retval < -1) {
2569 exit(retval);
2570 } else if (retval == -1) {
2571 re_initialize();
2572 goto restart;
2573 }
2574 gettimeofday(&tv_even, (struct timezone *)NULL);
2575 timersub(&tv_even, &tv_odd, &tv_delta);
2576 if (for_all_cpus_2(delta_cpu, EVEN_COUNTERS, ODD_COUNTERS)) {
2577 re_initialize();
2578 goto restart;
2579 }
2580 compute_average(ODD_COUNTERS);
2581 format_all_counters(ODD_COUNTERS);
2582 flush_output_stdout();
2583 }
2584 }
2585
2586 void check_dev_msr()
2587 {
2588 struct stat sb;
2589 char pathname[32];
2590
2591 sprintf(pathname, "/dev/cpu/%d/msr", base_cpu);
2592 if (stat(pathname, &sb))
2593 if (system("/sbin/modprobe msr > /dev/null 2>&1"))
2594 err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" ");
2595 }
2596
2597 void check_permissions()
2598 {
2599 struct __user_cap_header_struct cap_header_data;
2600 cap_user_header_t cap_header = &cap_header_data;
2601 struct __user_cap_data_struct cap_data_data;
2602 cap_user_data_t cap_data = &cap_data_data;
2603 extern int capget(cap_user_header_t hdrp, cap_user_data_t datap);
2604 int do_exit = 0;
2605 char pathname[32];
2606
2607 /* check for CAP_SYS_RAWIO */
2608 cap_header->pid = getpid();
2609 cap_header->version = _LINUX_CAPABILITY_VERSION;
2610 if (capget(cap_header, cap_data) < 0)
2611 err(-6, "capget(2) failed");
2612
2613 if ((cap_data->effective & (1 << CAP_SYS_RAWIO)) == 0) {
2614 do_exit++;
2615 warnx("capget(CAP_SYS_RAWIO) failed,"
2616 " try \"# setcap cap_sys_rawio=ep %s\"", progname);
2617 }
2618
2619 /* test file permissions */
2620 sprintf(pathname, "/dev/cpu/%d/msr", base_cpu);
2621 if (euidaccess(pathname, R_OK)) {
2622 do_exit++;
2623 warn("/dev/cpu/0/msr open failed, try chown or chmod +r /dev/cpu/*/msr");
2624 }
2625
2626 /* if all else fails, tell them to be root */
2627 if (do_exit)
2628 if (getuid() != 0)
2629 warnx("... or simply run as root");
2630
2631 if (do_exit)
2632 exit(-6);
2633 }
2634
2635 /*
2636 * NHM adds support for additional MSRs:
2637 *
2638 * MSR_SMI_COUNT 0x00000034
2639 *
2640 * MSR_PLATFORM_INFO 0x000000ce
2641 * MSR_PKG_CST_CONFIG_CONTROL 0x000000e2
2642 *
2643 * MSR_MISC_PWR_MGMT 0x000001aa
2644 *
2645 * MSR_PKG_C3_RESIDENCY 0x000003f8
2646 * MSR_PKG_C6_RESIDENCY 0x000003f9
2647 * MSR_CORE_C3_RESIDENCY 0x000003fc
2648 * MSR_CORE_C6_RESIDENCY 0x000003fd
2649 *
2650 * Side effect:
2651 * sets global pkg_cstate_limit to decode MSR_PKG_CST_CONFIG_CONTROL
2652 * sets has_misc_feature_control
2653 */
2654 int probe_nhm_msrs(unsigned int family, unsigned int model)
2655 {
2656 unsigned long long msr;
2657 unsigned int base_ratio;
2658 int *pkg_cstate_limits;
2659
2660 if (!genuine_intel)
2661 return 0;
2662
2663 if (family != 6)
2664 return 0;
2665
2666 bclk = discover_bclk(family, model);
2667
2668 switch (model) {
2669 case INTEL_FAM6_NEHALEM_EP: /* Core i7, Xeon 5500 series - Bloomfield, Gainestown NHM-EP */
2670 case INTEL_FAM6_NEHALEM: /* Core i7 and i5 Processor - Clarksfield, Lynnfield, Jasper Forest */
2671 case 0x1F: /* Core i7 and i5 Processor - Nehalem */
2672 case INTEL_FAM6_WESTMERE: /* Westmere Client - Clarkdale, Arrandale */
2673 case INTEL_FAM6_WESTMERE_EP: /* Westmere EP - Gulftown */
2674 case INTEL_FAM6_NEHALEM_EX: /* Nehalem-EX Xeon - Beckton */
2675 case INTEL_FAM6_WESTMERE_EX: /* Westmere-EX Xeon - Eagleton */
2676 pkg_cstate_limits = nhm_pkg_cstate_limits;
2677 break;
2678 case INTEL_FAM6_SANDYBRIDGE: /* SNB */
2679 case INTEL_FAM6_SANDYBRIDGE_X: /* SNB Xeon */
2680 case INTEL_FAM6_IVYBRIDGE: /* IVB */
2681 case INTEL_FAM6_IVYBRIDGE_X: /* IVB Xeon */
2682 pkg_cstate_limits = snb_pkg_cstate_limits;
2683 has_misc_feature_control = 1;
2684 break;
2685 case INTEL_FAM6_HASWELL_CORE: /* HSW */
2686 case INTEL_FAM6_HASWELL_X: /* HSX */
2687 case INTEL_FAM6_HASWELL_ULT: /* HSW */
2688 case INTEL_FAM6_HASWELL_GT3E: /* HSW */
2689 case INTEL_FAM6_BROADWELL_CORE: /* BDW */
2690 case INTEL_FAM6_BROADWELL_GT3E: /* BDW */
2691 case INTEL_FAM6_BROADWELL_X: /* BDX */
2692 case INTEL_FAM6_BROADWELL_XEON_D: /* BDX-DE */
2693 case INTEL_FAM6_SKYLAKE_MOBILE: /* SKL */
2694 case INTEL_FAM6_SKYLAKE_DESKTOP: /* SKL */
2695 case INTEL_FAM6_KABYLAKE_MOBILE: /* KBL */
2696 case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */
2697 pkg_cstate_limits = hsw_pkg_cstate_limits;
2698 has_misc_feature_control = 1;
2699 break;
2700 case INTEL_FAM6_SKYLAKE_X: /* SKX */
2701 pkg_cstate_limits = skx_pkg_cstate_limits;
2702 has_misc_feature_control = 1;
2703 break;
2704 case INTEL_FAM6_ATOM_SILVERMONT1: /* BYT */
2705 no_MSR_MISC_PWR_MGMT = 1;
2706 case INTEL_FAM6_ATOM_SILVERMONT2: /* AVN */
2707 pkg_cstate_limits = slv_pkg_cstate_limits;
2708 break;
2709 case INTEL_FAM6_ATOM_AIRMONT: /* AMT */
2710 pkg_cstate_limits = amt_pkg_cstate_limits;
2711 no_MSR_MISC_PWR_MGMT = 1;
2712 break;
2713 case INTEL_FAM6_XEON_PHI_KNL: /* PHI */
2714 case INTEL_FAM6_XEON_PHI_KNM:
2715 pkg_cstate_limits = phi_pkg_cstate_limits;
2716 break;
2717 case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */
2718 case INTEL_FAM6_ATOM_GEMINI_LAKE:
2719 case INTEL_FAM6_ATOM_DENVERTON: /* DNV */
2720 pkg_cstate_limits = bxt_pkg_cstate_limits;
2721 break;
2722 default:
2723 return 0;
2724 }
2725 get_msr(base_cpu, MSR_PKG_CST_CONFIG_CONTROL, &msr);
2726 pkg_cstate_limit = pkg_cstate_limits[msr & 0xF];
2727
2728 get_msr(base_cpu, MSR_PLATFORM_INFO, &msr);
2729 base_ratio = (msr >> 8) & 0xFF;
2730
2731 base_hz = base_ratio * bclk * 1000000;
2732 has_base_hz = 1;
2733 return 1;
2734 }
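/*
 * Worked example for the tail of probe_nhm_msrs(): with bclk = 100.0 MHz
 * and MSR_PLATFORM_INFO[15:8] = 0x23 (35), base_hz = 35 * 100.0 * 1000000,
 * i.e. a 3.5 GHz base frequency.
 */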
2735 /*
2736 * SLV client has support for unique MSRs:
2737 *
2738 * MSR_CC6_DEMOTION_POLICY_CONFIG
2739 * MSR_MC6_DEMOTION_POLICY_CONFIG
2740 */
2741
2742 int has_slv_msrs(unsigned int family, unsigned int model)
2743 {
2744 if (!genuine_intel)
2745 return 0;
2746
2747 switch (model) {
2748 case INTEL_FAM6_ATOM_SILVERMONT1:
2749 case INTEL_FAM6_ATOM_MERRIFIELD:
2750 case INTEL_FAM6_ATOM_MOOREFIELD:
2751 return 1;
2752 }
2753 return 0;
2754 }
2755 int is_dnv(unsigned int family, unsigned int model)
2756 {
2757
2758 if (!genuine_intel)
2759 return 0;
2760
2761 switch (model) {
2762 case INTEL_FAM6_ATOM_DENVERTON:
2763 return 1;
2764 }
2765 return 0;
2766 }
2767 int is_bdx(unsigned int family, unsigned int model)
2768 {
2769
2770 if (!genuine_intel)
2771 return 0;
2772
2773 switch (model) {
2774 case INTEL_FAM6_BROADWELL_X:
2775 case INTEL_FAM6_BROADWELL_XEON_D:
2776 return 1;
2777 }
2778 return 0;
2779 }
2780 int is_skx(unsigned int family, unsigned int model)
2781 {
2782
2783 if (!genuine_intel)
2784 return 0;
2785
2786 switch (model) {
2787 case INTEL_FAM6_SKYLAKE_X:
2788 return 1;
2789 }
2790 return 0;
2791 }
2792
2793 int has_turbo_ratio_limit(unsigned int family, unsigned int model)
2794 {
2795 if (has_slv_msrs(family, model))
2796 return 0;
2797
2798 switch (model) {
2799 /* Nehalem compatible, but do not include turbo-ratio limit support */
2800 case INTEL_FAM6_NEHALEM_EX: /* Nehalem-EX Xeon - Beckton */
2801 case INTEL_FAM6_WESTMERE_EX: /* Westmere-EX Xeon - Eagleton */
2802 case INTEL_FAM6_XEON_PHI_KNL: /* PHI - Knights Landing (different MSR definition) */
2803 case INTEL_FAM6_XEON_PHI_KNM:
2804 return 0;
2805 default:
2806 return 1;
2807 }
2808 }
2809 int has_atom_turbo_ratio_limit(unsigned int family, unsigned int model)
2810 {
2811 if (has_slv_msrs(family, model))
2812 return 1;
2813
2814 return 0;
2815 }
2816 int has_ivt_turbo_ratio_limit(unsigned int family, unsigned int model)
2817 {
2818 if (!genuine_intel)
2819 return 0;
2820
2821 if (family != 6)
2822 return 0;
2823
2824 switch (model) {
2825 case INTEL_FAM6_IVYBRIDGE_X: /* IVB Xeon */
2826 case INTEL_FAM6_HASWELL_X: /* HSW Xeon */
2827 return 1;
2828 default:
2829 return 0;
2830 }
2831 }
2832 int has_hsw_turbo_ratio_limit(unsigned int family, unsigned int model)
2833 {
2834 if (!genuine_intel)
2835 return 0;
2836
2837 if (family != 6)
2838 return 0;
2839
2840 switch (model) {
2841 case INTEL_FAM6_HASWELL_X: /* HSW Xeon */
2842 return 1;
2843 default:
2844 return 0;
2845 }
2846 }
2847
2848 int has_knl_turbo_ratio_limit(unsigned int family, unsigned int model)
2849 {
2850 if (!genuine_intel)
2851 return 0;
2852
2853 if (family != 6)
2854 return 0;
2855
2856 switch (model) {
2857 case INTEL_FAM6_XEON_PHI_KNL: /* Knights Landing */
2858 case INTEL_FAM6_XEON_PHI_KNM:
2859 return 1;
2860 default:
2861 return 0;
2862 }
2863 }
2864 int has_glm_turbo_ratio_limit(unsigned int family, unsigned int model)
2865 {
2866 if (!genuine_intel)
2867 return 0;
2868
2869 if (family != 6)
2870 return 0;
2871
2872 switch (model) {
2873 case INTEL_FAM6_ATOM_GOLDMONT:
2874 case INTEL_FAM6_SKYLAKE_X:
2875 return 1;
2876 default:
2877 return 0;
2878 }
2879 }
2880 int has_config_tdp(unsigned int family, unsigned int model)
2881 {
2882 if (!genuine_intel)
2883 return 0;
2884
2885 if (family != 6)
2886 return 0;
2887
2888 switch (model) {
2889 case INTEL_FAM6_IVYBRIDGE: /* IVB */
2890 case INTEL_FAM6_HASWELL_CORE: /* HSW */
2891 case INTEL_FAM6_HASWELL_X: /* HSX */
2892 case INTEL_FAM6_HASWELL_ULT: /* HSW */
2893 case INTEL_FAM6_HASWELL_GT3E: /* HSW */
2894 case INTEL_FAM6_BROADWELL_CORE: /* BDW */
2895 case INTEL_FAM6_BROADWELL_GT3E: /* BDW */
2896 case INTEL_FAM6_BROADWELL_X: /* BDX */
2897 case INTEL_FAM6_BROADWELL_XEON_D: /* BDX-DE */
2898 case INTEL_FAM6_SKYLAKE_MOBILE: /* SKL */
2899 case INTEL_FAM6_SKYLAKE_DESKTOP: /* SKL */
2900 case INTEL_FAM6_KABYLAKE_MOBILE: /* KBL */
2901 case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */
2902 case INTEL_FAM6_SKYLAKE_X: /* SKX */
2903
2904 case INTEL_FAM6_XEON_PHI_KNL: /* Knights Landing */
2905 case INTEL_FAM6_XEON_PHI_KNM:
2906 return 1;
2907 default:
2908 return 0;
2909 }
2910 }
2911
2912 static void
2913 dump_cstate_pstate_config_info(unsigned int family, unsigned int model)
2914 {
2915 if (!do_nhm_platform_info)
2916 return;
2917
2918 dump_nhm_platform_info();
2919
2920 if (has_hsw_turbo_ratio_limit(family, model))
2921 dump_hsw_turbo_ratio_limits();
2922
2923 if (has_ivt_turbo_ratio_limit(family, model))
2924 dump_ivt_turbo_ratio_limits();
2925
2926 if (has_turbo_ratio_limit(family, model))
2927 dump_turbo_ratio_limits(family, model);
2928
2929 if (has_atom_turbo_ratio_limit(family, model))
2930 dump_atom_turbo_ratio_limits();
2931
2932 if (has_knl_turbo_ratio_limit(family, model))
2933 dump_knl_turbo_ratio_limits();
2934
2935 if (has_config_tdp(family, model))
2936 dump_config_tdp();
2937
2938 dump_nhm_cst_cfg();
2939 }
2940
2941 static void
2942 dump_sysfs_cstate_config(void)
2943 {
2944 char path[64];
2945 char name_buf[16];
2946 char desc[64];
2947 FILE *input;
2948 int state;
2949 char *sp;
2950
2951 if (!DO_BIC(BIC_sysfs))
2952 return;
2953
2954 for (state = 0; state < 10; ++state) {
2955
2956 sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/name",
2957 base_cpu, state);
2958 input = fopen(path, "r");
2959 if (input == NULL)
2960 continue;
2961 fgets(name_buf, sizeof(name_buf), input);
2962
2963 /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */
2964 sp = strchr(name_buf, '-');
2965 if (!sp)
2966 sp = strchrnul(name_buf, '\n');
2967 *sp = '\0';
2968
2969 fclose(input);
2970
2971 sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/desc",
2972 base_cpu, state);
2973 input = fopen(path, "r");
2974 if (input == NULL)
2975 continue;
2976 fgets(desc, sizeof(desc), input);
2977
2978 fprintf(outf, "cpu%d: %s: %s", base_cpu, name_buf, desc);
2979 fclose(input);
2980 }
2981 }
2982 static void
2983 dump_sysfs_pstate_config(void)
2984 {
2985 char path[64];
2986 char driver_buf[64];
2987 char governor_buf[64];
2988 FILE *input;
2989 int turbo;
2990
2991 sprintf(path, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_driver",
2992 base_cpu);
2993 input = fopen(path, "r");
2994 if (input == NULL) {
2995 fprintf(stderr, "NSFOD %s\n", path);
2996 return;
2997 }
2998 fgets(driver_buf, sizeof(driver_buf), input);
2999 fclose(input);
3000
3001 sprintf(path, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_governor",
3002 base_cpu);
3003 input = fopen(path, "r");
3004 if (input == NULL) {
3005 fprintf(stderr, "NSFOD %s\n", path);
3006 return;
3007 }
3008 fgets(governor_buf, sizeof(governor_buf), input);
3009 fclose(input);
3010
3011 fprintf(outf, "cpu%d: cpufreq driver: %s", base_cpu, driver_buf);
3012 fprintf(outf, "cpu%d: cpufreq governor: %s", base_cpu, governor_buf);
3013
3014 sprintf(path, "/sys/devices/system/cpu/cpufreq/boost");
3015 input = fopen(path, "r");
3016 if (input != NULL) {
3017 fscanf(input, "%d", &turbo);
3018 fprintf(outf, "cpufreq boost: %d\n", turbo);
3019 fclose(input);
3020 }
3021
3022 sprintf(path, "/sys/devices/system/cpu/intel_pstate/no_turbo");
3023 input = fopen(path, "r");
3024 if (input != NULL) {
3025 fscanf(input, "%d", &turbo);
3026 fprintf(outf, "cpufreq intel_pstate no_turbo: %d\n", turbo);
3027 fclose(input);
3028 }
3029 }
3030
3031
3032 /*
3033 * print_epb()
3034 * Decode the ENERGY_PERF_BIAS MSR
3035 */
3036 int print_epb(struct thread_data *t, struct core_data *c, struct pkg_data *p)
3037 {
3038 unsigned long long msr;
3039 char *epb_string;
3040 int cpu;
3041
3042 if (!has_epb)
3043 return 0;
3044
3045 cpu = t->cpu_id;
3046
3047 /* EPB is per-package */
3048 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
3049 return 0;
3050
3051 if (cpu_migrate(cpu)) {
3052 fprintf(outf, "Could not migrate to CPU %d\n", cpu);
3053 return -1;
3054 }
3055
3056 if (get_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS, &msr))
3057 return 0;
3058
3059 switch (msr & 0xF) {
3060 case ENERGY_PERF_BIAS_PERFORMANCE:
3061 epb_string = "performance";
3062 break;
3063 case ENERGY_PERF_BIAS_NORMAL:
3064 epb_string = "balanced";
3065 break;
3066 case ENERGY_PERF_BIAS_POWERSAVE:
3067 epb_string = "powersave";
3068 break;
3069 default:
3070 epb_string = "custom";
3071 break;
3072 }
3073 fprintf(outf, "cpu%d: MSR_IA32_ENERGY_PERF_BIAS: 0x%08llx (%s)\n", cpu, msr, epb_string);
3074
3075 return 0;
3076 }
3077 /*
3078 * print_hwp()
3079 * Decode the MSR_HWP_CAPABILITIES
3080 */
3081 int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p)
3082 {
3083 unsigned long long msr;
3084 int cpu;
3085
3086 if (!has_hwp)
3087 return 0;
3088
3089 cpu = t->cpu_id;
3090
3091 /* MSR_HWP_CAPABILITIES is per-package */
3092 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
3093 return 0;
3094
3095 if (cpu_migrate(cpu)) {
3096 fprintf(outf, "Could not migrate to CPU %d\n", cpu);
3097 return -1;
3098 }
3099
3100 if (get_msr(cpu, MSR_PM_ENABLE, &msr))
3101 return 0;
3102
3103 fprintf(outf, "cpu%d: MSR_PM_ENABLE: 0x%08llx (%sHWP)\n",
3104 cpu, msr, (msr & (1 << 0)) ? "" : "No-");
3105
3106 /* MSR_PM_ENABLE[0] == 1 if HWP is enabled and its MSRs are visible */
3107 if ((msr & (1 << 0)) == 0)
3108 return 0;
3109
3110 if (get_msr(cpu, MSR_HWP_CAPABILITIES, &msr))
3111 return 0;
3112
3113 fprintf(outf, "cpu%d: MSR_HWP_CAPABILITIES: 0x%08llx "
3114 "(high 0x%x guar 0x%x eff 0x%x low 0x%x)\n",
3115 cpu, msr,
3116 (unsigned int)HWP_HIGHEST_PERF(msr),
3117 (unsigned int)HWP_GUARANTEED_PERF(msr),
3118 (unsigned int)HWP_MOSTEFFICIENT_PERF(msr),
3119 (unsigned int)HWP_LOWEST_PERF(msr));
3120
3121 if (get_msr(cpu, MSR_HWP_REQUEST, &msr))
3122 return 0;
3123
3124 fprintf(outf, "cpu%d: MSR_HWP_REQUEST: 0x%08llx "
3125 "(min 0x%x max 0x%x des 0x%x epp 0x%x window 0x%x pkg 0x%x)\n",
3126 cpu, msr,
3127 (unsigned int)(((msr) >> 0) & 0xff),
3128 (unsigned int)(((msr) >> 8) & 0xff),
3129 (unsigned int)(((msr) >> 16) & 0xff),
3130 (unsigned int)(((msr) >> 24) & 0xff),
3131 (unsigned int)(((msr) >> 32) & 0x3ff),
3132 (unsigned int)(((msr) >> 42) & 0x1));
3133
3134 if (has_hwp_pkg) {
3135 if (get_msr(cpu, MSR_HWP_REQUEST_PKG, &msr))
3136 return 0;
3137
3138 fprintf(outf, "cpu%d: MSR_HWP_REQUEST_PKG: 0x%08llx "
3139 "(min 0x%x max 0x%x des 0x%x epp 0x%x window 0x%x)\n",
3140 cpu, msr,
3141 (unsigned int)(((msr) >> 0) & 0xff),
3142 (unsigned int)(((msr) >> 8) & 0xff),
3143 (unsigned int)(((msr) >> 16) & 0xff),
3144 (unsigned int)(((msr) >> 24) & 0xff),
3145 (unsigned int)(((msr) >> 32) & 0x3ff));
3146 }
3147 if (has_hwp_notify) {
3148 if (get_msr(cpu, MSR_HWP_INTERRUPT, &msr))
3149 return 0;
3150
3151 fprintf(outf, "cpu%d: MSR_HWP_INTERRUPT: 0x%08llx "
3152 "(%s_Guaranteed_Perf_Change, %s_Excursion_Min)\n",
3153 cpu, msr,
3154 ((msr) & 0x1) ? "EN" : "Dis",
3155 ((msr) & 0x2) ? "EN" : "Dis");
3156 }
3157 if (get_msr(cpu, MSR_HWP_STATUS, &msr))
3158 return 0;
3159
3160 fprintf(outf, "cpu%d: MSR_HWP_STATUS: 0x%08llx "
3161 "(%sGuaranteed_Perf_Change, %sExcursion_Min)\n",
3162 cpu, msr,
3163 ((msr) & 0x1) ? "" : "No-",
3164 ((msr) & 0x2) ? "" : "No-");
3165
3166 return 0;
3167 }
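/*
 * Example decode of a hypothetical MSR_HWP_REQUEST value, 0x80002704:
 * min 0x04, max 0x27, desired 0x00, epp 0x80, activity window 0,
 * package control 0, which print_hwp() above reports as
 * "(min 0x4 max 0x27 des 0x0 epp 0x80 window 0x0 pkg 0x0)".
 */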
3168
3169 /*
3170 * print_perf_limit()
3171 */
3172 int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data *p)
3173 {
3174 unsigned long long msr;
3175 int cpu;
3176
3177 cpu = t->cpu_id;
3178
3179 /* per-package */
3180 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
3181 return 0;
3182
3183 if (cpu_migrate(cpu)) {
3184 fprintf(outf, "Could not migrate to CPU %d\n", cpu);
3185 return -1;
3186 }
3187
3188 if (do_core_perf_limit_reasons) {
3189 get_msr(cpu, MSR_CORE_PERF_LIMIT_REASONS, &msr);
3190 fprintf(outf, "cpu%d: MSR_CORE_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr);
3191 fprintf(outf, " (Active: %s%s%s%s%s%s%s%s%s%s%s%s%s%s)",
3192 (msr & 1 << 15) ? "bit15, " : "",
3193 (msr & 1 << 14) ? "bit14, " : "",
3194 (msr & 1 << 13) ? "Transitions, " : "",
3195 (msr & 1 << 12) ? "MultiCoreTurbo, " : "",
3196 (msr & 1 << 11) ? "PkgPwrL2, " : "",
3197 (msr & 1 << 10) ? "PkgPwrL1, " : "",
3198 (msr & 1 << 9) ? "CorePwr, " : "",
3199 (msr & 1 << 8) ? "Amps, " : "",
3200 (msr & 1 << 6) ? "VR-Therm, " : "",
3201 (msr & 1 << 5) ? "Auto-HWP, " : "",
3202 (msr & 1 << 4) ? "Graphics, " : "",
3203 (msr & 1 << 2) ? "bit2, " : "",
3204 (msr & 1 << 1) ? "ThermStatus, " : "",
3205 (msr & 1 << 0) ? "PROCHOT, " : "");
3206 fprintf(outf, " (Logged: %s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n",
3207 (msr & 1 << 31) ? "bit31, " : "",
3208 (msr & 1 << 30) ? "bit30, " : "",
3209 (msr & 1 << 29) ? "Transitions, " : "",
3210 (msr & 1 << 28) ? "MultiCoreTurbo, " : "",
3211 (msr & 1 << 27) ? "PkgPwrL2, " : "",
3212 (msr & 1 << 26) ? "PkgPwrL1, " : "",
3213 (msr & 1 << 25) ? "CorePwr, " : "",
3214 (msr & 1 << 24) ? "Amps, " : "",
3215 (msr & 1 << 22) ? "VR-Therm, " : "",
3216 (msr & 1 << 21) ? "Auto-HWP, " : "",
3217 (msr & 1 << 20) ? "Graphics, " : "",
3218 (msr & 1 << 18) ? "bit18, " : "",
3219 (msr & 1 << 17) ? "ThermStatus, " : "",
3220 (msr & 1 << 16) ? "PROCHOT, " : "");
3221
3222 }
3223 if (do_gfx_perf_limit_reasons) {
3224 get_msr(cpu, MSR_GFX_PERF_LIMIT_REASONS, &msr);
3225 fprintf(outf, "cpu%d: MSR_GFX_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr);
3226 fprintf(outf, " (Active: %s%s%s%s%s%s%s%s)",
3227 (msr & 1 << 0) ? "PROCHOT, " : "",
3228 (msr & 1 << 1) ? "ThermStatus, " : "",
3229 (msr & 1 << 4) ? "Graphics, " : "",
3230 (msr & 1 << 6) ? "VR-Therm, " : "",
3231 (msr & 1 << 8) ? "Amps, " : "",
3232 (msr & 1 << 9) ? "GFXPwr, " : "",
3233 (msr & 1 << 10) ? "PkgPwrL1, " : "",
3234 (msr & 1 << 11) ? "PkgPwrL2, " : "");
3235 fprintf(outf, " (Logged: %s%s%s%s%s%s%s%s)\n",
3236 (msr & 1 << 16) ? "PROCHOT, " : "",
3237 (msr & 1 << 17) ? "ThermStatus, " : "",
3238 (msr & 1 << 20) ? "Graphics, " : "",
3239 (msr & 1 << 22) ? "VR-Therm, " : "",
3240 (msr & 1 << 24) ? "Amps, " : "",
3241 (msr & 1 << 25) ? "GFXPwr, " : "",
3242 (msr & 1 << 26) ? "PkgPwrL1, " : "",
3243 (msr & 1 << 27) ? "PkgPwrL2, " : "");
3244 }
3245 if (do_ring_perf_limit_reasons) {
3246 get_msr(cpu, MSR_RING_PERF_LIMIT_REASONS, &msr);
3247 fprintf(outf, "cpu%d: MSR_RING_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr);
3248 fprintf(outf, " (Active: %s%s%s%s%s%s)",
3249 (msr & 1 << 0) ? "PROCHOT, " : "",
3250 (msr & 1 << 1) ? "ThermStatus, " : "",
3251 (msr & 1 << 6) ? "VR-Therm, " : "",
3252 (msr & 1 << 8) ? "Amps, " : "",
3253 (msr & 1 << 10) ? "PkgPwrL1, " : "",
3254 (msr & 1 << 11) ? "PkgPwrL2, " : "");
3255 fprintf(outf, " (Logged: %s%s%s%s%s%s)\n",
3256 (msr & 1 << 16) ? "PROCHOT, " : "",
3257 (msr & 1 << 17) ? "ThermStatus, " : "",
3258 (msr & 1 << 22) ? "VR-Therm, " : "",
3259 (msr & 1 << 24) ? "Amps, " : "",
3260 (msr & 1 << 26) ? "PkgPwrL1, " : "",
3261 (msr & 1 << 27) ? "PkgPwrL2, " : "");
3262 }
3263 return 0;
3264 }
3265
3266 #define RAPL_POWER_GRANULARITY 0x7FFF /* 15 bit power granularity */
3267 #define RAPL_TIME_GRANULARITY 0x3F /* 6 bit time granularity */
3268
3269 double get_tdp(unsigned int model)
3270 {
3271 unsigned long long msr;
3272
3273 if (do_rapl & RAPL_PKG_POWER_INFO)
3274 if (!get_msr(base_cpu, MSR_PKG_POWER_INFO, &msr))
3275 return ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units;
3276
3277 switch (model) {
3278 case INTEL_FAM6_ATOM_SILVERMONT1:
3279 case INTEL_FAM6_ATOM_SILVERMONT2:
3280 return 30.0;
3281 default:
3282 return 135.0;
3283 }
3284 }
3285
3286 /*
3287 * rapl_dram_energy_units_probe()
3288 * Energy units are either hard-coded, or come from RAPL Energy Unit MSR.
3289 */
3290 static double
3291 rapl_dram_energy_units_probe(int model, double rapl_energy_units)
3292 {
3293 /* only called for genuine_intel, family 6 */
3294
3295 switch (model) {
3296 case INTEL_FAM6_HASWELL_X: /* HSX */
3297 case INTEL_FAM6_BROADWELL_X: /* BDX */
3298 case INTEL_FAM6_BROADWELL_XEON_D: /* BDX-DE */
3299 case INTEL_FAM6_XEON_PHI_KNL: /* KNL */
3300 case INTEL_FAM6_XEON_PHI_KNM:
3301 return (rapl_dram_energy_units = 15.3 / 1000000);
3302 default:
3303 return (rapl_energy_units);
3304 }
3305 }
3306
3307
3308 /*
3309 * rapl_probe()
3310 *
3311 * sets do_rapl, rapl_power_units, rapl_energy_units, rapl_time_units
3312 */
3313 void rapl_probe(unsigned int family, unsigned int model)
3314 {
3315 unsigned long long msr;
3316 unsigned int time_unit;
3317 double tdp;
3318
3319 if (!genuine_intel)
3320 return;
3321
3322 if (family != 6)
3323 return;
3324
3325 switch (model) {
3326 case INTEL_FAM6_SANDYBRIDGE:
3327 case INTEL_FAM6_IVYBRIDGE:
3328 case INTEL_FAM6_HASWELL_CORE: /* HSW */
3329 case INTEL_FAM6_HASWELL_ULT: /* HSW */
3330 case INTEL_FAM6_HASWELL_GT3E: /* HSW */
3331 case INTEL_FAM6_BROADWELL_CORE: /* BDW */
3332 case INTEL_FAM6_BROADWELL_GT3E: /* BDW */
3333 do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_GFX | RAPL_PKG_POWER_INFO;
3334 if (rapl_joules) {
3335 BIC_PRESENT(BIC_Pkg_J);
3336 BIC_PRESENT(BIC_Cor_J);
3337 BIC_PRESENT(BIC_GFX_J);
3338 } else {
3339 BIC_PRESENT(BIC_PkgWatt);
3340 BIC_PRESENT(BIC_CorWatt);
3341 BIC_PRESENT(BIC_GFXWatt);
3342 }
3343 break;
3344 case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */
3345 case INTEL_FAM6_ATOM_GEMINI_LAKE:
3346 do_rapl = RAPL_PKG | RAPL_PKG_POWER_INFO;
3347 if (rapl_joules)
3348 BIC_PRESENT(BIC_Pkg_J);
3349 else
3350 BIC_PRESENT(BIC_PkgWatt);
3351 break;
3352 case INTEL_FAM6_SKYLAKE_MOBILE: /* SKL */
3353 case INTEL_FAM6_SKYLAKE_DESKTOP: /* SKL */
3354 case INTEL_FAM6_KABYLAKE_MOBILE: /* KBL */
3355 case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */
3356 do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO;
3357 BIC_PRESENT(BIC_PKG__);
3358 BIC_PRESENT(BIC_RAM__);
3359 if (rapl_joules) {
3360 BIC_PRESENT(BIC_Pkg_J);
3361 BIC_PRESENT(BIC_Cor_J);
3362 BIC_PRESENT(BIC_RAM_J);
3363 } else {
3364 BIC_PRESENT(BIC_PkgWatt);
3365 BIC_PRESENT(BIC_CorWatt);
3366 BIC_PRESENT(BIC_RAMWatt);
3367 }
3368 break;
3369 case INTEL_FAM6_HASWELL_X: /* HSX */
3370 case INTEL_FAM6_BROADWELL_X: /* BDX */
3371 case INTEL_FAM6_BROADWELL_XEON_D: /* BDX-DE */
3372 case INTEL_FAM6_SKYLAKE_X: /* SKX */
3373 case INTEL_FAM6_XEON_PHI_KNL: /* KNL */
3374 case INTEL_FAM6_XEON_PHI_KNM:
3375 do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO;
3376 BIC_PRESENT(BIC_PKG__);
3377 BIC_PRESENT(BIC_RAM__);
3378 if (rapl_joules) {
3379 BIC_PRESENT(BIC_Pkg_J);
3380 BIC_PRESENT(BIC_RAM_J);
3381 } else {
3382 BIC_PRESENT(BIC_PkgWatt);
3383 BIC_PRESENT(BIC_RAMWatt);
3384 }
3385 break;
3386 case INTEL_FAM6_SANDYBRIDGE_X:
3387 case INTEL_FAM6_IVYBRIDGE_X:
3388 do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_PKG_PERF_STATUS | RAPL_DRAM_PERF_STATUS | RAPL_PKG_POWER_INFO;
3389 BIC_PRESENT(BIC_PKG__);
3390 BIC_PRESENT(BIC_RAM__);
3391 if (rapl_joules) {
3392 BIC_PRESENT(BIC_Pkg_J);
3393 BIC_PRESENT(BIC_Cor_J);
3394 BIC_PRESENT(BIC_RAM_J);
3395 } else {
3396 BIC_PRESENT(BIC_PkgWatt);
3397 BIC_PRESENT(BIC_CorWatt);
3398 BIC_PRESENT(BIC_RAMWatt);
3399 }
3400 break;
3401 case INTEL_FAM6_ATOM_SILVERMONT1: /* BYT */
3402 case INTEL_FAM6_ATOM_SILVERMONT2: /* AVN */
3403 do_rapl = RAPL_PKG | RAPL_CORES;
3404 if (rapl_joules) {
3405 BIC_PRESENT(BIC_Pkg_J);
3406 BIC_PRESENT(BIC_Cor_J);
3407 } else {
3408 BIC_PRESENT(BIC_PkgWatt);
3409 BIC_PRESENT(BIC_CorWatt);
3410 }
3411 break;
3412 case INTEL_FAM6_ATOM_DENVERTON: /* DNV */
3413 do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO | RAPL_CORES_ENERGY_STATUS;
3414 BIC_PRESENT(BIC_PKG__);
3415 BIC_PRESENT(BIC_RAM__);
3416 if (rapl_joules) {
3417 BIC_PRESENT(BIC_Pkg_J);
3418 BIC_PRESENT(BIC_Cor_J);
3419 BIC_PRESENT(BIC_RAM_J);
3420 } else {
3421 BIC_PRESENT(BIC_PkgWatt);
3422 BIC_PRESENT(BIC_CorWatt);
3423 BIC_PRESENT(BIC_RAMWatt);
3424 }
3425 break;
3426 default:
3427 return;
3428 }
3429
3430 /* units on package 0, verify later other packages match */
3431 if (get_msr(base_cpu, MSR_RAPL_POWER_UNIT, &msr))
3432 return;
3433
3434 rapl_power_units = 1.0 / (1 << (msr & 0xF));
3435 if (model == INTEL_FAM6_ATOM_SILVERMONT1)
3436 rapl_energy_units = 1.0 * (1 << (msr >> 8 & 0x1F)) / 1000000;
3437 else
3438 rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F));
3439
3440 rapl_dram_energy_units = rapl_dram_energy_units_probe(model, rapl_energy_units);
3441
3442 time_unit = msr >> 16 & 0xF;
3443 if (time_unit == 0)
3444 time_unit = 0xA;
3445
3446 rapl_time_units = 1.0 / (1 << (time_unit));
3447
3448 tdp = get_tdp(model);
3449
3450 rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp;
3451 if (!quiet)
3452 fprintf(outf, "RAPL: %.0f sec. Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp);
3453
3454 return;
3455 }
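/*
 * Worked example: MSR_RAPL_POWER_UNIT = 0x000A0E03, a value commonly
 * seen on client parts, yields rapl_power_units = 1/(1<<3) = 0.125 W,
 * rapl_energy_units = 1/(1<<14) = ~61 micro-joules, and
 * rapl_time_units = 1/(1<<10) = ~0.977 ms.  With the 135 W default
 * TDP, the 32-bit energy counter then wraps after roughly
 * 2^32 * 2^-14 / 135 = ~1942 seconds.
 */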
3456
3457 void perf_limit_reasons_probe(unsigned int family, unsigned int model)
3458 {
3459 if (!genuine_intel)
3460 return;
3461
3462 if (family != 6)
3463 return;
3464
3465 switch (model) {
3466 case INTEL_FAM6_HASWELL_CORE: /* HSW */
3467 case INTEL_FAM6_HASWELL_ULT: /* HSW */
3468 case INTEL_FAM6_HASWELL_GT3E: /* HSW */
3469 do_gfx_perf_limit_reasons = 1;
3470 case INTEL_FAM6_HASWELL_X: /* HSX */
3471 do_core_perf_limit_reasons = 1;
3472 do_ring_perf_limit_reasons = 1;
3473 default:
3474 return;
3475 }
3476 }
3477
3478 int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p)
3479 {
3480 unsigned long long msr;
3481 unsigned int dts, dts2;
3482 int cpu;
3483
3484 if (!(do_dts || do_ptm))
3485 return 0;
3486
3487 cpu = t->cpu_id;
3488
3489 /* DTS is per-core, no need to print for each thread */
3490 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
3491 return 0;
3492
3493 if (cpu_migrate(cpu)) {
3494 fprintf(outf, "Could not migrate to CPU %d\n", cpu);
3495 return -1;
3496 }
3497
3498 if (do_ptm && (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) {
3499 if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr))
3500 return 0;
3501
3502 dts = (msr >> 16) & 0x7F;
3503 fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_STATUS: 0x%08llx (%d C)\n",
3504 cpu, msr, tcc_activation_temp - dts);
3505
3506 #ifdef THERM_DEBUG
3507 if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &msr))
3508 return 0;
3509
3510 dts = (msr >> 16) & 0x7F;
3511 dts2 = (msr >> 8) & 0x7F;
3512 fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n",
3513 cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2);
3514 #endif
3515 }
3516
3517
3518 if (do_dts) {
3519 unsigned int resolution;
3520
3521 if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr))
3522 return 0;
3523
3524 dts = (msr >> 16) & 0x7F;
3525 resolution = (msr >> 27) & 0xF;
3526 fprintf(outf, "cpu%d: MSR_IA32_THERM_STATUS: 0x%08llx (%d C +/- %d)\n",
3527 cpu, msr, tcc_activation_temp - dts, resolution);
3528
3529 #ifdef THERM_DEBUG
3530 if (get_msr(cpu, MSR_IA32_THERM_INTERRUPT, &msr))
3531 return 0;
3532
3533 dts = (msr >> 16) & 0x7F;
3534 dts2 = (msr >> 8) & 0x7F;
3535 fprintf(outf, "cpu%d: MSR_IA32_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n",
3536 cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2);
3537 #endif
3538 }
3539
3540 return 0;
3541 }
3542
3543 void print_power_limit_msr(int cpu, unsigned long long msr, char *label)
3544 {
3545 fprintf(outf, "cpu%d: %s: %sabled (%f Watts, %f sec, clamp %sabled)\n",
3546 cpu, label,
3547 ((msr >> 15) & 1) ? "EN" : "DIS",
3548 ((msr >> 0) & 0x7FFF) * rapl_power_units,
3549 (1.0 + (((msr >> 22) & 0x3)/4.0)) * (1 << ((msr >> 17) & 0x1F)) * rapl_time_units,
3550 (((msr >> 16) & 1) ? "EN" : "DIS"));
3551
3552 return;
3553 }
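/*
 * Worked example of the time-window encoding printed above: with
 * bits [23:22] = 1, bits [21:17] = 4 and rapl_time_units = ~0.977 ms,
 * the window is (1 + 1/4) * (1 << 4) * 0.000977 = ~0.0195 sec.
 */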
3554
3555 int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
3556 {
3557 unsigned long long msr;
3558 int cpu;
3559
3560 if (!do_rapl)
3561 return 0;
3562
3563 /* RAPL counters are per package, so print only for 1st thread/package */
3564 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
3565 return 0;
3566
3567 cpu = t->cpu_id;
3568 if (cpu_migrate(cpu)) {
3569 fprintf(outf, "Could not migrate to CPU %d\n", cpu);
3570 return -1;
3571 }
3572
3573 if (get_msr(cpu, MSR_RAPL_POWER_UNIT, &msr))
3574 return -1;
3575
3576 fprintf(outf, "cpu%d: MSR_RAPL_POWER_UNIT: 0x%08llx (%f Watts, %f Joules, %f sec.)\n", cpu, msr,
3577 rapl_power_units, rapl_energy_units, rapl_time_units);
3578
3579 if (do_rapl & RAPL_PKG_POWER_INFO) {
3580
3581 if (get_msr(cpu, MSR_PKG_POWER_INFO, &msr))
3582 return -5;
3583
3584
3585 fprintf(outf, "cpu%d: MSR_PKG_POWER_INFO: 0x%08llx (%.0f W TDP, RAPL %.0f - %.0f W, %f sec.)\n",
3586 cpu, msr,
3587 ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units,
3588 ((msr >> 16) & RAPL_POWER_GRANULARITY) * rapl_power_units,
3589 ((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units,
3590 ((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units);
3591
3592 }
3593 if (do_rapl & RAPL_PKG) {
3594
3595 if (get_msr(cpu, MSR_PKG_POWER_LIMIT, &msr))
3596 return -9;
3597
3598 fprintf(outf, "cpu%d: MSR_PKG_POWER_LIMIT: 0x%08llx (%slocked)\n",
3599 cpu, msr, (msr >> 63) & 1 ? "" : "UN");
3600
3601 print_power_limit_msr(cpu, msr, "PKG Limit #1");
3602 fprintf(outf, "cpu%d: PKG Limit #2: %sabled (%f Watts, %f* sec, clamp %sabled)\n",
3603 cpu,
3604 ((msr >> 47) & 1) ? "EN" : "DIS",
3605 ((msr >> 32) & 0x7FFF) * rapl_power_units,
3606 (1.0 + (((msr >> 54) & 0x3)/4.0)) * (1 << ((msr >> 49) & 0x1F)) * rapl_time_units,
3607 ((msr >> 48) & 1) ? "EN" : "DIS");
3608 }
3609
3610 if (do_rapl & RAPL_DRAM_POWER_INFO) {
3611 if (get_msr(cpu, MSR_DRAM_POWER_INFO, &msr))
3612 return -6;
3613
3614 fprintf(outf, "cpu%d: MSR_DRAM_POWER_INFO,: 0x%08llx (%.0f W TDP, RAPL %.0f - %.0f W, %f sec.)\n",
3615 cpu, msr,
3616 ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units,
3617 ((msr >> 16) & RAPL_POWER_GRANULARITY) * rapl_power_units,
3618 ((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units,
3619 ((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units);
3620 }
3621 if (do_rapl & RAPL_DRAM) {
3622 if (get_msr(cpu, MSR_DRAM_POWER_LIMIT, &msr))
3623 return -9;
3624 fprintf(outf, "cpu%d: MSR_DRAM_POWER_LIMIT: 0x%08llx (%slocked)\n",
3625 cpu, msr, (msr >> 31) & 1 ? "" : "UN");
3626
3627 print_power_limit_msr(cpu, msr, "DRAM Limit");
3628 }
3629 if (do_rapl & RAPL_CORE_POLICY) {
3630 if (get_msr(cpu, MSR_PP0_POLICY, &msr))
3631 return -7;
3632
3633 fprintf(outf, "cpu%d: MSR_PP0_POLICY: %lld\n", cpu, msr & 0xF);
3634 }
3635 if (do_rapl & RAPL_CORES_POWER_LIMIT) {
3636 if (get_msr(cpu, MSR_PP0_POWER_LIMIT, &msr))
3637 return -9;
3638 fprintf(outf, "cpu%d: MSR_PP0_POWER_LIMIT: 0x%08llx (%slocked)\n",
3639 cpu, msr, (msr >> 31) & 1 ? "" : "UN");
3640 print_power_limit_msr(cpu, msr, "Cores Limit");
3641 }
3642 if (do_rapl & RAPL_GFX) {
3643 if (get_msr(cpu, MSR_PP1_POLICY, &msr))
3644 return -8;
3645
3646 fprintf(outf, "cpu%d: MSR_PP1_POLICY: %lld\n", cpu, msr & 0xF);
3647
3648 if (get_msr(cpu, MSR_PP1_POWER_LIMIT, &msr))
3649 return -9;
3650 fprintf(outf, "cpu%d: MSR_PP1_POWER_LIMIT: 0x%08llx (%slocked)\n",
3651 cpu, msr, (msr >> 31) & 1 ? "" : "UN");
3652 print_power_limit_msr(cpu, msr, "GFX Limit");
3653 }
3654 return 0;
3655 }
3656
3657 /*
3658 * SNB adds support for additional MSRs:
3659 *
3660 * MSR_PKG_C7_RESIDENCY 0x000003fa
3661 * MSR_CORE_C7_RESIDENCY 0x000003fe
3662 * MSR_PKG_C2_RESIDENCY 0x0000060d
3663 */
3664
3665 int has_snb_msrs(unsigned int family, unsigned int model)
3666 {
3667 if (!genuine_intel)
3668 return 0;
3669
3670 switch (model) {
3671 case INTEL_FAM6_SANDYBRIDGE:
3672 case INTEL_FAM6_SANDYBRIDGE_X:
3673 case INTEL_FAM6_IVYBRIDGE: /* IVB */
3674 case INTEL_FAM6_IVYBRIDGE_X: /* IVB Xeon */
3675 case INTEL_FAM6_HASWELL_CORE: /* HSW */
3676 case INTEL_FAM6_HASWELL_X: /* HSW */
3677 case INTEL_FAM6_HASWELL_ULT: /* HSW */
3678 case INTEL_FAM6_HASWELL_GT3E: /* HSW */
3679 case INTEL_FAM6_BROADWELL_CORE: /* BDW */
3680 case INTEL_FAM6_BROADWELL_GT3E: /* BDW */
3681 case INTEL_FAM6_BROADWELL_X: /* BDX */
3682 case INTEL_FAM6_BROADWELL_XEON_D: /* BDX-DE */
3683 case INTEL_FAM6_SKYLAKE_MOBILE: /* SKL */
3684 case INTEL_FAM6_SKYLAKE_DESKTOP: /* SKL */
3685 case INTEL_FAM6_KABYLAKE_MOBILE: /* KBL */
3686 case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */
3687 case INTEL_FAM6_SKYLAKE_X: /* SKX */
3688 case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */
3689 case INTEL_FAM6_ATOM_GEMINI_LAKE:
3690 case INTEL_FAM6_ATOM_DENVERTON: /* DNV */
3691 return 1;
3692 }
3693 return 0;
3694 }
3695
3696 /*
3697 * HSW adds support for additional MSRs:
3698 *
3699 * MSR_PKG_C8_RESIDENCY 0x00000630
3700 * MSR_PKG_C9_RESIDENCY 0x00000631
3701 * MSR_PKG_C10_RESIDENCY 0x00000632
3702 *
3703 * MSR_PKGC8_IRTL 0x00000633
3704 * MSR_PKGC9_IRTL 0x00000634
3705 * MSR_PKGC10_IRTL 0x00000635
3706 *
3707 */
3708 int has_hsw_msrs(unsigned int family, unsigned int model)
3709 {
3710 if (!genuine_intel)
3711 return 0;
3712
3713 switch (model) {
3714 case INTEL_FAM6_HASWELL_ULT: /* HSW */
3715 case INTEL_FAM6_BROADWELL_CORE: /* BDW */
3716 case INTEL_FAM6_SKYLAKE_MOBILE: /* SKL */
3717 case INTEL_FAM6_SKYLAKE_DESKTOP: /* SKL */
3718 case INTEL_FAM6_KABYLAKE_MOBILE: /* KBL */
3719 case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */
3720 case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */
3721 case INTEL_FAM6_ATOM_GEMINI_LAKE:
3722 return 1;
3723 }
3724 return 0;
3725 }
3726
3727 /*
3728 * SKL adds support for additional MSRS:
3729 *
3730 * MSR_PKG_WEIGHTED_CORE_C0_RES 0x00000658
3731 * MSR_PKG_ANY_CORE_C0_RES 0x00000659
3732 * MSR_PKG_ANY_GFXE_C0_RES 0x0000065A
3733 * MSR_PKG_BOTH_CORE_GFXE_C0_RES 0x0000065B
3734 */
3735 int has_skl_msrs(unsigned int family, unsigned int model)
3736 {
3737 if (!genuine_intel)
3738 return 0;
3739
3740 switch (model) {
3741 case INTEL_FAM6_SKYLAKE_MOBILE: /* SKL */
3742 case INTEL_FAM6_SKYLAKE_DESKTOP: /* SKL */
3743 case INTEL_FAM6_KABYLAKE_MOBILE: /* KBL */
3744 case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */
3745 return 1;
3746 }
3747 return 0;
3748 }
3749
3750 int is_slm(unsigned int family, unsigned int model)
3751 {
3752 if (!genuine_intel)
3753 return 0;
3754 switch (model) {
3755 case INTEL_FAM6_ATOM_SILVERMONT1: /* BYT */
3756 case INTEL_FAM6_ATOM_SILVERMONT2: /* AVN */
3757 return 1;
3758 }
3759 return 0;
3760 }
3761
3762 int is_knl(unsigned int family, unsigned int model)
3763 {
3764 if (!genuine_intel)
3765 return 0;
3766 switch (model) {
3767 case INTEL_FAM6_XEON_PHI_KNL: /* KNL */
3768 case INTEL_FAM6_XEON_PHI_KNM:
3769 return 1;
3770 }
3771 return 0;
3772 }
3773
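/*
 * KNL/KNM are understood to advance APERF/MPERF only once per 1024
 * clocks, hence the 1024 multiplier below; everything else uses 1.
 */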
3774 unsigned int get_aperf_mperf_multiplier(unsigned int family, unsigned int model)
3775 {
3776 if (is_knl(family, model))
3777 return 1024;
3778 return 1;
3779 }
3780
3781 #define SLM_BCLK_FREQS 5
3782 double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 80.0};
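/*
 * MSR_FSB_FREQ[3:0] indexes the table above (see slm_bclk() below):
 * e.g. an encoding of 1 selects 100.0 MHz, and out-of-range encodings
 * fall back to index 3 (116.7 MHz).
 */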
3783
3784 double slm_bclk(void)
3785 {
3786 unsigned long long msr = 3;
3787 unsigned int i;
3788 double freq;
3789
3790 if (get_msr(base_cpu, MSR_FSB_FREQ, &msr))
3791 fprintf(outf, "SLM BCLK: unknown\n");
3792
3793 i = msr & 0xf;
3794 if (i >= SLM_BCLK_FREQS) {
3795 fprintf(outf, "SLM BCLK[%d] invalid\n", i);
3796 i = 3;
3797 }
3798 freq = slm_freq_table[i];
3799
3800 if (!quiet)
3801 fprintf(outf, "SLM BCLK: %.1f MHz\n", freq);
3802
3803 return freq;
3804 }
3805
3806 double discover_bclk(unsigned int family, unsigned int model)
3807 {
3808 if (has_snb_msrs(family, model) || is_knl(family, model))
3809 return 100.00;
3810 else if (is_slm(family, model))
3811 return slm_bclk();
3812 else
3813 return 133.33;
3814 }
3815
3816 /*
3817 * MSR_IA32_TEMPERATURE_TARGET indicates the temperature where
3818 * the Thermal Control Circuit (TCC) activates.
3819 * This is usually equal to tjMax.
3820 *
3821 * Older processors do not have this MSR, so in that case we guess,
3822 * but also allow a cmdline over-ride with -T.
3823 *
3824 * Several MSR temperature values are in units of degrees-C
3825 * below this value, including the Digital Thermal Sensor (DTS),
3826 * Package Thermal Management Sensor (PTM), and thermal event thresholds.
3827 */
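/*
 * Worked example (hypothetical numbers): with tcc_activation_temp = 100 C,
 * a DTS readout of 38 "degrees below TCC activation" is reported as
 * 100 - 38 = 62 C.
 */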
3828 int set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p)
3829 {
3830 unsigned long long msr;
3831 unsigned int target_c_local;
3832 int cpu;
3833
3834 /* tcc_activation_temp is used only for dts or ptm */
3835 if (!(do_dts || do_ptm))
3836 return 0;
3837
3838 /* this is a per-package concept */
3839 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
3840 return 0;
3841
3842 cpu = t->cpu_id;
3843 if (cpu_migrate(cpu)) {
3844 fprintf(outf, "Could not migrate to CPU %d\n", cpu);
3845 return -1;
3846 }
3847
3848 if (tcc_activation_temp_override != 0) {
3849 tcc_activation_temp = tcc_activation_temp_override;
3850 fprintf(outf, "cpu%d: Using cmdline TCC Target (%d C)\n",
3851 cpu, tcc_activation_temp);
3852 return 0;
3853 }
3854
3855 /* Temperature Target MSR is Nehalem and newer only */
3856 if (!do_nhm_platform_info)
3857 goto guess;
3858
3859 if (get_msr(base_cpu, MSR_IA32_TEMPERATURE_TARGET, &msr))
3860 goto guess;
3861
3862 target_c_local = (msr >> 16) & 0xFF;
3863
3864 if (!quiet)
3865 fprintf(outf, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C)\n",
3866 cpu, msr, target_c_local);
3867
3868 if (!target_c_local)
3869 goto guess;
3870
3871 tcc_activation_temp = target_c_local;
3872
3873 return 0;
3874
3875 guess:
3876 tcc_activation_temp = TJMAX_DEFAULT;
3877 fprintf(outf, "cpu%d: Guessing tjMax %d C, please use -T to specify\n",
3878 cpu, tcc_activation_temp);
3879
3880 return 0;
3881 }
3882
3883 void decode_feature_control_msr(void)
3884 {
3885 unsigned long long msr;
3886
3887 if (!get_msr(base_cpu, MSR_IA32_FEATURE_CONTROL, &msr))
3888 fprintf(outf, "cpu%d: MSR_IA32_FEATURE_CONTROL: 0x%08llx (%sLocked %s)\n",
3889 base_cpu, msr,
3890 msr & FEATURE_CONTROL_LOCKED ? "" : "UN-",
3891 msr & (1 << 18) ? "SGX" : "");
3892 }
3893
3894 void decode_misc_enable_msr(void)
3895 {
3896 unsigned long long msr;
3897
3898 if (!get_msr(base_cpu, MSR_IA32_MISC_ENABLE, &msr))
3899 fprintf(outf, "cpu%d: MSR_IA32_MISC_ENABLE: 0x%08llx (%sTCC %sEIST %sMWAIT %sPREFETCH %sTURBO)\n",
3900 base_cpu, msr,
3901 msr & MSR_IA32_MISC_ENABLE_TM1 ? "" : "No-",
3902 msr & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP ? "" : "No-",
3903 msr & MSR_IA32_MISC_ENABLE_MWAIT ? "No-" : "",
3904 msr & MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE ? "No-" : "",
3905 msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ? "No-" : "");
3906 }
3907
3908 void decode_misc_feature_control(void)
3909 {
3910 unsigned long long msr;
3911
3912 if (!has_misc_feature_control)
3913 return;
3914
3915 if (!get_msr(base_cpu, MSR_MISC_FEATURE_CONTROL, &msr))
3916 fprintf(outf, "cpu%d: MSR_MISC_FEATURE_CONTROL: 0x%08llx (%sL2-Prefetch %sL2-Prefetch-pair %sL1-Prefetch %sL1-IP-Prefetch)\n",
3917 base_cpu, msr,
3918 msr & (1 << 0) ? "No-" : "", /* L2 HW prefetch disable */
3919 msr & (1 << 1) ? "No-" : "", /* L2 adjacent-line prefetch disable */
3920 msr & (1 << 2) ? "No-" : "", /* DCU (L1) HW prefetch disable */
3921 msr & (1 << 3) ? "No-" : ""); /* DCU (L1) IP prefetch disable */
3922 }
3923 /*
3924 * Decode MSR_MISC_PWR_MGMT
3925 *
3926 * Decode the bits according to the Nehalem documentation
3927 * bit[0] seems to continue to have the same meaning going forward
3928 * bit[1] less so...
3929 */
3930 void decode_misc_pwr_mgmt_msr(void)
3931 {
3932 unsigned long long msr;
3933
3934 if (!do_nhm_platform_info)
3935 return;
3936
3937 if (no_MSR_MISC_PWR_MGMT)
3938 return;
3939
3940 if (!get_msr(base_cpu, MSR_MISC_PWR_MGMT, &msr))
3941 fprintf(outf, "cpu%d: MSR_MISC_PWR_MGMT: 0x%08llx (%sable-EIST_Coordination %sable-EPB %sable-OOB)\n",
3942 base_cpu, msr,
3943 msr & (1 << 0) ? "DIS" : "EN",
3944 msr & (1 << 1) ? "EN" : "DIS",
3945 msr & (1 << 8) ? "EN" : "DIS");
3946 }
3947 /*
3948 * Decode MSR_CC6_DEMOTION_POLICY_CONFIG, MSR_MC6_DEMOTION_POLICY_CONFIG
3949 *
3950 * These MSRs are present on Silvermont processors,
3951 * e.g. the Intel Atom processor E3000 series (Baytrail) and friends.
3952 */
3953 void decode_c6_demotion_policy_msr(void)
3954 {
3955 unsigned long long msr;
3956
3957 if (!get_msr(base_cpu, MSR_CC6_DEMOTION_POLICY_CONFIG, &msr))
3958 fprintf(outf, "cpu%d: MSR_CC6_DEMOTION_POLICY_CONFIG: 0x%08llx (%sable-CC6-Demotion)\n",
3959 base_cpu, msr, msr & (1 << 0) ? "EN" : "DIS");
3960
3961 if (!get_msr(base_cpu, MSR_MC6_DEMOTION_POLICY_CONFIG, &msr))
3962 fprintf(outf, "cpu%d: MSR_MC6_DEMOTION_POLICY_CONFIG: 0x%08llx (%sable-MC6-Demotion)\n",
3963 base_cpu, msr, msr & (1 << 0) ? "EN" : "DIS");
3964 }
3965
3966 void process_cpuid()
3967 {
3968 unsigned int eax, ebx, ecx, edx, max_level, max_extended_level;
3969 unsigned int fms, family, model, stepping;
3970 unsigned int has_turbo;
3971
3972 eax = ebx = ecx = edx = 0;
3973
3974 __cpuid(0, max_level, ebx, ecx, edx);
3975
3976 if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e)
3977 genuine_intel = 1;
3978
3979 if (!quiet)
3980 fprintf(outf, "CPUID(0): %.4s%.4s%.4s ",
3981 (char *)&ebx, (char *)&edx, (char *)&ecx);
3982
3983 __cpuid(1, fms, ebx, ecx, edx);
3984 family = (fms >> 8) & 0xf;
3985 model = (fms >> 4) & 0xf;
3986 stepping = fms & 0xf;
3987 if (family == 6 || family == 0xf)
3988 model += ((fms >> 16) & 0xf) << 4;
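/*
 * e.g. (illustrative) fms = 0x000406e3 decodes to family 6,
 * model 0x4e (extended model 4, base model 0xe), stepping 3,
 * i.e. a Skylake mobile part.
 */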
3989
3990 if (!quiet) {
3991 fprintf(outf, "%d CPUID levels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n",
3992 max_level, family, model, stepping, family, model, stepping);
3993 fprintf(outf, "CPUID(1): %s %s %s %s %s %s %s %s %s\n",
3994 ecx & (1 << 0) ? "SSE3" : "-",
3995 ecx & (1 << 3) ? "MONITOR" : "-",
3996 ecx & (1 << 6) ? "SMX" : "-",
3997 ecx & (1 << 7) ? "EIST" : "-",
3998 ecx & (1 << 8) ? "TM2" : "-",
3999 edx & (1 << 4) ? "TSC" : "-",
4000 edx & (1 << 5) ? "MSR" : "-",
4001 edx & (1 << 22) ? "ACPI-TM" : "-",
4002 edx & (1 << 29) ? "TM" : "-");
4003 }
4004
4005 if (!(edx & (1 << 5)))
4006 errx(1, "CPUID: no MSR");
4007
4008 /*
4009 * check max extended function levels of CPUID.
4010 * This is needed to check for invariant TSC.
4011 * This check is valid for both Intel and AMD.
4012 */
4013 ebx = ecx = edx = 0;
4014 __cpuid(0x80000000, max_extended_level, ebx, ecx, edx);
4015
4016 if (max_extended_level >= 0x80000007) {
4017
4018 /*
4019 * Non-Stop TSC is advertised by CPUID.EAX=0x80000007: EDX.bit8
4020 * this check is valid for both Intel and AMD
4021 */
4022 __cpuid(0x80000007, eax, ebx, ecx, edx);
4023 has_invariant_tsc = edx & (1 << 8);
4024 }
4025
4026 /*
4027 * APERF/MPERF is advertised by CPUID.EAX=0x6: ECX.bit0
4028 * this check is valid for both Intel and AMD
4029 */
4030
4031 __cpuid(0x6, eax, ebx, ecx, edx);
4032 has_aperf = ecx & (1 << 0);
4033 if (has_aperf) {
4034 BIC_PRESENT(BIC_Avg_MHz);
4035 BIC_PRESENT(BIC_Busy);
4036 BIC_PRESENT(BIC_Bzy_MHz);
4037 }
4038 do_dts = eax & (1 << 0);
4039 if (do_dts)
4040 BIC_PRESENT(BIC_CoreTmp);
4041 has_turbo = eax & (1 << 1);
4042 do_ptm = eax & (1 << 6);
4043 if (do_ptm)
4044 BIC_PRESENT(BIC_PkgTmp);
4045 has_hwp = eax & (1 << 7);
4046 has_hwp_notify = eax & (1 << 8);
4047 has_hwp_activity_window = eax & (1 << 9);
4048 has_hwp_epp = eax & (1 << 10);
4049 has_hwp_pkg = eax & (1 << 11);
4050 has_epb = ecx & (1 << 3);
4051
4052 if (!quiet)
4053 fprintf(outf, "CPUID(6): %sAPERF, %sTURBO, %sDTS, %sPTM, %sHWP, "
4054 "%sHWPnotify, %sHWPwindow, %sHWPepp, %sHWPpkg, %sEPB\n",
4055 has_aperf ? "" : "No-",
4056 has_turbo ? "" : "No-",
4057 do_dts ? "" : "No-",
4058 do_ptm ? "" : "No-",
4059 has_hwp ? "" : "No-",
4060 has_hwp_notify ? "" : "No-",
4061 has_hwp_activity_window ? "" : "No-",
4062 has_hwp_epp ? "" : "No-",
4063 has_hwp_pkg ? "" : "No-",
4064 has_epb ? "" : "No-");
4065
4066 if (!quiet)
4067 decode_misc_enable_msr();
4068
4069
4070 if (max_level >= 0x7 && !quiet) {
4071 int has_sgx;
4072
4073 ecx = 0;
4074
4075 __cpuid_count(0x7, 0, eax, ebx, ecx, edx);
4076
4077 has_sgx = ebx & (1 << 2);
4078 fprintf(outf, "CPUID(7): %sSGX\n", has_sgx ? "" : "No-");
4079
4080 if (has_sgx)
4081 decode_feature_control_msr();
4082 }
4083
4084 if (max_level >= 0x15) {
4085 unsigned int eax_crystal;
4086 unsigned int ebx_tsc;
4087
4088 /*
4089 * CPUID 15H TSC/Crystal ratio, possibly Crystal Hz
4090 */
4091 eax_crystal = ebx_tsc = crystal_hz = edx = 0;
4092 __cpuid(0x15, eax_crystal, ebx_tsc, crystal_hz, edx);
4093
4094 if (ebx_tsc != 0) {
4095
4096 if (!quiet)
4097 fprintf(outf, "CPUID(0x15): eax_crystal: %d ebx_tsc: %d ecx_crystal_hz: %d\n",
4098 eax_crystal, ebx_tsc, crystal_hz);
4099
4100 if (crystal_hz == 0)
4101 switch(model) {
4102 case INTEL_FAM6_SKYLAKE_MOBILE: /* SKL */
4103 case INTEL_FAM6_SKYLAKE_DESKTOP: /* SKL */
4104 case INTEL_FAM6_KABYLAKE_MOBILE: /* KBL */
4105 case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */
4106 crystal_hz = 24000000; /* 24.0 MHz */
4107 break;
4108 case INTEL_FAM6_SKYLAKE_X: /* SKX */
4109 case INTEL_FAM6_ATOM_DENVERTON: /* DNV */
4110 crystal_hz = 25000000; /* 25.0 MHz */
4111 break;
4112 case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */
4113 case INTEL_FAM6_ATOM_GEMINI_LAKE:
4114 crystal_hz = 19200000; /* 19.2 MHz */
4115 break;
4116 default:
4117 crystal_hz = 0;
4118 }
4119
4120 if (crystal_hz) {
4121 tsc_hz = (unsigned long long) crystal_hz * ebx_tsc / eax_crystal;
4122 if (!quiet)
4123 fprintf(outf, "TSC: %lld MHz (%d Hz * %d / %d / 1000000)\n",
4124 tsc_hz / 1000000, crystal_hz, ebx_tsc, eax_crystal);
4125 }
4126 }
4127 }
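/*
 * Worked example (hypothetical leaf values): a 24 MHz crystal with a
 * CPUID.15H ratio of 216/2 gives tsc_hz = 24000000 * 216 / 2 = 2592 MHz.
 */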
4128 if (max_level >= 0x16) {
4129 unsigned int base_mhz, max_mhz, bus_mhz, edx;
4130
4131 /*
4132 * CPUID 16H Base MHz, Max MHz, Bus MHz
4133 */
4134 base_mhz = max_mhz = bus_mhz = edx = 0;
4135
4136 __cpuid(0x16, base_mhz, max_mhz, bus_mhz, edx);
4137 if (!quiet)
4138 fprintf(outf, "CPUID(0x16): base_mhz: %d max_mhz: %d bus_mhz: %d\n",
4139 base_mhz, max_mhz, bus_mhz);
4140 }
4141
4142 if (has_aperf)
4143 aperf_mperf_multiplier = get_aperf_mperf_multiplier(family, model);
4144
4145 BIC_PRESENT(BIC_IRQ);
4146 BIC_PRESENT(BIC_TSC_MHz);
4147
4148 if (probe_nhm_msrs(family, model)) {
4149 do_nhm_platform_info = 1;
4150 BIC_PRESENT(BIC_CPU_c1);
4151 BIC_PRESENT(BIC_CPU_c3);
4152 BIC_PRESENT(BIC_CPU_c6);
4153 BIC_PRESENT(BIC_SMI);
4154 }
4155 do_snb_cstates = has_snb_msrs(family, model);
4156
4157 if (do_snb_cstates)
4158 BIC_PRESENT(BIC_CPU_c7);
4159
4160 do_irtl_snb = has_snb_msrs(family, model);
4161 if (do_snb_cstates && (pkg_cstate_limit >= PCL__2))
4162 BIC_PRESENT(BIC_Pkgpc2);
4163 if (pkg_cstate_limit >= PCL__3)
4164 BIC_PRESENT(BIC_Pkgpc3);
4165 if (pkg_cstate_limit >= PCL__6)
4166 BIC_PRESENT(BIC_Pkgpc6);
4167 if (do_snb_cstates && (pkg_cstate_limit >= PCL__7))
4168 BIC_PRESENT(BIC_Pkgpc7);
4169 if (has_slv_msrs(family, model)) {
4170 BIC_NOT_PRESENT(BIC_Pkgpc2);
4171 BIC_NOT_PRESENT(BIC_Pkgpc3);
4172 BIC_PRESENT(BIC_Pkgpc6);
4173 BIC_NOT_PRESENT(BIC_Pkgpc7);
4174 BIC_PRESENT(BIC_Mod_c6);
4175 use_c1_residency_msr = 1;
4176 }
4177 if (is_dnv(family, model)) {
4178 BIC_PRESENT(BIC_CPU_c1);
4179 BIC_NOT_PRESENT(BIC_CPU_c3);
4180 BIC_NOT_PRESENT(BIC_Pkgpc3);
4181 BIC_NOT_PRESENT(BIC_CPU_c7);
4182 BIC_NOT_PRESENT(BIC_Pkgpc7);
4183 use_c1_residency_msr = 1;
4184 }
4185 if (is_skx(family, model)) {
4186 BIC_NOT_PRESENT(BIC_CPU_c3);
4187 BIC_NOT_PRESENT(BIC_Pkgpc3);
4188 BIC_NOT_PRESENT(BIC_CPU_c7);
4189 BIC_NOT_PRESENT(BIC_Pkgpc7);
4190 }
4191 if (is_bdx(family, model)) {
4192 BIC_NOT_PRESENT(BIC_CPU_c7);
4193 BIC_NOT_PRESENT(BIC_Pkgpc7);
4194 }
4195 if (has_hsw_msrs(family, model)) {
4196 BIC_PRESENT(BIC_Pkgpc8);
4197 BIC_PRESENT(BIC_Pkgpc9);
4198 BIC_PRESENT(BIC_Pkgpc10);
4199 }
4200 do_irtl_hsw = has_hsw_msrs(family, model);
4201 do_skl_residency = has_skl_msrs(family, model);
4202 do_slm_cstates = is_slm(family, model);
4203 do_knl_cstates = is_knl(family, model);
4204
4205 if (!quiet)
4206 decode_misc_pwr_mgmt_msr();
4207
4208 if (!quiet && has_slv_msrs(family, model))
4209 decode_c6_demotion_policy_msr();
4210
4211 rapl_probe(family, model);
4212 perf_limit_reasons_probe(family, model);
4213
4214 if (!quiet)
4215 dump_cstate_pstate_config_info(family, model);
4216
4217 if (!quiet)
4218 dump_sysfs_cstate_config();
4219 if (!quiet)
4220 dump_sysfs_pstate_config();
4221
4222 if (has_skl_msrs(family, model))
4223 calculate_tsc_tweak();
4224
4225 if (!access("/sys/class/drm/card0/power/rc6_residency_ms", R_OK))
4226 BIC_PRESENT(BIC_GFX_rc6);
4227
4228 if (!access("/sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz", R_OK))
4229 BIC_PRESENT(BIC_GFXMHz);
4230
4231 if (!quiet)
4232 decode_misc_feature_control();
4233
4234 return;
4235 }
4236
4237
4238 /*
4239 * In /dev/cpu/, return success for names that are numbers,
4240 * i.e. filter out ".", "..", and "microcode".
4241 */
4242 int dir_filter(const struct dirent *dirp)
4243 {
4244 if (isdigit(dirp->d_name[0]))
4245 return 1;
4246 else
4247 return 0;
4248 }
4249
4250 int open_dev_cpu_msr(int dummy1)
4251 {
4252 return 0;
4253 }
4254
4255 void topology_probe()
4256 {
4257 int i;
4258 int max_core_id = 0;
4259 int max_package_id = 0;
4260 int max_siblings = 0;
4261 struct cpu_topology {
4262 int core_id;
4263 int physical_package_id;
4264 } *cpus;
4265
4266 /* Initialize num_cpus, max_cpu_num */
4267 topo.num_cpus = 0;
4268 topo.max_cpu_num = 0;
4269 for_all_proc_cpus(count_cpus);
4270 if (!summary_only && topo.num_cpus > 1)
4271 BIC_PRESENT(BIC_CPU);
4272
4273 if (debug > 1)
4274 fprintf(outf, "num_cpus %d max_cpu_num %d\n", topo.num_cpus, topo.max_cpu_num);
4275
4276 cpus = calloc(1, (topo.max_cpu_num + 1) * sizeof(struct cpu_topology));
4277 if (cpus == NULL)
4278 err(1, "calloc cpus");
4279
4280 /*
4281 * Allocate and initialize cpu_present_set
4282 */
4283 cpu_present_set = CPU_ALLOC((topo.max_cpu_num + 1));
4284 if (cpu_present_set == NULL)
4285 err(3, "CPU_ALLOC");
4286 cpu_present_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
4287 CPU_ZERO_S(cpu_present_setsize, cpu_present_set);
4288 for_all_proc_cpus(mark_cpu_present);
4289
4290 /*
4291 * Validate that all cpus in cpu_subset are also in cpu_present_set
4292 */
4293 for (i = 0; i < CPU_SUBSET_MAXCPUS; ++i) {
4294 if (CPU_ISSET_S(i, cpu_subset_size, cpu_subset))
4295 if (!CPU_ISSET_S(i, cpu_present_setsize, cpu_present_set))
4296 err(1, "cpu%d not present", i);
4297 }
4298
4299 /*
4300 * Allocate and initialize cpu_affinity_set
4301 */
4302 cpu_affinity_set = CPU_ALLOC((topo.max_cpu_num + 1));
4303 if (cpu_affinity_set == NULL)
4304 err(3, "CPU_ALLOC");
4305 cpu_affinity_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
4306 CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);
4307
4308
4309 /*
4310 * For online cpus
4311 * find max_core_id, max_package_id
4312 */
4313 for (i = 0; i <= topo.max_cpu_num; ++i) {
4314 int siblings;
4315
4316 if (cpu_is_not_present(i)) {
4317 if (debug > 1)
4318 fprintf(outf, "cpu%d NOT PRESENT\n", i);
4319 continue;
4320 }
4321 cpus[i].core_id = get_core_id(i);
4322 if (cpus[i].core_id > max_core_id)
4323 max_core_id = cpus[i].core_id;
4324
4325 cpus[i].physical_package_id = get_physical_package_id(i);
4326 if (cpus[i].physical_package_id > max_package_id)
4327 max_package_id = cpus[i].physical_package_id;
4328
4329 siblings = get_num_ht_siblings(i);
4330 if (siblings > max_siblings)
4331 max_siblings = siblings;
4332 if (debug > 1)
4333 fprintf(outf, "cpu %d pkg %d core %d\n",
4334 i, cpus[i].physical_package_id, cpus[i].core_id);
4335 }
4336 topo.num_cores_per_pkg = max_core_id + 1;
4337 if (debug > 1)
4338 fprintf(outf, "max_core_id %d, sizing for %d cores per package\n",
4339 max_core_id, topo.num_cores_per_pkg);
4340 if (!summary_only && topo.num_cores_per_pkg > 1)
4341 BIC_PRESENT(BIC_Core);
4342
4343 topo.num_packages = max_package_id + 1;
4344 if (debug > 1)
4345 fprintf(outf, "max_package_id %d, sizing for %d packages\n",
4346 max_package_id, topo.num_packages);
4347 if (!summary_only && topo.num_packages > 1)
4348 BIC_PRESENT(BIC_Package);
4349
4350 topo.num_threads_per_core = max_siblings;
4351 if (debug > 1)
4352 fprintf(outf, "max_siblings %d\n", max_siblings);
4353
4354 free(cpus);
4355 }
4356
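/*
 * Sizing example (illustrative): a 2-package, 4-cores-per-package,
 * 2-threads-per-core box makes allocate_counters() below reserve
 * 2 * 4 * 2 = 16 thread_data, 4 * 2 = 8 core_data and 2 pkg_data
 * entries per counter set.
 */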
4357 void
4358 allocate_counters(struct thread_data **t, struct core_data **c, struct pkg_data **p)
4359 {
4360 int i;
4361
4362 *t = calloc(topo.num_threads_per_core * topo.num_cores_per_pkg *
4363 topo.num_packages, sizeof(struct thread_data));
4364 if (*t == NULL)
4365 goto error;
4366
4367 for (i = 0; i < topo.num_threads_per_core *
4368 topo.num_cores_per_pkg * topo.num_packages; i++)
4369 (*t)[i].cpu_id = -1;
4370
4371 *c = calloc(topo.num_cores_per_pkg * topo.num_packages,
4372 sizeof(struct core_data));
4373 if (*c == NULL)
4374 goto error;
4375
4376 for (i = 0; i < topo.num_cores_per_pkg * topo.num_packages; i++)
4377 (*c)[i].core_id = -1;
4378
4379 *p = calloc(topo.num_packages, sizeof(struct pkg_data));
4380 if (*p == NULL)
4381 goto error;
4382
4383 for (i = 0; i < topo.num_packages; i++)
4384 (*p)[i].package_id = i;
4385
4386 return;
4387 error:
4388 err(1, "calloc counters");
4389 }
4390 /*
4391 * init_counter()
4392 *
4393 * set cpu_id, core_num, pkg_num
4394 * set FIRST_THREAD_IN_CORE and FIRST_CORE_IN_PACKAGE
4395 *
4396 * (topo.num_cores is incremented in initialize_counters() when a core's 1st thread is seen)
4397 */
4398 void init_counter(struct thread_data *thread_base, struct core_data *core_base,
4399 struct pkg_data *pkg_base, int thread_num, int core_num,
4400 int pkg_num, int cpu_id)
4401 {
4402 struct thread_data *t;
4403 struct core_data *c;
4404 struct pkg_data *p;
4405
4406 t = GET_THREAD(thread_base, thread_num, core_num, pkg_num);
4407 c = GET_CORE(core_base, core_num, pkg_num);
4408 p = GET_PKG(pkg_base, pkg_num);
4409
4410 t->cpu_id = cpu_id;
4411 if (thread_num == 0) {
4412 t->flags |= CPU_IS_FIRST_THREAD_IN_CORE;
4413 if (cpu_is_first_core_in_package(cpu_id))
4414 t->flags |= CPU_IS_FIRST_CORE_IN_PACKAGE;
4415 }
4416
4417 c->core_id = core_num;
4418 p->package_id = pkg_num;
4419 }
4420
4421
4422 int initialize_counters(int cpu_id)
4423 {
4424 int my_thread_id, my_core_id, my_package_id;
4425
4426 my_package_id = get_physical_package_id(cpu_id);
4427 my_core_id = get_core_id(cpu_id);
4428 my_thread_id = get_cpu_position_in_core(cpu_id);
4429 if (!my_thread_id)
4430 topo.num_cores++;
4431
4432 init_counter(EVEN_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id);
4433 init_counter(ODD_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id);
4434 return 0;
4435 }
4436
4437 void allocate_output_buffer()
4438 {
4439 output_buffer = calloc(1, (1 + topo.num_cpus) * 1024);
4440 outp = output_buffer;
4441 if (outp == NULL)
4442 err(-1, "calloc output buffer");
4443 }
4444 void allocate_fd_percpu(void)
4445 {
4446 fd_percpu = calloc(topo.max_cpu_num + 1, sizeof(int));
4447 if (fd_percpu == NULL)
4448 err(-1, "calloc fd_percpu");
4449 }
4450 void allocate_irq_buffers(void)
4451 {
4452 irq_column_2_cpu = calloc(topo.num_cpus, sizeof(int));
4453 if (irq_column_2_cpu == NULL)
4454 err(-1, "calloc %d", topo.num_cpus);
4455
4456 irqs_per_cpu = calloc(topo.max_cpu_num + 1, sizeof(int));
4457 if (irqs_per_cpu == NULL)
4458 err(-1, "calloc %d", topo.max_cpu_num + 1);
4459 }
4460 void setup_all_buffers(void)
4461 {
4462 topology_probe();
4463 allocate_irq_buffers();
4464 allocate_fd_percpu();
4465 allocate_counters(&thread_even, &core_even, &package_even);
4466 allocate_counters(&thread_odd, &core_odd, &package_odd);
4467 allocate_output_buffer();
4468 for_all_proc_cpus(initialize_counters);
4469 }
4470
4471 void set_base_cpu(void)
4472 {
4473 base_cpu = sched_getcpu();
4474 if (base_cpu < 0)
4475 err(-ENODEV, "No valid cpus found");
4476
4477 if (debug > 1)
4478 fprintf(outf, "base_cpu = %d\n", base_cpu);
4479 }
4480
4481 void turbostat_init()
4482 {
4483 setup_all_buffers();
4484 set_base_cpu();
4485 check_dev_msr();
4486 check_permissions();
4487 process_cpuid();
4488
4489
4490 if (!quiet)
4491 for_all_cpus(print_hwp, ODD_COUNTERS);
4492
4493 if (!quiet)
4494 for_all_cpus(print_epb, ODD_COUNTERS);
4495
4496 if (!quiet)
4497 for_all_cpus(print_perf_limit, ODD_COUNTERS);
4498
4499 if (!quiet)
4500 for_all_cpus(print_rapl, ODD_COUNTERS);
4501
4502 for_all_cpus(set_temperature_target, ODD_COUNTERS);
4503
4504 if (!quiet)
4505 for_all_cpus(print_thermal, ODD_COUNTERS);
4506
4507 if (!quiet && do_irtl_snb)
4508 print_irtl();
4509 }
4510
4511 int fork_it(char **argv)
4512 {
4513 pid_t child_pid;
4514 int status;
4515
4516 snapshot_proc_sysfs_files();
4517 status = for_all_cpus(get_counters, EVEN_COUNTERS);
4518 if (status)
4519 exit(status);
4520 /* clear affinity side-effect of get_counters() */
4521 sched_setaffinity(0, cpu_present_setsize, cpu_present_set);
4522 gettimeofday(&tv_even, (struct timezone *)NULL);
4523
4524 child_pid = fork();
4525 if (!child_pid) {
4526 /* child */
4527 execvp(argv[0], argv);
4528 err(errno, "exec %s", argv[0]);
4529 } else {
4530
4531 /* parent */
4532 if (child_pid == -1)
4533 err(1, "fork");
4534
4535 signal(SIGINT, SIG_IGN);
4536 signal(SIGQUIT, SIG_IGN);
4537 if (waitpid(child_pid, &status, 0) == -1)
4538 err(status, "waitpid");
4539 }
4540 /*
4541 * n.b. fork_it() does not check for errors from for_all_cpus()
4542 * because re-starting is problematic when forking
4543 */
4544 snapshot_proc_sysfs_files();
4545 for_all_cpus(get_counters, ODD_COUNTERS);
4546 gettimeofday(&tv_odd, (struct timezone *)NULL);
4547 timersub(&tv_odd, &tv_even, &tv_delta);
4548 if (for_all_cpus_2(delta_cpu, ODD_COUNTERS, EVEN_COUNTERS))
4549 fprintf(outf, "%s: Counter reset detected\n", progname);
4550 else {
4551 compute_average(EVEN_COUNTERS);
4552 format_all_counters(EVEN_COUNTERS);
4553 }
4554
4555 fprintf(outf, "%.6f sec\n", tv_delta.tv_sec + tv_delta.tv_usec/1000000.0);
4556
4557 flush_output_stderr();
4558
4559 return status;
4560 }
4561
4562 int get_and_dump_counters(void)
4563 {
4564 int status;
4565
4566 snapshot_proc_sysfs_files();
4567 status = for_all_cpus(get_counters, ODD_COUNTERS);
4568 if (status)
4569 return status;
4570
4571 status = for_all_cpus(dump_counters, ODD_COUNTERS);
4572 if (status)
4573 return status;
4574
4575 flush_output_stdout();
4576
4577 return status;
4578 }
4579
4580 void print_version() {
4581 fprintf(outf, "turbostat version 17.02.24"
4582 " - Len Brown <lenb@kernel.org>\n");
4583 }
4584
4585 int add_counter(unsigned int msr_num, char *path, char *name,
4586 unsigned int width, enum counter_scope scope,
4587 enum counter_type type, enum counter_format format, int flags)
4588 {
4589 struct msr_counter *msrp;
4590
4591 msrp = calloc(1, sizeof(struct msr_counter));
4592 if (msrp == NULL) {
4593 perror("calloc");
4594 exit(1);
4595 }
4596
4597 msrp->msr_num = msr_num;
4598 strncpy(msrp->name, name, NAME_BYTES);
4599 if (path)
4600 strncpy(msrp->path, path, PATH_BYTES);
4601 msrp->width = width;
4602 msrp->type = type;
4603 msrp->format = format;
4604 msrp->flags = flags;
4605
4606 switch (scope) {
4607
4608 case SCOPE_CPU:
4609 msrp->next = sys.tp;
4610 sys.tp = msrp;
4611 sys.added_thread_counters++;
4612 if (sys.added_thread_counters > MAX_ADDED_COUNTERS) {
4613 fprintf(stderr, "exceeded max %d added thread counters\n",
4614 MAX_ADDED_COUNTERS);
4615 exit(-1);
4616 }
4617 break;
4618
4619 case SCOPE_CORE:
4620 msrp->next = sys.cp;
4621 sys.cp = msrp;
4622 sys.added_core_counters++;
4623 if (sys.added_core_counters > MAX_ADDED_COUNTERS) {
4624 fprintf(stderr, "exceeded max %d added core counters\n",
4625 MAX_ADDED_COUNTERS);
4626 exit(-1);
4627 }
4628 break;
4629
4630 case SCOPE_PACKAGE:
4631 msrp->next = sys.pp;
4632 sys.pp = msrp;
4633 sys.added_package_counters++;
4634 if (sys.added_package_counters > MAX_ADDED_COUNTERS) {
4635 fprintf(stderr, "exceeded max %d added package counters\n",
4636 MAX_ADDED_COUNTERS);
4637 exit(-1);
4638 }
4639 break;
4640 }
4641
4642 return 0;
4643 }
4644
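/*
 * parse_add_command() takes a comma-separated --add description made of
 * the tokens recognized below.  A hypothetical example:
 *
 * turbostat --add msr0x10,u64,cpu,raw,MY_TSC
 *
 * adds a per-CPU, 64-bit, raw-format dump of MSR 0x10 under the
 * column name "MY_TSC".
 */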
4645 void parse_add_command(char *add_command)
4646 {
4647 int msr_num = 0;
4648 char *path = NULL;
4649 char name_buffer[NAME_BYTES] = "";
4650 int width = 64;
4651 int fail = 0;
4652 enum counter_scope scope = SCOPE_CPU;
4653 enum counter_type type = COUNTER_CYCLES;
4654 enum counter_format format = FORMAT_DELTA;
4655
4656 while (add_command) {
4657
4658 if (sscanf(add_command, "msr0x%x", &msr_num) == 1)
4659 goto next;
4660
4661 if (sscanf(add_command, "msr%d", &msr_num) == 1)
4662 goto next;
4663
4664 if (*add_command == '/') {
4665 path = add_command;
4666 goto next;
4667 }
4668
4669 if (sscanf(add_command, "u%d", &width) == 1) {
4670 if ((width == 32) || (width == 64))
4671 goto next;
4672 width = 64;
4673 }
4674 if (!strncmp(add_command, "cpu", strlen("cpu"))) {
4675 scope = SCOPE_CPU;
4676 goto next;
4677 }
4678 if (!strncmp(add_command, "core", strlen("core"))) {
4679 scope = SCOPE_CORE;
4680 goto next;
4681 }
4682 if (!strncmp(add_command, "package", strlen("package"))) {
4683 scope = SCOPE_PACKAGE;
4684 goto next;
4685 }
4686 if (!strncmp(add_command, "cycles", strlen("cycles"))) {
4687 type = COUNTER_CYCLES;
4688 goto next;
4689 }
4690 if (!strncmp(add_command, "seconds", strlen("seconds"))) {
4691 type = COUNTER_SECONDS;
4692 goto next;
4693 }
4694 if (!strncmp(add_command, "usec", strlen("usec"))) {
4695 type = COUNTER_USEC;
4696 goto next;
4697 }
4698 if (!strncmp(add_command, "raw", strlen("raw"))) {
4699 format = FORMAT_RAW;
4700 goto next;
4701 }
4702 if (!strncmp(add_command, "delta", strlen("delta"))) {
4703 format = FORMAT_DELTA;
4704 goto next;
4705 }
4706 if (!strncmp(add_command, "percent", strlen("percent"))) {
4707 format = FORMAT_PERCENT;
4708 goto next;
4709 }
4710
4711 if (sscanf(add_command, "%18s,%*s", name_buffer) == 1) { /* 18 < NAME_BYTES */
4712 char *eos;
4713
4714 eos = strchr(name_buffer, ',');
4715 if (eos)
4716 *eos = '\0';
4717 goto next;
4718 }
4719
4720 next:
4721 add_command = strchr(add_command, ',');
4722 if (add_command) {
4723 *add_command = '\0';
4724 add_command++;
4725 }
4726
4727 }
4728 if ((msr_num == 0) && (path == NULL)) {
4729 fprintf(stderr, "--add: (msrDDD | msr0xXXX | /path_to_counter ) required\n");
4730 fail++;
4731 }
4732
4733 /* generate default column header */
4734 if (*name_buffer == '\0') {
4735 if (width == 32)
4736 sprintf(name_buffer, "M0x%x%s", msr_num, format == FORMAT_PERCENT ? "%" : "");
4737 else
4738 sprintf(name_buffer, "M0X%x%s", msr_num, format == FORMAT_PERCENT ? "%" : "");
4739 }
4740
4741 if (add_counter(msr_num, path, name_buffer, width, scope, type, format, 0))
4742 fail++;
4743
4744 if (fail) {
4745 help();
4746 exit(1);
4747 }
4748 }
4749
4750 int is_deferred_skip(char *name)
4751 {
4752 int i;
4753
4754 for (i = 0; i < deferred_skip_index; ++i)
4755 if (!strcmp(name, deferred_skip_names[i]))
4756 return 1;
4757 return 0;
4758 }
4759
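/*
 * probe_sysfs() turns each cpuidle state under
 * /sys/devices/system/cpu/cpu<N>/cpuidle/state<N>/ into two added
 * counters: a residency percentage built from the "time" file (column
 * name gets a '%' suffix, e.g. "C6%") and an entry-count delta built
 * from the "usage" file (e.g. "C6").
 */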
4760 void probe_sysfs(void)
4761 {
4762 char path[64];
4763 char name_buf[16];
4764 FILE *input;
4765 int state;
4766 char *sp;
4767
4768 if (!DO_BIC(BIC_sysfs))
4769 return;
4770
4771 for (state = 10; state > 0; --state) {
4772
4773 sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/name",
4774 base_cpu, state);
4775 input = fopen(path, "r");
4776 if (input == NULL)
4777 continue;
4778 fgets(name_buf, sizeof(name_buf), input);
4779
4780 /* truncate "C1-HSW\n" or "C1\n" to "C1", then append '%' for the percentage column ("C1%") */
4781 sp = strchr(name_buf, '-');
4782 if (!sp)
4783 sp = strchrnul(name_buf, '\n');
4784 *sp = '%';
4785 *(sp + 1) = '\0';
4786
4787 fclose(input);
4788
4789 sprintf(path, "cpuidle/state%d/time", state);
4790
4791 if (is_deferred_skip(name_buf))
4792 continue;
4793
4794 add_counter(0, path, name_buf, 64, SCOPE_CPU, COUNTER_USEC,
4795 FORMAT_PERCENT, SYSFS_PERCPU);
4796 }
4797
4798 for (state = 10; state > 0; --state) {
4799
4800 sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/name",
4801 base_cpu, state);
4802 input = fopen(path, "r");
4803 if (input == NULL)
4804 continue;
4805 fgets(name_buf, sizeof(name_buf), input);
4806 /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */
4807 sp = strchr(name_buf, '-');
4808 if (!sp)
4809 sp = strchrnul(name_buf, '\n');
4810 *sp = '\0';
4811 fclose(input);
4812
4813 sprintf(path, "cpuidle/state%d/usage", state);
4814
4815 if (is_deferred_skip(name_buf))
4816 continue;
4817
4818 add_counter(0, path, name_buf, 64, SCOPE_CPU, COUNTER_ITEMS,
4819 FORMAT_DELTA, SYSFS_PERCPU);
4820 }
4821
4822 }
4823
4824
4825 /*
4826 * parse a cpu set with the following syntax:
4827 * 1,2,4..6,8-10 and set the corresponding bits in cpu_subset
4828 */
4829 void parse_cpu_command(char *optarg)
4830 {
4831 unsigned int start, end;
4832 char *next;
4833
4834 if (!strcmp(optarg, "core")) {
4835 if (cpu_subset)
4836 goto error;
4837 show_core_only++;
4838 return;
4839 }
4840 if (!strcmp(optarg, "package")) {
4841 if (cpu_subset)
4842 goto error;
4843 show_pkg_only++;
4844 return;
4845 }
4846 if (show_core_only || show_pkg_only)
4847 goto error;
4848
4849 cpu_subset = CPU_ALLOC(CPU_SUBSET_MAXCPUS);
4850 if (cpu_subset == NULL)
4851 err(3, "CPU_ALLOC");
4852 cpu_subset_size = CPU_ALLOC_SIZE(CPU_SUBSET_MAXCPUS);
4853
4854 CPU_ZERO_S(cpu_subset_size, cpu_subset);
4855
4856 next = optarg;
4857
4858 while (next && *next) {
4859
4860 if (*next == '-') /* no negative cpu numbers */
4861 goto error;
4862
4863 start = strtoul(next, &next, 10);
4864
4865 if (start >= CPU_SUBSET_MAXCPUS)
4866 goto error;
4867 CPU_SET_S(start, cpu_subset_size, cpu_subset);
4868
4869 if (*next == '\0')
4870 break;
4871
4872 if (*next == ',') {
4873 next += 1;
4874 continue;
4875 }
4876
4877 if (*next == '-') {
4878 next += 1; /* start range */
4879 } else if (*next == '.') {
4880 next += 1;
4881 if (*next == '.')
4882 next += 1; /* start range */
4883 else
4884 goto error;
4885 }
4886
4887 end = strtoul(next, &next, 10);
4888 if (end <= start)
4889 goto error;
4890
4891 while (++start <= end) {
4892 if (start >= CPU_SUBSET_MAXCPUS)
4893 goto error;
4894 CPU_SET_S(start, cpu_subset_size, cpu_subset);
4895 }
4896
4897 if (*next == ',')
4898 next += 1;
4899 else if (*next != '\0')
4900 goto error;
4901 }
4902
4903 return;
4904
4905 error:
4906 fprintf(stderr, "\"--cpu %s\" malformed\n", optarg);
4907 help();
4908 exit(-1);
4909 }
4910
4911 int shown;
4912 /*
4913 * parse_show_hide() - process cmdline to set default counter action
4914 */
4915 void parse_show_hide(char *optarg, enum show_hide_mode new_mode)
4916 {
4917 /*
4918 * --show: show only those specified
4919 * The 1st invocation will clear and replace the enabled mask
4920 * subsequent invocations can add to it.
4921 */
4922 if (new_mode == SHOW_LIST) {
4923 if (shown == 0)
4924 bic_enabled = bic_lookup(optarg, new_mode);
4925 else
4926 bic_enabled |= bic_lookup(optarg, new_mode);
4927 shown = 1;
4928
4929 return;
4930 }
4931
4932 /*
4933 * --hide: do not show those specified
4934 * multiple invocations simply clear more bits in enabled mask
4935 */
4936 bic_enabled &= ~bic_lookup(optarg, new_mode);
4937
4938 }
4939
4940 void cmdline(int argc, char **argv)
4941 {
4942 int opt;
4943 int option_index = 0;
4944 static struct option long_options[] = {
4945 {"add", required_argument, 0, 'a'},
4946 {"cpu", required_argument, 0, 'c'},
4947 {"Dump", no_argument, 0, 'D'},
4948 {"debug", no_argument, 0, 'd'}, /* internal, not documented */
4949 {"interval", required_argument, 0, 'i'},
4950 {"help", no_argument, 0, 'h'},
4951 {"hide", required_argument, 0, 'H'}, // meh, -h taken by --help
4952 {"Joules", no_argument, 0, 'J'},
4953 {"list", no_argument, 0, 'l'},
4954 {"out", required_argument, 0, 'o'},
4955 {"quiet", no_argument, 0, 'q'},
4956 {"show", required_argument, 0, 's'},
4957 {"Summary", no_argument, 0, 'S'},
4958 {"TCC", required_argument, 0, 'T'},
4959 {"version", no_argument, 0, 'v' },
4960 {0, 0, 0, 0 }
4961 };
4962
4963 progname = argv[0];
4964
4965 while ((opt = getopt_long_only(argc, argv, "+C:c:Ddhi:JM:m:o:qST:v",
4966 long_options, &option_index)) != -1) {
4967 switch (opt) {
4968 case 'a':
4969 parse_add_command(optarg);
4970 break;
4971 case 'c':
4972 parse_cpu_command(optarg);
4973 break;
4974 case 'D':
4975 dump_only++;
4976 break;
4977 case 'd':
4978 debug++;
4979 break;
4980 case 'H':
4981 parse_show_hide(optarg, HIDE_LIST);
4982 break;
4983 case 'h':
4984 default:
4985 help();
4986 exit(1);
4987 case 'i':
4988 {
4989 double interval = strtod(optarg, NULL);
4990
4991 if (interval < 0.001) {
4992 fprintf(outf, "interval %f seconds is too small\n",
4993 interval);
4994 exit(2);
4995 }
4996
4997 interval_ts.tv_sec = interval;
4998 interval_ts.tv_nsec = (interval - interval_ts.tv_sec) * 1000000000;
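/* e.g. "-i 0.25" yields interval_ts = { 0, 250000000 } */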
4999 }
5000 break;
5001 case 'J':
5002 rapl_joules++;
5003 break;
5004 case 'l':
5005 list_header_only++;
5006 quiet++;
5007 break;
5008 case 'o':
5009 outf = fopen_or_die(optarg, "w");
5010 break;
5011 case 'q':
5012 quiet = 1;
5013 break;
5014 case 's':
5015 parse_show_hide(optarg, SHOW_LIST);
5016 break;
5017 case 'S':
5018 summary_only++;
5019 break;
5020 case 'T':
5021 tcc_activation_temp_override = atoi(optarg);
5022 break;
5023 case 'v':
5024 print_version();
5025 exit(0);
5026 break;
5027 }
5028 }
5029 }
5030
5031 int main(int argc, char **argv)
5032 {
5033 outf = stderr;
5034
5035 cmdline(argc, argv);
5036
5037 if (!quiet)
5038 print_version();
5039
5040 probe_sysfs();
5041
5042 turbostat_init();
5043
5044 /* dump counters and exit */
5045 if (dump_only)
5046 return get_and_dump_counters();
5047
5048 /* list header and exit */
5049 if (list_header_only) {
5050 print_header(",");
5051 flush_output_stdout();
5052 return 0;
5053 }
5054
5055 /*
5056 * if any params left, it must be a command to fork
5057 */
5058 if (argc - optind)
5059 return fork_it(argv + optind);
5060 else
5061 turbostat_loop();
5062
5063 return 0;
5064 }