summaryrefslogtreecommitdiff
path: root/tools
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2026-04-10 08:36:32 -0700
committerLinus Torvalds <torvalds@linux-foundation.org>2026-04-10 08:36:32 -0700
commit96463e4e0268dddbdb60fd1b96800736aa2bade9 (patch)
tree09d7f956c3e00eb1866f6209e2895c0f5cbb3185 /tools
parent017102b40c34b5a67de46230bdfb06096dd11716 (diff)
parentba893caead54745595e29953f0531cf3651610aa (diff)
Merge tag 'turbostat-fixes-for-7.0' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux
Pull turbostat fixes from Len Brown: - Fix a memory allocation issue that could corrupt output values or SEGV - Fix a perf initialization issue that could exit on some HW + kernels - Minor fixes * tag 'turbostat-fixes-for-7.0' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux: tools/power turbostat: Allow execution to continue after perf_l2_init() failure tools/power turbostat: Fix delimiter bug in print functions tools/power turbostat: Fix --show/--hide for individual cpuidle counters tools/power turbostat: Fix incorrect format variable tools/power turbostat: Consistently use print_float_value() tools/power/turbostat: Fix microcode patch level output for AMD/Hygon tools/power turbostat: Eliminate unnecessary data structure allocation tools/power turbostat: Fix swidle header vs data display tools/power turbostat: Fix illegal memory access when SMT is present and disabled
Diffstat (limited to 'tools')
-rw-r--r--tools/power/x86/turbostat/turbostat.c100
1 file changed, 54 insertions, 46 deletions
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 1a2671c28209..e9e8ef72395a 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -2409,7 +2409,7 @@ struct topo_params {
int max_l3_id;
int max_node_num;
int nodes_per_pkg;
- int cores_per_node;
+ int cores_per_pkg;
int threads_per_core;
} topo;
@@ -2837,30 +2837,29 @@ static inline int print_name(int width, int *printed, char *delim, char *name, e
UNUSED(type);
if (format == FORMAT_RAW && width >= 64)
- return (sprintf(outp, "%s%-8s", (*printed++ ? delim : ""), name));
+ return (sprintf(outp, "%s%-8s", ((*printed)++ ? delim : ""), name));
else
- return (sprintf(outp, "%s%s", (*printed++ ? delim : ""), name));
+ return (sprintf(outp, "%s%s", ((*printed)++ ? delim : ""), name));
}
static inline int print_hex_value(int width, int *printed, char *delim, unsigned long long value)
{
if (width <= 32)
- return (sprintf(outp, "%s%08x", (*printed++ ? delim : ""), (unsigned int)value));
+ return (sprintf(outp, "%s%08x", ((*printed)++ ? delim : ""), (unsigned int)value));
else
- return (sprintf(outp, "%s%016llx", (*printed++ ? delim : ""), value));
+ return (sprintf(outp, "%s%016llx", ((*printed)++ ? delim : ""), value));
}
static inline int print_decimal_value(int width, int *printed, char *delim, unsigned long long value)
{
- if (width <= 32)
- return (sprintf(outp, "%s%d", (*printed++ ? delim : ""), (unsigned int)value));
- else
- return (sprintf(outp, "%s%-8lld", (*printed++ ? delim : ""), value));
+ UNUSED(width);
+
+ return (sprintf(outp, "%s%lld", ((*printed)++ ? delim : ""), value));
}
static inline int print_float_value(int *printed, char *delim, double value)
{
- return (sprintf(outp, "%s%0.2f", (*printed++ ? delim : ""), value));
+ return (sprintf(outp, "%s%0.2f", ((*printed)++ ? delim : ""), value));
}
void print_header(char *delim)
@@ -3469,7 +3468,7 @@ int format_counters(PER_THREAD_PARAMS)
for (i = 0, pp = sys.perf_tp; pp; ++i, pp = pp->next) {
if (pp->format == FORMAT_RAW)
outp += print_hex_value(pp->width, &printed, delim, t->perf_counter[i]);
- else if (pp->format == FORMAT_DELTA || mp->format == FORMAT_AVERAGE)
+ else if (pp->format == FORMAT_DELTA || pp->format == FORMAT_AVERAGE)
outp += print_decimal_value(pp->width, &printed, delim, t->perf_counter[i]);
else if (pp->format == FORMAT_PERCENT) {
if (pp->type == COUNTER_USEC)
@@ -3490,12 +3489,12 @@ int format_counters(PER_THREAD_PARAMS)
case PMT_TYPE_XTAL_TIME:
value_converted = pct(value_raw / crystal_hz, interval_float);
- outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), value_converted);
+ outp += print_float_value(&printed, delim, value_converted);
break;
case PMT_TYPE_TCORE_CLOCK:
value_converted = pct(value_raw / tcore_clock_freq_hz, interval_float);
- outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), value_converted);
+ outp += print_float_value(&printed, delim, value_converted);
}
}
@@ -3539,7 +3538,7 @@ int format_counters(PER_THREAD_PARAMS)
for (i = 0, pp = sys.perf_cp; pp; i++, pp = pp->next) {
if (pp->format == FORMAT_RAW)
outp += print_hex_value(pp->width, &printed, delim, c->perf_counter[i]);
- else if (pp->format == FORMAT_DELTA || mp->format == FORMAT_AVERAGE)
+ else if (pp->format == FORMAT_DELTA || pp->format == FORMAT_AVERAGE)
outp += print_decimal_value(pp->width, &printed, delim, c->perf_counter[i]);
else if (pp->format == FORMAT_PERCENT)
outp += print_float_value(&printed, delim, pct(c->perf_counter[i], tsc));
@@ -3695,7 +3694,7 @@ int format_counters(PER_THREAD_PARAMS)
outp += print_hex_value(pp->width, &printed, delim, p->perf_counter[i]);
else if (pp->type == COUNTER_K2M)
outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), (unsigned int)p->perf_counter[i] / 1000);
- else if (pp->format == FORMAT_DELTA || mp->format == FORMAT_AVERAGE)
+ else if (pp->format == FORMAT_DELTA || pp->format == FORMAT_AVERAGE)
outp += print_decimal_value(pp->width, &printed, delim, p->perf_counter[i]);
else if (pp->format == FORMAT_PERCENT)
outp += print_float_value(&printed, delim, pct(p->perf_counter[i], tsc));
@@ -9122,10 +9121,13 @@ void process_cpuid()
cpuid_has_hv = ecx_flags & (1 << 31);
if (!no_msr) {
- if (get_msr(sched_getcpu(), MSR_IA32_UCODE_REV, &ucode_patch))
+ if (get_msr(sched_getcpu(), MSR_IA32_UCODE_REV, &ucode_patch)) {
warnx("get_msr(UCODE)");
- else
+ } else {
ucode_patch_valid = true;
+ if (!authentic_amd && !hygon_genuine)
+ ucode_patch >>= 32;
+ }
}
/*
@@ -9139,7 +9141,7 @@ void process_cpuid()
if (!quiet) {
fprintf(outf, "CPUID(1): family:model:stepping 0x%x:%x:%x (%d:%d:%d)", family, model, stepping, family, model, stepping);
if (ucode_patch_valid)
- fprintf(outf, " microcode 0x%x", (unsigned int)((ucode_patch >> 32) & 0xFFFFFFFF));
+ fprintf(outf, " microcode 0x%x", (unsigned int)ucode_patch);
fputc('\n', outf);
fprintf(outf, "CPUID(0x80000000): max_extended_levels: 0x%x\n", max_extended_level);
@@ -9403,13 +9405,13 @@ void perf_l2_init(void)
if (!is_hybrid) {
fd_l2_percpu[cpu] = open_perf_counter(cpu, perf_pmu_types.uniform, perf_model_support->first.refs, -1, PERF_FORMAT_GROUP);
if (fd_l2_percpu[cpu] == -1) {
- err(-1, "%s(cpu%d, 0x%x, 0x%llx) REFS", __func__, cpu, perf_pmu_types.uniform, perf_model_support->first.refs);
+ warnx("%s(cpu%d, 0x%x, 0x%llx) REFS", __func__, cpu, perf_pmu_types.uniform, perf_model_support->first.refs);
free_fd_l2_percpu();
return;
}
retval = open_perf_counter(cpu, perf_pmu_types.uniform, perf_model_support->first.hits, fd_l2_percpu[cpu], PERF_FORMAT_GROUP);
if (retval == -1) {
- err(-1, "%s(cpu%d, 0x%x, 0x%llx) HITS", __func__, cpu, perf_pmu_types.uniform, perf_model_support->first.hits);
+ warnx("%s(cpu%d, 0x%x, 0x%llx) HITS", __func__, cpu, perf_pmu_types.uniform, perf_model_support->first.hits);
free_fd_l2_percpu();
return;
}
@@ -9418,39 +9420,39 @@ void perf_l2_init(void)
if (perf_pcore_set && CPU_ISSET_S(cpu, cpu_possible_setsize, perf_pcore_set)) {
fd_l2_percpu[cpu] = open_perf_counter(cpu, perf_pmu_types.pcore, perf_model_support->first.refs, -1, PERF_FORMAT_GROUP);
if (fd_l2_percpu[cpu] == -1) {
- err(-1, "%s(cpu%d, 0x%x, 0x%llx) REFS", __func__, cpu, perf_pmu_types.pcore, perf_model_support->first.refs);
+ warnx("%s(cpu%d, 0x%x, 0x%llx) REFS", __func__, cpu, perf_pmu_types.pcore, perf_model_support->first.refs);
free_fd_l2_percpu();
return;
}
retval = open_perf_counter(cpu, perf_pmu_types.pcore, perf_model_support->first.hits, fd_l2_percpu[cpu], PERF_FORMAT_GROUP);
if (retval == -1) {
- err(-1, "%s(cpu%d, 0x%x, 0x%llx) HITS", __func__, cpu, perf_pmu_types.pcore, perf_model_support->first.hits);
+ warnx("%s(cpu%d, 0x%x, 0x%llx) HITS", __func__, cpu, perf_pmu_types.pcore, perf_model_support->first.hits);
free_fd_l2_percpu();
return;
}
} else if (perf_ecore_set && CPU_ISSET_S(cpu, cpu_possible_setsize, perf_ecore_set)) {
fd_l2_percpu[cpu] = open_perf_counter(cpu, perf_pmu_types.ecore, perf_model_support->second.refs, -1, PERF_FORMAT_GROUP);
if (fd_l2_percpu[cpu] == -1) {
- err(-1, "%s(cpu%d, 0x%x, 0x%llx) REFS", __func__, cpu, perf_pmu_types.pcore, perf_model_support->second.refs);
+ warnx("%s(cpu%d, 0x%x, 0x%llx) REFS", __func__, cpu, perf_pmu_types.ecore, perf_model_support->second.refs);
free_fd_l2_percpu();
return;
}
retval = open_perf_counter(cpu, perf_pmu_types.ecore, perf_model_support->second.hits, fd_l2_percpu[cpu], PERF_FORMAT_GROUP);
if (retval == -1) {
- err(-1, "%s(cpu%d, 0x%x, 0x%llx) HITS", __func__, cpu, perf_pmu_types.pcore, perf_model_support->second.hits);
+ warnx("%s(cpu%d, 0x%x, 0x%llx) HITS", __func__, cpu, perf_pmu_types.ecore, perf_model_support->second.hits);
free_fd_l2_percpu();
return;
}
} else if (perf_lcore_set && CPU_ISSET_S(cpu, cpu_possible_setsize, perf_lcore_set)) {
fd_l2_percpu[cpu] = open_perf_counter(cpu, perf_pmu_types.lcore, perf_model_support->third.refs, -1, PERF_FORMAT_GROUP);
if (fd_l2_percpu[cpu] == -1) {
- err(-1, "%s(cpu%d, 0x%x, 0x%llx) REFS", __func__, cpu, perf_pmu_types.pcore, perf_model_support->third.refs);
+ warnx("%s(cpu%d, 0x%x, 0x%llx) REFS", __func__, cpu, perf_pmu_types.lcore, perf_model_support->third.refs);
free_fd_l2_percpu();
return;
}
retval = open_perf_counter(cpu, perf_pmu_types.lcore, perf_model_support->third.hits, fd_l2_percpu[cpu], PERF_FORMAT_GROUP);
if (retval == -1) {
- err(-1, "%s(cpu%d, 0x%x, 0x%llx) HITS", __func__, cpu, perf_pmu_types.pcore, perf_model_support->third.hits);
+ warnx("%s(cpu%d, 0x%x, 0x%llx) HITS", __func__, cpu, perf_pmu_types.lcore, perf_model_support->third.hits);
free_fd_l2_percpu();
return;
}
@@ -9634,9 +9636,9 @@ void topology_probe(bool startup)
topo.max_core_id = max_core_id; /* within a package */
topo.max_package_id = max_package_id;
- topo.cores_per_node = max_core_id + 1;
+ topo.cores_per_pkg = max_core_id + 1;
if (debug > 1)
- fprintf(outf, "max_core_id %d, sizing for %d cores per package\n", max_core_id, topo.cores_per_node);
+ fprintf(outf, "max_core_id %d, sizing for %d cores per package\n", max_core_id, topo.cores_per_pkg);
if (!summary_only)
BIC_PRESENT(BIC_Core);
@@ -9701,14 +9703,13 @@ error:
void allocate_counters(struct counters *counters)
{
int i;
- int num_cores = topo.cores_per_node * topo.nodes_per_pkg * topo.num_packages;
- int num_threads = topo.threads_per_core * num_cores;
+ int num_cores = topo.cores_per_pkg * topo.num_packages;
- counters->threads = calloc(num_threads, sizeof(struct thread_data));
+ counters->threads = calloc(topo.max_cpu_num + 1, sizeof(struct thread_data));
if (counters->threads == NULL)
goto error;
- for (i = 0; i < num_threads; i++)
+ for (i = 0; i < topo.max_cpu_num + 1; i++)
(counters->threads)[i].cpu_id = -1;
counters->cores = calloc(num_cores, sizeof(struct core_data));
@@ -11284,6 +11285,14 @@ void probe_cpuidle_residency(void)
}
}
+static bool cpuidle_counter_wanted(char *name)
+{
+ if (is_deferred_skip(name))
+ return false;
+
+ return DO_BIC(BIC_cpuidle) || is_deferred_add(name);
+}
+
void probe_cpuidle_counts(void)
{
char path[64];
@@ -11293,7 +11302,7 @@ void probe_cpuidle_counts(void)
int min_state = 1024, max_state = 0;
char *sp;
- if (!DO_BIC(BIC_cpuidle))
+ if (!DO_BIC(BIC_cpuidle) && !deferred_add_index)
return;
for (state = 10; state >= 0; --state) {
@@ -11308,12 +11317,6 @@ void probe_cpuidle_counts(void)
remove_underbar(name_buf);
- if (!DO_BIC(BIC_cpuidle) && !is_deferred_add(name_buf))
- continue;
-
- if (is_deferred_skip(name_buf))
- continue;
-
/* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */
sp = strchr(name_buf, '-');
if (!sp)
@@ -11328,16 +11331,19 @@ void probe_cpuidle_counts(void)
* Add 'C1+' for C1, and so on. The 'below' sysfs file always contains 0 for
* the last state, so do not add it.
*/
-
*sp = '+';
*(sp + 1) = '\0';
- sprintf(path, "cpuidle/state%d/below", state);
- add_counter(0, path, name_buf, 64, SCOPE_CPU, COUNTER_ITEMS, FORMAT_DELTA, SYSFS_PERCPU, 0);
+ if (cpuidle_counter_wanted(name_buf)) {
+ sprintf(path, "cpuidle/state%d/below", state);
+ add_counter(0, path, name_buf, 64, SCOPE_CPU, COUNTER_ITEMS, FORMAT_DELTA, SYSFS_PERCPU, 0);
+ }
}
*sp = '\0';
- sprintf(path, "cpuidle/state%d/usage", state);
- add_counter(0, path, name_buf, 64, SCOPE_CPU, COUNTER_ITEMS, FORMAT_DELTA, SYSFS_PERCPU, 0);
+ if (cpuidle_counter_wanted(name_buf)) {
+ sprintf(path, "cpuidle/state%d/usage", state);
+ add_counter(0, path, name_buf, 64, SCOPE_CPU, COUNTER_ITEMS, FORMAT_DELTA, SYSFS_PERCPU, 0);
+ }
/*
* The 'above' sysfs file always contains 0 for the shallowest state (smallest
@@ -11346,8 +11352,10 @@ void probe_cpuidle_counts(void)
if (state != min_state) {
*sp = '-';
*(sp + 1) = '\0';
- sprintf(path, "cpuidle/state%d/above", state);
- add_counter(0, path, name_buf, 64, SCOPE_CPU, COUNTER_ITEMS, FORMAT_DELTA, SYSFS_PERCPU, 0);
+ if (cpuidle_counter_wanted(name_buf)) {
+ sprintf(path, "cpuidle/state%d/above", state);
+ add_counter(0, path, name_buf, 64, SCOPE_CPU, COUNTER_ITEMS, FORMAT_DELTA, SYSFS_PERCPU, 0);
+ }
}
}
}