// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/env.h"
#include "util/pmu.h"
#include "util/pmus.h"
#include "util/stat.h"
#include "util/strbuf.h"
#include "linux/string.h"
#include "topdown.h"
#include "evsel.h"
#include "util/debug.h"
#include "env.h"
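
/*
 * Raw-config bit positions of the IBS "L3MissOnly" sample filter. These
 * are understood to line up with the L3MissOnly fields of the IBS fetch
 * and op control registers that the raw event config is copied into.
 */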
#define IBS_FETCH_L3MISSONLY (1ULL << 59)
#define IBS_OP_L3MISSONLY (1ULL << 16)

void arch_evsel__set_sample_weight(struct evsel *evsel)
{
        evsel__set_sample_bit(evsel, WEIGHT_STRUCT);
}
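
/*
 * On x86 the structured weight format is used instead of the plain
 * PERF_SAMPLE_WEIGHT, so a single sample can carry several fields (for
 * instance load latency plus instruction latency). An illustrative way
 * to exercise it:
 *
 *   perf mem record -- ./workload
 *   perf mem report
 */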

/* Check whether the evsel's PMU supports the perf metrics */
bool evsel__sys_has_perf_metrics(const struct evsel *evsel)
{
        struct perf_pmu *pmu;

        if (!topdown_sys_has_perf_metrics())
                return false;

        /*
         * The PERF_TYPE_RAW type is the core PMU type, e.g., "cpu" PMU on a
         * non-hybrid machine, "cpu_core" PMU on a hybrid machine. The slots
         * event is only available on the core PMU, which supports the perf
         * metrics feature. Checking both the PERF_TYPE_RAW type and the
         * slots event should be good enough to detect the perf metrics
         * feature.
         */
        pmu = evsel__find_pmu(evsel);
        return pmu && pmu->type == PERF_TYPE_RAW;
}

bool arch_evsel__must_be_in_group(const struct evsel *evsel)
{
        if (!evsel__sys_has_perf_metrics(evsel))
                return false;

        return arch_is_topdown_metrics(evsel) || arch_is_topdown_slots(evsel);
}

int arch_evsel__hw_name(struct evsel *evsel, char *bf, size_t size)
{
        u64 event = evsel->core.attr.config & PERF_HW_EVENT_MASK;
        u64 pmu = evsel->core.attr.config >> PERF_PMU_TYPE_SHIFT;
        const char *event_name;

        if (event < PERF_COUNT_HW_MAX && evsel__hw_names[event])
                event_name = evsel__hw_names[event];
        else
                event_name = "unknown-hardware";

        /* The PMU type is not required for the non-hybrid platform. */
        if (!pmu)
                return scnprintf(bf, size, "%s", event_name);

        return scnprintf(bf, size, "%s/%s/",
                         evsel->pmu ? evsel->pmu->name : "cpu",
                         event_name);
}

static void ibs_l3miss_warn(void)
{
        pr_warning(
"WARNING: Hw internally resets sampling period when L3 Miss Filtering is enabled\n"
"and tagged operation does not cause L3 Miss. This causes sampling period skew.\n");
}

void arch__post_evsel_config(struct evsel *evsel, struct perf_event_attr *attr)
{
        struct perf_pmu *evsel_pmu, *ibs_fetch_pmu, *ibs_op_pmu;
        static int warned_once;

        if (warned_once || !x86__is_amd_cpu())
                return;

        evsel_pmu = evsel__find_pmu(evsel);
        if (!evsel_pmu)
                return;

        ibs_fetch_pmu = perf_pmus__find("ibs_fetch");
        ibs_op_pmu = perf_pmus__find("ibs_op");

        if (ibs_fetch_pmu && ibs_fetch_pmu->type == evsel_pmu->type) {
                if (attr->config & IBS_FETCH_L3MISSONLY) {
                        ibs_l3miss_warn();
                        warned_once = 1;
                }
        } else if (ibs_op_pmu && ibs_op_pmu->type == evsel_pmu->type) {
                if (attr->config & IBS_OP_L3MISSONLY) {
                        ibs_l3miss_warn();
                        warned_once = 1;
                }
        }
}
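
/*
 * The warning fires once for raw IBS events that set the L3MissOnly bit,
 * e.g. (illustrative, assuming the PMU exposes the l3missonly format
 * attribute):
 *
 *   perf record -e ibs_op/l3missonly=1/ -- ./workload
 */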

static int amd_evsel__open_strerror(struct evsel *evsel, char *msg, size_t size)
{
        struct perf_pmu *pmu;

        if (evsel->core.attr.precise_ip == 0)
                return 0;

        pmu = evsel__find_pmu(evsel);
        if (!pmu || strncmp(pmu->name, "ibs", 3))
                return 0;

        /* More verbose IBS errors. */
        if (evsel->core.attr.exclude_kernel || evsel->core.attr.exclude_user ||
            evsel->core.attr.exclude_hv || evsel->core.attr.exclude_idle ||
            evsel->core.attr.exclude_host || evsel->core.attr.exclude_guest) {
                return scnprintf(msg, size, "AMD IBS doesn't support privilege filtering. Try "
                                 "again without the privilege modifiers (like 'k') at the end.");
        }

        return 0;
}

static int intel_evsel__open_strerror(struct evsel *evsel, int err, char *msg, size_t size)
{
        struct strbuf sb = STRBUF_INIT;
        int ret;

        if (err != EINVAL)
                return 0;

        if (!topdown_sys_has_perf_metrics())
                return 0;

        if (arch_is_topdown_slots(evsel)) {
                if (!evsel__is_group_leader(evsel)) {
                        evlist__uniquify_evsel_names(evsel->evlist, &stat_config);
                        evlist__format_evsels(evsel->evlist, &sb, 2048);
                        ret = scnprintf(msg, size, "Topdown slots event can only be group leader "
                                        "in '%s'.", sb.buf);
                        strbuf_release(&sb);
                        return ret;
                }
        } else if (arch_is_topdown_metrics(evsel)) {
                struct evsel *pos;

                evlist__for_each_entry(evsel->evlist, pos) {
                        if (pos == evsel || !arch_is_topdown_metrics(pos))
                                continue;

                        if (pos->core.attr.config != evsel->core.attr.config)
                                continue;

                        evlist__uniquify_evsel_names(evsel->evlist, &stat_config);
                        evlist__format_evsels(evsel->evlist, &sb, 2048);
                        ret = scnprintf(msg, size, "Perf metric event '%s' is duplicated "
                                        "in the same group (only one event is allowed) in '%s'.",
                                        evsel__name(evsel), sb.buf);
                        strbuf_release(&sb);
                        return ret;
                }
        }

        return 0;
}
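
/*
 * Illustrative trigger for the slots-leader message above, assuming the
 * tool has not already reordered the group so that slots leads:
 *
 *   perf stat -e '{topdown-retiring,slots}' -- ./workload
 */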

int arch_evsel__open_strerror(struct evsel *evsel, int err, char *msg, size_t size)
{
        return x86__is_amd_cpu()
                ? amd_evsel__open_strerror(evsel, msg, size)
                : intel_evsel__open_strerror(evsel, err, msg, size);
}