1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Per core/cpu state
4 *
5 * Used to coordinate shared registers between HT threads or
6 * among events on a single PMU.
7 */
8
9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10
11 #include <linux/stddef.h>
12 #include <linux/types.h>
13 #include <linux/init.h>
14 #include <linux/slab.h>
15 #include <linux/export.h>
16 #include <linux/nmi.h>
17 #include <linux/kvm_host.h>
18
19 #include <asm/cpufeature.h>
20 #include <asm/debugreg.h>
21 #include <asm/hardirq.h>
22 #include <asm/intel-family.h>
23 #include <asm/intel_pt.h>
24 #include <asm/apic.h>
25 #include <asm/cpu_device_id.h>
26
27 #include "../perf_event.h"
28
29 /*
30 * Intel PerfMon, used on Core and later.
31 */
32 static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
33 {
34 [PERF_COUNT_HW_CPU_CYCLES] = 0x003c,
35 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
36 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e,
37 [PERF_COUNT_HW_CACHE_MISSES] = 0x412e,
38 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
39 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
40 [PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
41 [PERF_COUNT_HW_REF_CPU_CYCLES] = 0x0300, /* pseudo-encoding */
42 };
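/*
 * For reference (this describes the architectural PERFEVTSEL layout, and is
 * an explanatory note rather than something defined by this file): bits 0-7
 * of each value above are the event select and bits 8-15 the unit mask, i.e.
 *
 *	event_select =  val       & 0xff;
 *	unit_mask    = (val >> 8) & 0xff;
 *
 * so 0x412e decodes to event 0x2E with umask 0x41 (the architectural
 * "LLC Misses" encoding) and 0x003c to event 0x3C with umask 0x00
 * (unhalted core cycles).
 */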
43
44 static struct event_constraint intel_core_event_constraints[] __read_mostly =
45 {
46 INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
47 INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
48 INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
49 INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
50 INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
51 INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
52 EVENT_CONSTRAINT_END
53 };
54
55 static struct event_constraint intel_core2_event_constraints[] __read_mostly =
56 {
57 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
58 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
59 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
60 INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
61 INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
62 INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
63 INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
64 INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
65 INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
66 INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
67 INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
68 INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
69 INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
70 EVENT_CONSTRAINT_END
71 };
72
73 static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
74 {
75 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
76 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
77 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
78 INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
79 INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
80 INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
81 INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
82 INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
83 INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
84 INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
85 INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
86 EVENT_CONSTRAINT_END
87 };
88
89 static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
90 {
91 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
92 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
93 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
94 EVENT_EXTRA_END
95 };
96
97 static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
98 {
99 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
100 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
101 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
102 INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
103 INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
104 INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
105 INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
106 EVENT_CONSTRAINT_END
107 };
108
109 static struct event_constraint intel_snb_event_constraints[] __read_mostly =
110 {
111 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
112 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
113 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
114 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
115 INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
116 INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
117 INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
118 INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
119 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
120 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
121 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
122 INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
123
124 /*
125 * When HT is off these events can only run on the bottom 4 counters
126 * When HT is on, they are impacted by the HT bug and require EXCL access
127 */
128 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
129 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
130 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
131 INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
132
133 EVENT_CONSTRAINT_END
134 };
135
136 static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
137 {
138 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
139 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
140 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
141 INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
142 INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
143 INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
144 INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_LDM_PENDING */
145 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
146 INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
147 INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
148 INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
149 INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
150 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
151
152 /*
153 * When HT is off these events can only run on the bottom 4 counters
154 * When HT is on, they are impacted by the HT bug and require EXCL access
155 */
156 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
157 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
158 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
159 INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
160
161 EVENT_CONSTRAINT_END
162 };
163
164 static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
165 {
166 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
167 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
168 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
169 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
170 EVENT_EXTRA_END
171 };
172
173 static struct event_constraint intel_v1_event_constraints[] __read_mostly =
174 {
175 EVENT_CONSTRAINT_END
176 };
177
178 static struct event_constraint intel_gen_event_constraints[] __read_mostly =
179 {
180 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
181 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
182 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
183 EVENT_CONSTRAINT_END
184 };
185
186 static struct event_constraint intel_v5_gen_event_constraints[] __read_mostly =
187 {
188 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
189 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
190 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
191 FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */
192 FIXED_EVENT_CONSTRAINT(0x0500, 4),
193 FIXED_EVENT_CONSTRAINT(0x0600, 5),
194 FIXED_EVENT_CONSTRAINT(0x0700, 6),
195 FIXED_EVENT_CONSTRAINT(0x0800, 7),
196 FIXED_EVENT_CONSTRAINT(0x0900, 8),
197 FIXED_EVENT_CONSTRAINT(0x0a00, 9),
198 FIXED_EVENT_CONSTRAINT(0x0b00, 10),
199 FIXED_EVENT_CONSTRAINT(0x0c00, 11),
200 FIXED_EVENT_CONSTRAINT(0x0d00, 12),
201 FIXED_EVENT_CONSTRAINT(0x0e00, 13),
202 FIXED_EVENT_CONSTRAINT(0x0f00, 14),
203 FIXED_EVENT_CONSTRAINT(0x1000, 15),
204 EVENT_CONSTRAINT_END
205 };
206
207 static struct event_constraint intel_slm_event_constraints[] __read_mostly =
208 {
209 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
210 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
211 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
212 EVENT_CONSTRAINT_END
213 };
214
215 static struct event_constraint intel_grt_event_constraints[] __read_mostly = {
216 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
217 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
218 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
219 FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF_TSC_P */
220 EVENT_CONSTRAINT_END
221 };
222
223 static struct event_constraint intel_skt_event_constraints[] __read_mostly = {
224 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
225 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
226 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
227 FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF_TSC_P */
228 FIXED_EVENT_CONSTRAINT(0x0073, 4), /* TOPDOWN_BAD_SPECULATION.ALL */
229 FIXED_EVENT_CONSTRAINT(0x019c, 5), /* TOPDOWN_FE_BOUND.ALL */
230 FIXED_EVENT_CONSTRAINT(0x02c2, 6), /* TOPDOWN_RETIRING.ALL */
231 EVENT_CONSTRAINT_END
232 };
233
234 static struct event_constraint intel_skl_event_constraints[] = {
235 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
236 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
237 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
238 INTEL_UEVENT_CONSTRAINT(0x1c0, 0x2), /* INST_RETIRED.PREC_DIST */
239
240 /*
241 * when HT is off, these can only run on the bottom 4 counters
242 */
243 INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */
244 INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */
245 INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */
246 INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */
247 INTEL_EVENT_CONSTRAINT(0xc6, 0xf), /* FRONTEND_RETIRED.* */
248
249 EVENT_CONSTRAINT_END
250 };
251
252 static struct extra_reg intel_knl_extra_regs[] __read_mostly = {
253 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x799ffbb6e7ull, RSP_0),
254 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x399ffbffe7ull, RSP_1),
255 EVENT_EXTRA_END
256 };
257
258 static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
259 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
260 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
261 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
262 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
263 EVENT_EXTRA_END
264 };
265
266 static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
267 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
268 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
269 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
270 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
271 EVENT_EXTRA_END
272 };
273
274 static struct extra_reg intel_skl_extra_regs[] __read_mostly = {
275 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
276 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
277 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
278 /*
279 * Note that the low 8 bits (the eventsel code) do not form a continuous
280 * field; they contain some bits that #GP if set. These are masked out.
281 */
282 INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
283 EVENT_EXTRA_END
284 };
285
286 static struct event_constraint intel_icl_event_constraints[] = {
287 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
288 FIXED_EVENT_CONSTRAINT(0x01c0, 0), /* old INST_RETIRED.PREC_DIST */
289 FIXED_EVENT_CONSTRAINT(0x0100, 0), /* INST_RETIRED.PREC_DIST */
290 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
291 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
292 FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */
293 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0),
294 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1),
295 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2),
296 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3),
297 INTEL_EVENT_CONSTRAINT_RANGE(0x03, 0x0a, 0xf),
298 INTEL_EVENT_CONSTRAINT_RANGE(0x1f, 0x28, 0xf),
299 INTEL_EVENT_CONSTRAINT(0x32, 0xf), /* SW_PREFETCH_ACCESS.* */
300 INTEL_EVENT_CONSTRAINT_RANGE(0x48, 0x56, 0xf),
301 INTEL_EVENT_CONSTRAINT_RANGE(0x60, 0x8b, 0xf),
302 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xff), /* CYCLE_ACTIVITY.STALLS_TOTAL */
303 INTEL_UEVENT_CONSTRAINT(0x10a3, 0xff), /* CYCLE_ACTIVITY.CYCLES_MEM_ANY */
304 INTEL_UEVENT_CONSTRAINT(0x14a3, 0xff), /* CYCLE_ACTIVITY.STALLS_MEM_ANY */
305 INTEL_EVENT_CONSTRAINT(0xa3, 0xf), /* CYCLE_ACTIVITY.* */
306 INTEL_EVENT_CONSTRAINT_RANGE(0xa8, 0xb0, 0xf),
307 INTEL_EVENT_CONSTRAINT_RANGE(0xb7, 0xbd, 0xf),
308 INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xe6, 0xf),
309 INTEL_EVENT_CONSTRAINT(0xef, 0xf),
310 INTEL_EVENT_CONSTRAINT_RANGE(0xf0, 0xf4, 0xf),
311 EVENT_CONSTRAINT_END
312 };
313
314 static struct extra_reg intel_icl_extra_regs[] __read_mostly = {
315 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffbfffull, RSP_0),
316 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffffbfffull, RSP_1),
317 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
318 INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
319 EVENT_EXTRA_END
320 };
321
322 static struct extra_reg intel_glc_extra_regs[] __read_mostly = {
323 INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
324 INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
325 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
326 INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE),
327 INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0x7, FE),
328 INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
329 EVENT_EXTRA_END
330 };
331
332 static struct event_constraint intel_glc_event_constraints[] = {
333 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
334 FIXED_EVENT_CONSTRAINT(0x0100, 0), /* INST_RETIRED.PREC_DIST */
335 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
336 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
337 FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF_TSC_P */
338 FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */
339 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0),
340 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1),
341 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2),
342 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3),
343 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_HEAVY_OPS, 4),
344 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BR_MISPREDICT, 5),
345 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FETCH_LAT, 6),
346 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_MEM_BOUND, 7),
347
348 INTEL_EVENT_CONSTRAINT(0x2e, 0xff),
349 INTEL_EVENT_CONSTRAINT(0x3c, 0xff),
350 /*
351 * Generally event codes < 0x90 are restricted to counters 0-3.
352 * Events 0x2E and 0x3C are exceptions, which have no restriction.
353 */
354 INTEL_EVENT_CONSTRAINT_RANGE(0x01, 0x8f, 0xf),
355
356 INTEL_UEVENT_CONSTRAINT(0x01a3, 0xf),
357 INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf),
358 INTEL_UEVENT_CONSTRAINT(0x08a3, 0xf),
359 INTEL_UEVENT_CONSTRAINT(0x04a4, 0x1),
360 INTEL_UEVENT_CONSTRAINT(0x08a4, 0x1),
361 INTEL_UEVENT_CONSTRAINT(0x02cd, 0x1),
362 INTEL_EVENT_CONSTRAINT(0xce, 0x1),
363 INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xdf, 0xf),
364 /*
365 * Generally event codes >= 0x90 are likely to have no restrictions.
366 * The exceptions are defined above.
367 */
368 INTEL_EVENT_CONSTRAINT_RANGE(0x90, 0xfe, 0xff),
369
370 EVENT_CONSTRAINT_END
371 };
372
373 static struct extra_reg intel_rwc_extra_regs[] __read_mostly = {
374 INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
375 INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
376 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
377 INTEL_UEVENT_EXTRA_REG(0x02c6, MSR_PEBS_FRONTEND, 0x9, FE),
378 INTEL_UEVENT_EXTRA_REG(0x03c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE),
379 INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0x7, FE),
380 INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
381 EVENT_EXTRA_END
382 };
383
384 static struct event_constraint intel_lnc_event_constraints[] = {
385 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
386 FIXED_EVENT_CONSTRAINT(0x0100, 0), /* INST_RETIRED.PREC_DIST */
387 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
388 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
389 FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF_TSC_P */
390 FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */
391 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0),
392 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1),
393 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2),
394 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3),
395 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_HEAVY_OPS, 4),
396 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BR_MISPREDICT, 5),
397 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FETCH_LAT, 6),
398 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_MEM_BOUND, 7),
399
400 INTEL_UEVENT_CONSTRAINT(0x0148, 0x4),
401 INTEL_UEVENT_CONSTRAINT(0x0175, 0x4),
402
403 INTEL_EVENT_CONSTRAINT(0x2e, 0x3ff),
404 INTEL_EVENT_CONSTRAINT(0x3c, 0x3ff),
405 /*
406 * Generally event codes < 0x90 are restricted to counters 0-3.
407 * Events 0x2E and 0x3C are exceptions, which have no restriction.
408 */
409 INTEL_EVENT_CONSTRAINT_RANGE(0x01, 0x8f, 0xf),
410
411 INTEL_UEVENT_CONSTRAINT(0x01a3, 0xf),
412 INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf),
413 INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
414 INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
415 INTEL_UEVENT_CONSTRAINT(0x04a4, 0x1),
416 INTEL_UEVENT_CONSTRAINT(0x08a4, 0x1),
417 INTEL_UEVENT_CONSTRAINT(0x10a4, 0x1),
418 INTEL_UEVENT_CONSTRAINT(0x01b1, 0x8),
419 INTEL_UEVENT_CONSTRAINT(0x02cd, 0x3),
420 INTEL_EVENT_CONSTRAINT(0xce, 0x1),
421
422 INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xdf, 0xf),
423 /*
424 * Generally event codes >= 0x90 are likely to have no restrictions.
425 * The exceptions are defined above.
426 */
427 INTEL_EVENT_CONSTRAINT_RANGE(0x90, 0xfe, 0x3ff),
428
429 EVENT_CONSTRAINT_END
430 };
431
432
433 EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
434 EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3");
435 EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2");
436
437 static struct attribute *nhm_mem_events_attrs[] = {
438 EVENT_PTR(mem_ld_nhm),
439 NULL,
440 };
441
442 /*
443 * topdown events for Intel Core CPUs.
444 *
445 * The events are all counted in slots, where a slot is one issue
446 * opportunity in the 4 wide pipeline. Some events are already reported
447 * in slots; for cycle events we multiply by the pipeline width (4).
448 *
449 * With Hyper Threading on, topdown metrics are either summed or averaged
450 * between the threads of a core: (count_t0 + count_t1).
451 *
452 * For the average case the metric is always scaled to pipeline width,
453 * so we use factor 2 ((count_t0 + count_t1) / 2 * 4)
454 */
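/*
 * Illustrative sketch (derived from the TopDown method these events feed,
 * not something computed in this file): user space tools such as perf stat
 * typically combine the attributes below into the level-1 metrics as
 *
 *	frontend_bound  = topdown-fetch-bubbles / topdown-total-slots
 *	retiring        = topdown-slots-retired / topdown-total-slots
 *	bad_speculation = (topdown-slots-issued - topdown-slots-retired +
 *			   topdown-recovery-bubbles) / topdown-total-slots
 *	backend_bound   = 1 - frontend_bound - retiring - bad_speculation
 *
 * with the .scale attributes supplying the 4 (or 2, with HT) slots-per-cycle
 * factor described above.
 */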
455
456 EVENT_ATTR_STR_HT(topdown-total-slots, td_total_slots,
457 "event=0x3c,umask=0x0", /* cpu_clk_unhalted.thread */
458 "event=0x3c,umask=0x0,any=1"); /* cpu_clk_unhalted.thread_any */
459 EVENT_ATTR_STR_HT(topdown-total-slots.scale, td_total_slots_scale, "4", "2");
460 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued,
461 "event=0xe,umask=0x1"); /* uops_issued.any */
462 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired,
463 "event=0xc2,umask=0x2"); /* uops_retired.retire_slots */
464 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles,
465 "event=0x9c,umask=0x1"); /* idq_uops_not_delivered_core */
466 EVENT_ATTR_STR_HT(topdown-recovery-bubbles, td_recovery_bubbles,
467 "event=0xd,umask=0x3,cmask=1", /* int_misc.recovery_cycles */
468 "event=0xd,umask=0x3,cmask=1,any=1"); /* int_misc.recovery_cycles_any */
469 EVENT_ATTR_STR_HT(topdown-recovery-bubbles.scale, td_recovery_bubbles_scale,
470 "4", "2");
471
472 EVENT_ATTR_STR(slots, slots, "event=0x00,umask=0x4");
473 EVENT_ATTR_STR(topdown-retiring, td_retiring, "event=0x00,umask=0x80");
474 EVENT_ATTR_STR(topdown-bad-spec, td_bad_spec, "event=0x00,umask=0x81");
475 EVENT_ATTR_STR(topdown-fe-bound, td_fe_bound, "event=0x00,umask=0x82");
476 EVENT_ATTR_STR(topdown-be-bound, td_be_bound, "event=0x00,umask=0x83");
477 EVENT_ATTR_STR(topdown-heavy-ops, td_heavy_ops, "event=0x00,umask=0x84");
478 EVENT_ATTR_STR(topdown-br-mispredict, td_br_mispredict, "event=0x00,umask=0x85");
479 EVENT_ATTR_STR(topdown-fetch-lat, td_fetch_lat, "event=0x00,umask=0x86");
480 EVENT_ATTR_STR(topdown-mem-bound, td_mem_bound, "event=0x00,umask=0x87");
481
482 static struct attribute *snb_events_attrs[] = {
483 EVENT_PTR(td_slots_issued),
484 EVENT_PTR(td_slots_retired),
485 EVENT_PTR(td_fetch_bubbles),
486 EVENT_PTR(td_total_slots),
487 EVENT_PTR(td_total_slots_scale),
488 EVENT_PTR(td_recovery_bubbles),
489 EVENT_PTR(td_recovery_bubbles_scale),
490 NULL,
491 };
492
493 static struct attribute *snb_mem_events_attrs[] = {
494 EVENT_PTR(mem_ld_snb),
495 EVENT_PTR(mem_st_snb),
496 NULL,
497 };
498
499 static struct event_constraint intel_hsw_event_constraints[] = {
500 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
501 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
502 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
503 INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
504 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
505 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
506 /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
507 INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
508 /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
509 INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
510 /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
511 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
512
513 /*
514 * When HT is off these events can only run on the bottom 4 counters
515 * When HT is on, they are impacted by the HT bug and require EXCL access
516 */
517 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
518 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
519 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
520 INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
521
522 EVENT_CONSTRAINT_END
523 };
524
525 static struct event_constraint intel_bdw_event_constraints[] = {
526 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
527 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
528 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
529 INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
530 INTEL_UBIT_EVENT_CONSTRAINT(0x8a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_MISS */
531 /*
532 * when HT is off, these can only run on the bottom 4 counters
533 */
534 INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */
535 INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */
536 INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */
537 INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */
538 EVENT_CONSTRAINT_END
539 };
540
541 static u64 intel_pmu_event_map(int hw_event)
542 {
543 return intel_perfmon_event_map[hw_event];
544 }
545
546 static __initconst const u64 glc_hw_cache_event_ids
547 [PERF_COUNT_HW_CACHE_MAX]
548 [PERF_COUNT_HW_CACHE_OP_MAX]
549 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
550 {
551 [ C(L1D ) ] = {
552 [ C(OP_READ) ] = {
553 [ C(RESULT_ACCESS) ] = 0x81d0,
554 [ C(RESULT_MISS) ] = 0xe124,
555 },
556 [ C(OP_WRITE) ] = {
557 [ C(RESULT_ACCESS) ] = 0x82d0,
558 },
559 },
560 [ C(L1I ) ] = {
561 [ C(OP_READ) ] = {
562 [ C(RESULT_MISS) ] = 0xe424,
563 },
564 [ C(OP_WRITE) ] = {
565 [ C(RESULT_ACCESS) ] = -1,
566 [ C(RESULT_MISS) ] = -1,
567 },
568 },
569 [ C(LL ) ] = {
570 [ C(OP_READ) ] = {
571 [ C(RESULT_ACCESS) ] = 0x12a,
572 [ C(RESULT_MISS) ] = 0x12a,
573 },
574 [ C(OP_WRITE) ] = {
575 [ C(RESULT_ACCESS) ] = 0x12a,
576 [ C(RESULT_MISS) ] = 0x12a,
577 },
578 },
579 [ C(DTLB) ] = {
580 [ C(OP_READ) ] = {
581 [ C(RESULT_ACCESS) ] = 0x81d0,
582 [ C(RESULT_MISS) ] = 0xe12,
583 },
584 [ C(OP_WRITE) ] = {
585 [ C(RESULT_ACCESS) ] = 0x82d0,
586 [ C(RESULT_MISS) ] = 0xe13,
587 },
588 },
589 [ C(ITLB) ] = {
590 [ C(OP_READ) ] = {
591 [ C(RESULT_ACCESS) ] = -1,
592 [ C(RESULT_MISS) ] = 0xe11,
593 },
594 [ C(OP_WRITE) ] = {
595 [ C(RESULT_ACCESS) ] = -1,
596 [ C(RESULT_MISS) ] = -1,
597 },
598 [ C(OP_PREFETCH) ] = {
599 [ C(RESULT_ACCESS) ] = -1,
600 [ C(RESULT_MISS) ] = -1,
601 },
602 },
603 [ C(BPU ) ] = {
604 [ C(OP_READ) ] = {
605 [ C(RESULT_ACCESS) ] = 0x4c4,
606 [ C(RESULT_MISS) ] = 0x4c5,
607 },
608 [ C(OP_WRITE) ] = {
609 [ C(RESULT_ACCESS) ] = -1,
610 [ C(RESULT_MISS) ] = -1,
611 },
612 [ C(OP_PREFETCH) ] = {
613 [ C(RESULT_ACCESS) ] = -1,
614 [ C(RESULT_MISS) ] = -1,
615 },
616 },
617 [ C(NODE) ] = {
618 [ C(OP_READ) ] = {
619 [ C(RESULT_ACCESS) ] = 0x12a,
620 [ C(RESULT_MISS) ] = 0x12a,
621 },
622 },
623 };
624
625 static __initconst const u64 glc_hw_cache_extra_regs
626 [PERF_COUNT_HW_CACHE_MAX]
627 [PERF_COUNT_HW_CACHE_OP_MAX]
628 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
629 {
630 [ C(LL ) ] = {
631 [ C(OP_READ) ] = {
632 [ C(RESULT_ACCESS) ] = 0x10001,
633 [ C(RESULT_MISS) ] = 0x3fbfc00001,
634 },
635 [ C(OP_WRITE) ] = {
636 [ C(RESULT_ACCESS) ] = 0x3f3ffc0002,
637 [ C(RESULT_MISS) ] = 0x3f3fc00002,
638 },
639 },
640 [ C(NODE) ] = {
641 [ C(OP_READ) ] = {
642 [ C(RESULT_ACCESS) ] = 0x10c000001,
643 [ C(RESULT_MISS) ] = 0x3fb3000001,
644 },
645 },
646 };
647
648 /*
649 * Notes on the events:
650 * - data reads do not include code reads (comparable to earlier tables)
651 * - data counts include speculative execution (except L1 write, dtlb, bpu)
652 * - remote node access includes remote memory, remote cache, remote mmio.
653 * - prefetches are not included in the counts.
654 * - icache miss does not include decoded icache
655 */
656
657 #define SKL_DEMAND_DATA_RD BIT_ULL(0)
658 #define SKL_DEMAND_RFO BIT_ULL(1)
659 #define SKL_ANY_RESPONSE BIT_ULL(16)
660 #define SKL_SUPPLIER_NONE BIT_ULL(17)
661 #define SKL_L3_MISS_LOCAL_DRAM BIT_ULL(26)
662 #define SKL_L3_MISS_REMOTE_HOP0_DRAM BIT_ULL(27)
663 #define SKL_L3_MISS_REMOTE_HOP1_DRAM BIT_ULL(28)
664 #define SKL_L3_MISS_REMOTE_HOP2P_DRAM BIT_ULL(29)
665 #define SKL_L3_MISS (SKL_L3_MISS_LOCAL_DRAM| \
666 SKL_L3_MISS_REMOTE_HOP0_DRAM| \
667 SKL_L3_MISS_REMOTE_HOP1_DRAM| \
668 SKL_L3_MISS_REMOTE_HOP2P_DRAM)
669 #define SKL_SPL_HIT BIT_ULL(30)
670 #define SKL_SNOOP_NONE BIT_ULL(31)
671 #define SKL_SNOOP_NOT_NEEDED BIT_ULL(32)
672 #define SKL_SNOOP_MISS BIT_ULL(33)
673 #define SKL_SNOOP_HIT_NO_FWD BIT_ULL(34)
674 #define SKL_SNOOP_HIT_WITH_FWD BIT_ULL(35)
675 #define SKL_SNOOP_HITM BIT_ULL(36)
676 #define SKL_SNOOP_NON_DRAM BIT_ULL(37)
677 #define SKL_ANY_SNOOP (SKL_SPL_HIT|SKL_SNOOP_NONE| \
678 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
679 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
680 SKL_SNOOP_HITM|SKL_SNOOP_NON_DRAM)
681 #define SKL_DEMAND_READ SKL_DEMAND_DATA_RD
682 #define SKL_SNOOP_DRAM (SKL_SNOOP_NONE| \
683 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
684 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
685 SKL_SNOOP_HITM|SKL_SPL_HIT)
686 #define SKL_DEMAND_WRITE SKL_DEMAND_RFO
687 #define SKL_LLC_ACCESS SKL_ANY_RESPONSE
688 #define SKL_L3_MISS_REMOTE (SKL_L3_MISS_REMOTE_HOP0_DRAM| \
689 SKL_L3_MISS_REMOTE_HOP1_DRAM| \
690 SKL_L3_MISS_REMOTE_HOP2P_DRAM)
691
692 static __initconst const u64 skl_hw_cache_event_ids
693 [PERF_COUNT_HW_CACHE_MAX]
694 [PERF_COUNT_HW_CACHE_OP_MAX]
695 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
696 {
697 [ C(L1D ) ] = {
698 [ C(OP_READ) ] = {
699 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */
700 [ C(RESULT_MISS) ] = 0x151, /* L1D.REPLACEMENT */
701 },
702 [ C(OP_WRITE) ] = {
703 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */
704 [ C(RESULT_MISS) ] = 0x0,
705 },
706 [ C(OP_PREFETCH) ] = {
707 [ C(RESULT_ACCESS) ] = 0x0,
708 [ C(RESULT_MISS) ] = 0x0,
709 },
710 },
711 [ C(L1I ) ] = {
712 [ C(OP_READ) ] = {
713 [ C(RESULT_ACCESS) ] = 0x0,
714 [ C(RESULT_MISS) ] = 0x283, /* ICACHE_64B.MISS */
715 },
716 [ C(OP_WRITE) ] = {
717 [ C(RESULT_ACCESS) ] = -1,
718 [ C(RESULT_MISS) ] = -1,
719 },
720 [ C(OP_PREFETCH) ] = {
721 [ C(RESULT_ACCESS) ] = 0x0,
722 [ C(RESULT_MISS) ] = 0x0,
723 },
724 },
725 [ C(LL ) ] = {
726 [ C(OP_READ) ] = {
727 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
728 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
729 },
730 [ C(OP_WRITE) ] = {
731 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
732 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
733 },
734 [ C(OP_PREFETCH) ] = {
735 [ C(RESULT_ACCESS) ] = 0x0,
736 [ C(RESULT_MISS) ] = 0x0,
737 },
738 },
739 [ C(DTLB) ] = {
740 [ C(OP_READ) ] = {
741 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */
742 [ C(RESULT_MISS) ] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */
743 },
744 [ C(OP_WRITE) ] = {
745 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */
746 [ C(RESULT_MISS) ] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */
747 },
748 [ C(OP_PREFETCH) ] = {
749 [ C(RESULT_ACCESS) ] = 0x0,
750 [ C(RESULT_MISS) ] = 0x0,
751 },
752 },
753 [ C(ITLB) ] = {
754 [ C(OP_READ) ] = {
755 [ C(RESULT_ACCESS) ] = 0x2085, /* ITLB_MISSES.STLB_HIT */
756 [ C(RESULT_MISS) ] = 0xe85, /* ITLB_MISSES.WALK_COMPLETED */
757 },
758 [ C(OP_WRITE) ] = {
759 [ C(RESULT_ACCESS) ] = -1,
760 [ C(RESULT_MISS) ] = -1,
761 },
762 [ C(OP_PREFETCH) ] = {
763 [ C(RESULT_ACCESS) ] = -1,
764 [ C(RESULT_MISS) ] = -1,
765 },
766 },
767 [ C(BPU ) ] = {
768 [ C(OP_READ) ] = {
769 [ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */
770 [ C(RESULT_MISS) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */
771 },
772 [ C(OP_WRITE) ] = {
773 [ C(RESULT_ACCESS) ] = -1,
774 [ C(RESULT_MISS) ] = -1,
775 },
776 [ C(OP_PREFETCH) ] = {
777 [ C(RESULT_ACCESS) ] = -1,
778 [ C(RESULT_MISS) ] = -1,
779 },
780 },
781 [ C(NODE) ] = {
782 [ C(OP_READ) ] = {
783 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
784 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
785 },
786 [ C(OP_WRITE) ] = {
787 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
788 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
789 },
790 [ C(OP_PREFETCH) ] = {
791 [ C(RESULT_ACCESS) ] = 0x0,
792 [ C(RESULT_MISS) ] = 0x0,
793 },
794 },
795 };
796
797 static __initconst const u64 skl_hw_cache_extra_regs
798 [PERF_COUNT_HW_CACHE_MAX]
799 [PERF_COUNT_HW_CACHE_OP_MAX]
800 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
801 {
802 [ C(LL ) ] = {
803 [ C(OP_READ) ] = {
804 [ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
805 SKL_LLC_ACCESS|SKL_ANY_SNOOP,
806 [ C(RESULT_MISS) ] = SKL_DEMAND_READ|
807 SKL_L3_MISS|SKL_ANY_SNOOP|
808 SKL_SUPPLIER_NONE,
809 },
810 [ C(OP_WRITE) ] = {
811 [ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
812 SKL_LLC_ACCESS|SKL_ANY_SNOOP,
813 [ C(RESULT_MISS) ] = SKL_DEMAND_WRITE|
814 SKL_L3_MISS|SKL_ANY_SNOOP|
815 SKL_SUPPLIER_NONE,
816 },
817 [ C(OP_PREFETCH) ] = {
818 [ C(RESULT_ACCESS) ] = 0x0,
819 [ C(RESULT_MISS) ] = 0x0,
820 },
821 },
822 [ C(NODE) ] = {
823 [ C(OP_READ) ] = {
824 [ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
825 SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
826 [ C(RESULT_MISS) ] = SKL_DEMAND_READ|
827 SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
828 },
829 [ C(OP_WRITE) ] = {
830 [ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
831 SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
832 [ C(RESULT_MISS) ] = SKL_DEMAND_WRITE|
833 SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
834 },
835 [ C(OP_PREFETCH) ] = {
836 [ C(RESULT_ACCESS) ] = 0x0,
837 [ C(RESULT_MISS) ] = 0x0,
838 },
839 },
840 };
841
842 #define SNB_DMND_DATA_RD (1ULL << 0)
843 #define SNB_DMND_RFO (1ULL << 1)
844 #define SNB_DMND_IFETCH (1ULL << 2)
845 #define SNB_DMND_WB (1ULL << 3)
846 #define SNB_PF_DATA_RD (1ULL << 4)
847 #define SNB_PF_RFO (1ULL << 5)
848 #define SNB_PF_IFETCH (1ULL << 6)
849 #define SNB_LLC_DATA_RD (1ULL << 7)
850 #define SNB_LLC_RFO (1ULL << 8)
851 #define SNB_LLC_IFETCH (1ULL << 9)
852 #define SNB_BUS_LOCKS (1ULL << 10)
853 #define SNB_STRM_ST (1ULL << 11)
854 #define SNB_OTHER (1ULL << 15)
855 #define SNB_RESP_ANY (1ULL << 16)
856 #define SNB_NO_SUPP (1ULL << 17)
857 #define SNB_LLC_HITM (1ULL << 18)
858 #define SNB_LLC_HITE (1ULL << 19)
859 #define SNB_LLC_HITS (1ULL << 20)
860 #define SNB_LLC_HITF (1ULL << 21)
861 #define SNB_LOCAL (1ULL << 22)
862 #define SNB_REMOTE (0xffULL << 23)
863 #define SNB_SNP_NONE (1ULL << 31)
864 #define SNB_SNP_NOT_NEEDED (1ULL << 32)
865 #define SNB_SNP_MISS (1ULL << 33)
866 #define SNB_NO_FWD (1ULL << 34)
867 #define SNB_SNP_FWD (1ULL << 35)
868 #define SNB_HITM (1ULL << 36)
869 #define SNB_NON_DRAM (1ULL << 37)
870
871 #define SNB_DMND_READ (SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
872 #define SNB_DMND_WRITE (SNB_DMND_RFO|SNB_LLC_RFO)
873 #define SNB_DMND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO)
874
875 #define SNB_SNP_ANY (SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
876 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \
877 SNB_HITM)
878
879 #define SNB_DRAM_ANY (SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
880 #define SNB_DRAM_REMOTE (SNB_REMOTE|SNB_SNP_ANY)
881
882 #define SNB_L3_ACCESS SNB_RESP_ANY
883 #define SNB_L3_MISS (SNB_DRAM_ANY|SNB_NON_DRAM)
884
885 static __initconst const u64 snb_hw_cache_extra_regs
886 [PERF_COUNT_HW_CACHE_MAX]
887 [PERF_COUNT_HW_CACHE_OP_MAX]
888 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
889 {
890 [ C(LL ) ] = {
891 [ C(OP_READ) ] = {
892 [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS,
893 [ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_L3_MISS,
894 },
895 [ C(OP_WRITE) ] = {
896 [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS,
897 [ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_L3_MISS,
898 },
899 [ C(OP_PREFETCH) ] = {
900 [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS,
901 [ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_L3_MISS,
902 },
903 },
904 [ C(NODE) ] = {
905 [ C(OP_READ) ] = {
906 [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY,
907 [ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_DRAM_REMOTE,
908 },
909 [ C(OP_WRITE) ] = {
910 [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY,
911 [ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE,
912 },
913 [ C(OP_PREFETCH) ] = {
914 [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY,
915 [ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE,
916 },
917 },
918 };
919
920 static __initconst const u64 snb_hw_cache_event_ids
921 [PERF_COUNT_HW_CACHE_MAX]
922 [PERF_COUNT_HW_CACHE_OP_MAX]
923 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
924 {
925 [ C(L1D) ] = {
926 [ C(OP_READ) ] = {
927 [ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
928 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPLACEMENT */
929 },
930 [ C(OP_WRITE) ] = {
931 [ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
932 [ C(RESULT_MISS) ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
933 },
934 [ C(OP_PREFETCH) ] = {
935 [ C(RESULT_ACCESS) ] = 0x0,
936 [ C(RESULT_MISS) ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
937 },
938 },
939 [ C(L1I ) ] = {
940 [ C(OP_READ) ] = {
941 [ C(RESULT_ACCESS) ] = 0x0,
942 [ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */
943 },
944 [ C(OP_WRITE) ] = {
945 [ C(RESULT_ACCESS) ] = -1,
946 [ C(RESULT_MISS) ] = -1,
947 },
948 [ C(OP_PREFETCH) ] = {
949 [ C(RESULT_ACCESS) ] = 0x0,
950 [ C(RESULT_MISS) ] = 0x0,
951 },
952 },
953 [ C(LL ) ] = {
954 [ C(OP_READ) ] = {
955 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
956 [ C(RESULT_ACCESS) ] = 0x01b7,
957 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
958 [ C(RESULT_MISS) ] = 0x01b7,
959 },
960 [ C(OP_WRITE) ] = {
961 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
962 [ C(RESULT_ACCESS) ] = 0x01b7,
963 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
964 [ C(RESULT_MISS) ] = 0x01b7,
965 },
966 [ C(OP_PREFETCH) ] = {
967 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
968 [ C(RESULT_ACCESS) ] = 0x01b7,
969 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
970 [ C(RESULT_MISS) ] = 0x01b7,
971 },
972 },
973 [ C(DTLB) ] = {
974 [ C(OP_READ) ] = {
975 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
976 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
977 },
978 [ C(OP_WRITE) ] = {
979 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
980 [ C(RESULT_MISS) ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
981 },
982 [ C(OP_PREFETCH) ] = {
983 [ C(RESULT_ACCESS) ] = 0x0,
984 [ C(RESULT_MISS) ] = 0x0,
985 },
986 },
987 [ C(ITLB) ] = {
988 [ C(OP_READ) ] = {
989 [ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
990 [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
991 },
992 [ C(OP_WRITE) ] = {
993 [ C(RESULT_ACCESS) ] = -1,
994 [ C(RESULT_MISS) ] = -1,
995 },
996 [ C(OP_PREFETCH) ] = {
997 [ C(RESULT_ACCESS) ] = -1,
998 [ C(RESULT_MISS) ] = -1,
999 },
1000 },
1001 [ C(BPU ) ] = {
1002 [ C(OP_READ) ] = {
1003 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1004 [ C(RESULT_MISS) ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
1005 },
1006 [ C(OP_WRITE) ] = {
1007 [ C(RESULT_ACCESS) ] = -1,
1008 [ C(RESULT_MISS) ] = -1,
1009 },
1010 [ C(OP_PREFETCH) ] = {
1011 [ C(RESULT_ACCESS) ] = -1,
1012 [ C(RESULT_MISS) ] = -1,
1013 },
1014 },
1015 [ C(NODE) ] = {
1016 [ C(OP_READ) ] = {
1017 [ C(RESULT_ACCESS) ] = 0x01b7,
1018 [ C(RESULT_MISS) ] = 0x01b7,
1019 },
1020 [ C(OP_WRITE) ] = {
1021 [ C(RESULT_ACCESS) ] = 0x01b7,
1022 [ C(RESULT_MISS) ] = 0x01b7,
1023 },
1024 [ C(OP_PREFETCH) ] = {
1025 [ C(RESULT_ACCESS) ] = 0x01b7,
1026 [ C(RESULT_MISS) ] = 0x01b7,
1027 },
1028 },
1029
1030 };
1031
1032 /*
1033 * Notes on the events:
1034 * - data reads do not include code reads (comparable to earlier tables)
1035 * - data counts include speculative execution (except L1 write, dtlb, bpu)
1036 * - remote node access includes remote memory, remote cache, remote mmio.
1037 * - prefetches are not included in the counts because they are not
1038 * reliably counted.
1039 */
1040
1041 #define HSW_DEMAND_DATA_RD BIT_ULL(0)
1042 #define HSW_DEMAND_RFO BIT_ULL(1)
1043 #define HSW_ANY_RESPONSE BIT_ULL(16)
1044 #define HSW_SUPPLIER_NONE BIT_ULL(17)
1045 #define HSW_L3_MISS_LOCAL_DRAM BIT_ULL(22)
1046 #define HSW_L3_MISS_REMOTE_HOP0 BIT_ULL(27)
1047 #define HSW_L3_MISS_REMOTE_HOP1 BIT_ULL(28)
1048 #define HSW_L3_MISS_REMOTE_HOP2P BIT_ULL(29)
1049 #define HSW_L3_MISS (HSW_L3_MISS_LOCAL_DRAM| \
1050 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
1051 HSW_L3_MISS_REMOTE_HOP2P)
1052 #define HSW_SNOOP_NONE BIT_ULL(31)
1053 #define HSW_SNOOP_NOT_NEEDED BIT_ULL(32)
1054 #define HSW_SNOOP_MISS BIT_ULL(33)
1055 #define HSW_SNOOP_HIT_NO_FWD BIT_ULL(34)
1056 #define HSW_SNOOP_HIT_WITH_FWD BIT_ULL(35)
1057 #define HSW_SNOOP_HITM BIT_ULL(36)
1058 #define HSW_SNOOP_NON_DRAM BIT_ULL(37)
1059 #define HSW_ANY_SNOOP (HSW_SNOOP_NONE| \
1060 HSW_SNOOP_NOT_NEEDED|HSW_SNOOP_MISS| \
1061 HSW_SNOOP_HIT_NO_FWD|HSW_SNOOP_HIT_WITH_FWD| \
1062 HSW_SNOOP_HITM|HSW_SNOOP_NON_DRAM)
1063 #define HSW_SNOOP_DRAM (HSW_ANY_SNOOP & ~HSW_SNOOP_NON_DRAM)
1064 #define HSW_DEMAND_READ HSW_DEMAND_DATA_RD
1065 #define HSW_DEMAND_WRITE HSW_DEMAND_RFO
1066 #define HSW_L3_MISS_REMOTE (HSW_L3_MISS_REMOTE_HOP0|\
1067 HSW_L3_MISS_REMOTE_HOP1|HSW_L3_MISS_REMOTE_HOP2P)
1068 #define HSW_LLC_ACCESS HSW_ANY_RESPONSE
1069
1070 #define BDW_L3_MISS_LOCAL BIT(26)
1071 #define BDW_L3_MISS (BDW_L3_MISS_LOCAL| \
1072 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
1073 HSW_L3_MISS_REMOTE_HOP2P)
1074
1075
1076 static __initconst const u64 hsw_hw_cache_event_ids
1077 [PERF_COUNT_HW_CACHE_MAX]
1078 [PERF_COUNT_HW_CACHE_OP_MAX]
1079 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1080 {
1081 [ C(L1D ) ] = {
1082 [ C(OP_READ) ] = {
1083 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1084 [ C(RESULT_MISS) ] = 0x151, /* L1D.REPLACEMENT */
1085 },
1086 [ C(OP_WRITE) ] = {
1087 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1088 [ C(RESULT_MISS) ] = 0x0,
1089 },
1090 [ C(OP_PREFETCH) ] = {
1091 [ C(RESULT_ACCESS) ] = 0x0,
1092 [ C(RESULT_MISS) ] = 0x0,
1093 },
1094 },
1095 [ C(L1I ) ] = {
1096 [ C(OP_READ) ] = {
1097 [ C(RESULT_ACCESS) ] = 0x0,
1098 [ C(RESULT_MISS) ] = 0x280, /* ICACHE.MISSES */
1099 },
1100 [ C(OP_WRITE) ] = {
1101 [ C(RESULT_ACCESS) ] = -1,
1102 [ C(RESULT_MISS) ] = -1,
1103 },
1104 [ C(OP_PREFETCH) ] = {
1105 [ C(RESULT_ACCESS) ] = 0x0,
1106 [ C(RESULT_MISS) ] = 0x0,
1107 },
1108 },
1109 [ C(LL ) ] = {
1110 [ C(OP_READ) ] = {
1111 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
1112 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
1113 },
1114 [ C(OP_WRITE) ] = {
1115 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
1116 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
1117 },
1118 [ C(OP_PREFETCH) ] = {
1119 [ C(RESULT_ACCESS) ] = 0x0,
1120 [ C(RESULT_MISS) ] = 0x0,
1121 },
1122 },
1123 [ C(DTLB) ] = {
1124 [ C(OP_READ) ] = {
1125 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1126 [ C(RESULT_MISS) ] = 0x108, /* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */
1127 },
1128 [ C(OP_WRITE) ] = {
1129 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1130 [ C(RESULT_MISS) ] = 0x149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
1131 },
1132 [ C(OP_PREFETCH) ] = {
1133 [ C(RESULT_ACCESS) ] = 0x0,
1134 [ C(RESULT_MISS) ] = 0x0,
1135 },
1136 },
1137 [ C(ITLB) ] = {
1138 [ C(OP_READ) ] = {
1139 [ C(RESULT_ACCESS) ] = 0x6085, /* ITLB_MISSES.STLB_HIT */
1140 [ C(RESULT_MISS) ] = 0x185, /* ITLB_MISSES.MISS_CAUSES_A_WALK */
1141 },
1142 [ C(OP_WRITE) ] = {
1143 [ C(RESULT_ACCESS) ] = -1,
1144 [ C(RESULT_MISS) ] = -1,
1145 },
1146 [ C(OP_PREFETCH) ] = {
1147 [ C(RESULT_ACCESS) ] = -1,
1148 [ C(RESULT_MISS) ] = -1,
1149 },
1150 },
1151 [ C(BPU ) ] = {
1152 [ C(OP_READ) ] = {
1153 [ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */
1154 [ C(RESULT_MISS) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */
1155 },
1156 [ C(OP_WRITE) ] = {
1157 [ C(RESULT_ACCESS) ] = -1,
1158 [ C(RESULT_MISS) ] = -1,
1159 },
1160 [ C(OP_PREFETCH) ] = {
1161 [ C(RESULT_ACCESS) ] = -1,
1162 [ C(RESULT_MISS) ] = -1,
1163 },
1164 },
1165 [ C(NODE) ] = {
1166 [ C(OP_READ) ] = {
1167 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
1168 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
1169 },
1170 [ C(OP_WRITE) ] = {
1171 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
1172 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
1173 },
1174 [ C(OP_PREFETCH) ] = {
1175 [ C(RESULT_ACCESS) ] = 0x0,
1176 [ C(RESULT_MISS) ] = 0x0,
1177 },
1178 },
1179 };
1180
1181 static __initconst const u64 hsw_hw_cache_extra_regs
1182 [PERF_COUNT_HW_CACHE_MAX]
1183 [PERF_COUNT_HW_CACHE_OP_MAX]
1184 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1185 {
1186 [ C(LL ) ] = {
1187 [ C(OP_READ) ] = {
1188 [ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
1189 HSW_LLC_ACCESS,
1190 [ C(RESULT_MISS) ] = HSW_DEMAND_READ|
1191 HSW_L3_MISS|HSW_ANY_SNOOP,
1192 },
1193 [ C(OP_WRITE) ] = {
1194 [ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
1195 HSW_LLC_ACCESS,
1196 [ C(RESULT_MISS) ] = HSW_DEMAND_WRITE|
1197 HSW_L3_MISS|HSW_ANY_SNOOP,
1198 },
1199 [ C(OP_PREFETCH) ] = {
1200 [ C(RESULT_ACCESS) ] = 0x0,
1201 [ C(RESULT_MISS) ] = 0x0,
1202 },
1203 },
1204 [ C(NODE) ] = {
1205 [ C(OP_READ) ] = {
1206 [ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
1207 HSW_L3_MISS_LOCAL_DRAM|
1208 HSW_SNOOP_DRAM,
1209 [ C(RESULT_MISS) ] = HSW_DEMAND_READ|
1210 HSW_L3_MISS_REMOTE|
1211 HSW_SNOOP_DRAM,
1212 },
1213 [ C(OP_WRITE) ] = {
1214 [ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
1215 HSW_L3_MISS_LOCAL_DRAM|
1216 HSW_SNOOP_DRAM,
1217 [ C(RESULT_MISS) ] = HSW_DEMAND_WRITE|
1218 HSW_L3_MISS_REMOTE|
1219 HSW_SNOOP_DRAM,
1220 },
1221 [ C(OP_PREFETCH) ] = {
1222 [ C(RESULT_ACCESS) ] = 0x0,
1223 [ C(RESULT_MISS) ] = 0x0,
1224 },
1225 },
1226 };
1227
1228 static __initconst const u64 westmere_hw_cache_event_ids
1229 [PERF_COUNT_HW_CACHE_MAX]
1230 [PERF_COUNT_HW_CACHE_OP_MAX]
1231 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1232 {
1233 [ C(L1D) ] = {
1234 [ C(OP_READ) ] = {
1235 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
1236 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
1237 },
1238 [ C(OP_WRITE) ] = {
1239 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
1240 [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
1241 },
1242 [ C(OP_PREFETCH) ] = {
1243 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
1244 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
1245 },
1246 },
1247 [ C(L1I ) ] = {
1248 [ C(OP_READ) ] = {
1249 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
1250 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
1251 },
1252 [ C(OP_WRITE) ] = {
1253 [ C(RESULT_ACCESS) ] = -1,
1254 [ C(RESULT_MISS) ] = -1,
1255 },
1256 [ C(OP_PREFETCH) ] = {
1257 [ C(RESULT_ACCESS) ] = 0x0,
1258 [ C(RESULT_MISS) ] = 0x0,
1259 },
1260 },
1261 [ C(LL ) ] = {
1262 [ C(OP_READ) ] = {
1263 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1264 [ C(RESULT_ACCESS) ] = 0x01b7,
1265 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
1266 [ C(RESULT_MISS) ] = 0x01b7,
1267 },
1268 /*
1269 * Use RFO, not WRITEBACK, because a write miss would typically occur
1270 * on RFO.
1271 */
1272 [ C(OP_WRITE) ] = {
1273 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1274 [ C(RESULT_ACCESS) ] = 0x01b7,
1275 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1276 [ C(RESULT_MISS) ] = 0x01b7,
1277 },
1278 [ C(OP_PREFETCH) ] = {
1279 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1280 [ C(RESULT_ACCESS) ] = 0x01b7,
1281 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1282 [ C(RESULT_MISS) ] = 0x01b7,
1283 },
1284 },
1285 [ C(DTLB) ] = {
1286 [ C(OP_READ) ] = {
1287 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
1288 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
1289 },
1290 [ C(OP_WRITE) ] = {
1291 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
1292 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
1293 },
1294 [ C(OP_PREFETCH) ] = {
1295 [ C(RESULT_ACCESS) ] = 0x0,
1296 [ C(RESULT_MISS) ] = 0x0,
1297 },
1298 },
1299 [ C(ITLB) ] = {
1300 [ C(OP_READ) ] = {
1301 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
1302 [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */
1303 },
1304 [ C(OP_WRITE) ] = {
1305 [ C(RESULT_ACCESS) ] = -1,
1306 [ C(RESULT_MISS) ] = -1,
1307 },
1308 [ C(OP_PREFETCH) ] = {
1309 [ C(RESULT_ACCESS) ] = -1,
1310 [ C(RESULT_MISS) ] = -1,
1311 },
1312 },
1313 [ C(BPU ) ] = {
1314 [ C(OP_READ) ] = {
1315 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1316 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
1317 },
1318 [ C(OP_WRITE) ] = {
1319 [ C(RESULT_ACCESS) ] = -1,
1320 [ C(RESULT_MISS) ] = -1,
1321 },
1322 [ C(OP_PREFETCH) ] = {
1323 [ C(RESULT_ACCESS) ] = -1,
1324 [ C(RESULT_MISS) ] = -1,
1325 },
1326 },
1327 [ C(NODE) ] = {
1328 [ C(OP_READ) ] = {
1329 [ C(RESULT_ACCESS) ] = 0x01b7,
1330 [ C(RESULT_MISS) ] = 0x01b7,
1331 },
1332 [ C(OP_WRITE) ] = {
1333 [ C(RESULT_ACCESS) ] = 0x01b7,
1334 [ C(RESULT_MISS) ] = 0x01b7,
1335 },
1336 [ C(OP_PREFETCH) ] = {
1337 [ C(RESULT_ACCESS) ] = 0x01b7,
1338 [ C(RESULT_MISS) ] = 0x01b7,
1339 },
1340 },
1341 };
1342
1343 /*
1344 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
1345 * See IA32 SDM Vol 3B 30.6.1.3
1346 */
1347
1348 #define NHM_DMND_DATA_RD (1 << 0)
1349 #define NHM_DMND_RFO (1 << 1)
1350 #define NHM_DMND_IFETCH (1 << 2)
1351 #define NHM_DMND_WB (1 << 3)
1352 #define NHM_PF_DATA_RD (1 << 4)
1353 #define NHM_PF_DATA_RFO (1 << 5)
1354 #define NHM_PF_IFETCH (1 << 6)
1355 #define NHM_OFFCORE_OTHER (1 << 7)
1356 #define NHM_UNCORE_HIT (1 << 8)
1357 #define NHM_OTHER_CORE_HIT_SNP (1 << 9)
1358 #define NHM_OTHER_CORE_HITM (1 << 10)
1359 /* reserved */
1360 #define NHM_REMOTE_CACHE_FWD (1 << 12)
1361 #define NHM_REMOTE_DRAM (1 << 13)
1362 #define NHM_LOCAL_DRAM (1 << 14)
1363 #define NHM_NON_DRAM (1 << 15)
1364
1365 #define NHM_LOCAL (NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
1366 #define NHM_REMOTE (NHM_REMOTE_DRAM)
1367
1368 #define NHM_DMND_READ (NHM_DMND_DATA_RD)
1369 #define NHM_DMND_WRITE (NHM_DMND_RFO|NHM_DMND_WB)
1370 #define NHM_DMND_PREFETCH (NHM_PF_DATA_RD|NHM_PF_DATA_RFO)
1371
1372 #define NHM_L3_HIT (NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
1373 #define NHM_L3_MISS (NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
1374 #define NHM_L3_ACCESS (NHM_L3_HIT|NHM_L3_MISS)
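/*
 * Example of how these masks are consumed (a sketch of the generic extra_reg
 * flow, not an additional hardware definition): for the LL/OP_READ/RESULT_MISS
 * generic cache event, nehalem_hw_cache_event_ids selects event 0x01b7
 * (OFFCORE_RESPONSE), while the matching entry in nehalem_hw_cache_extra_regs
 * below supplies NHM_DMND_READ|NHM_L3_MISS as the value to be programmed into
 * MSR_OFFCORE_RSP_0 for that event.
 */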
1375
1376 static __initconst const u64 nehalem_hw_cache_extra_regs
1377 [PERF_COUNT_HW_CACHE_MAX]
1378 [PERF_COUNT_HW_CACHE_OP_MAX]
1379 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1380 {
1381 [ C(LL ) ] = {
1382 [ C(OP_READ) ] = {
1383 [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
1384 [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_L3_MISS,
1385 },
1386 [ C(OP_WRITE) ] = {
1387 [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
1388 [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_L3_MISS,
1389 },
1390 [ C(OP_PREFETCH) ] = {
1391 [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
1392 [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
1393 },
1394 },
1395 [ C(NODE) ] = {
1396 [ C(OP_READ) ] = {
1397 [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
1398 [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_REMOTE,
1399 },
1400 [ C(OP_WRITE) ] = {
1401 [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
1402 [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_REMOTE,
1403 },
1404 [ C(OP_PREFETCH) ] = {
1405 [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
1406 [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_REMOTE,
1407 },
1408 },
1409 };
1410
1411 static __initconst const u64 nehalem_hw_cache_event_ids
1412 [PERF_COUNT_HW_CACHE_MAX]
1413 [PERF_COUNT_HW_CACHE_OP_MAX]
1414 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1415 {
1416 [ C(L1D) ] = {
1417 [ C(OP_READ) ] = {
1418 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
1419 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
1420 },
1421 [ C(OP_WRITE) ] = {
1422 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
1423 [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
1424 },
1425 [ C(OP_PREFETCH) ] = {
1426 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
1427 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
1428 },
1429 },
1430 [ C(L1I ) ] = {
1431 [ C(OP_READ) ] = {
1432 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
1433 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
1434 },
1435 [ C(OP_WRITE) ] = {
1436 [ C(RESULT_ACCESS) ] = -1,
1437 [ C(RESULT_MISS) ] = -1,
1438 },
1439 [ C(OP_PREFETCH) ] = {
1440 [ C(RESULT_ACCESS) ] = 0x0,
1441 [ C(RESULT_MISS) ] = 0x0,
1442 },
1443 },
1444 [ C(LL ) ] = {
1445 [ C(OP_READ) ] = {
1446 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1447 [ C(RESULT_ACCESS) ] = 0x01b7,
1448 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
1449 [ C(RESULT_MISS) ] = 0x01b7,
1450 },
1451 /*
1452 * Use RFO, not WRITEBACK, because a write miss would typically occur
1453 * on RFO.
1454 */
1455 [ C(OP_WRITE) ] = {
1456 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1457 [ C(RESULT_ACCESS) ] = 0x01b7,
1458 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1459 [ C(RESULT_MISS) ] = 0x01b7,
1460 },
1461 [ C(OP_PREFETCH) ] = {
1462 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1463 [ C(RESULT_ACCESS) ] = 0x01b7,
1464 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1465 [ C(RESULT_MISS) ] = 0x01b7,
1466 },
1467 },
1468 [ C(DTLB) ] = {
1469 [ C(OP_READ) ] = {
1470 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
1471 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
1472 },
1473 [ C(OP_WRITE) ] = {
1474 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
1475 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
1476 },
1477 [ C(OP_PREFETCH) ] = {
1478 [ C(RESULT_ACCESS) ] = 0x0,
1479 [ C(RESULT_MISS) ] = 0x0,
1480 },
1481 },
1482 [ C(ITLB) ] = {
1483 [ C(OP_READ) ] = {
1484 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
1485 [ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */
1486 },
1487 [ C(OP_WRITE) ] = {
1488 [ C(RESULT_ACCESS) ] = -1,
1489 [ C(RESULT_MISS) ] = -1,
1490 },
1491 [ C(OP_PREFETCH) ] = {
1492 [ C(RESULT_ACCESS) ] = -1,
1493 [ C(RESULT_MISS) ] = -1,
1494 },
1495 },
1496 [ C(BPU ) ] = {
1497 [ C(OP_READ) ] = {
1498 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1499 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
1500 },
1501 [ C(OP_WRITE) ] = {
1502 [ C(RESULT_ACCESS) ] = -1,
1503 [ C(RESULT_MISS) ] = -1,
1504 },
1505 [ C(OP_PREFETCH) ] = {
1506 [ C(RESULT_ACCESS) ] = -1,
1507 [ C(RESULT_MISS) ] = -1,
1508 },
1509 },
1510 [ C(NODE) ] = {
1511 [ C(OP_READ) ] = {
1512 [ C(RESULT_ACCESS) ] = 0x01b7,
1513 [ C(RESULT_MISS) ] = 0x01b7,
1514 },
1515 [ C(OP_WRITE) ] = {
1516 [ C(RESULT_ACCESS) ] = 0x01b7,
1517 [ C(RESULT_MISS) ] = 0x01b7,
1518 },
1519 [ C(OP_PREFETCH) ] = {
1520 [ C(RESULT_ACCESS) ] = 0x01b7,
1521 [ C(RESULT_MISS) ] = 0x01b7,
1522 },
1523 },
1524 };
1525
1526 static __initconst const u64 core2_hw_cache_event_ids
1527 [PERF_COUNT_HW_CACHE_MAX]
1528 [PERF_COUNT_HW_CACHE_OP_MAX]
1529 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1530 {
1531 [ C(L1D) ] = {
1532 [ C(OP_READ) ] = {
1533 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
1534 [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
1535 },
1536 [ C(OP_WRITE) ] = {
1537 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
1538 [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
1539 },
1540 [ C(OP_PREFETCH) ] = {
1541 [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
1542 [ C(RESULT_MISS) ] = 0,
1543 },
1544 },
1545 [ C(L1I ) ] = {
1546 [ C(OP_READ) ] = {
1547 [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
1548 [ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
1549 },
1550 [ C(OP_WRITE) ] = {
1551 [ C(RESULT_ACCESS) ] = -1,
1552 [ C(RESULT_MISS) ] = -1,
1553 },
1554 [ C(OP_PREFETCH) ] = {
1555 [ C(RESULT_ACCESS) ] = 0,
1556 [ C(RESULT_MISS) ] = 0,
1557 },
1558 },
1559 [ C(LL ) ] = {
1560 [ C(OP_READ) ] = {
1561 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
1562 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
1563 },
1564 [ C(OP_WRITE) ] = {
1565 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
1566 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
1567 },
1568 [ C(OP_PREFETCH) ] = {
1569 [ C(RESULT_ACCESS) ] = 0,
1570 [ C(RESULT_MISS) ] = 0,
1571 },
1572 },
1573 [ C(DTLB) ] = {
1574 [ C(OP_READ) ] = {
1575 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
1576 [ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
1577 },
1578 [ C(OP_WRITE) ] = {
1579 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
1580 [ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
1581 },
1582 [ C(OP_PREFETCH) ] = {
1583 [ C(RESULT_ACCESS) ] = 0,
1584 [ C(RESULT_MISS) ] = 0,
1585 },
1586 },
1587 [ C(ITLB) ] = {
1588 [ C(OP_READ) ] = {
1589 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1590 [ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */
1591 },
1592 [ C(OP_WRITE) ] = {
1593 [ C(RESULT_ACCESS) ] = -1,
1594 [ C(RESULT_MISS) ] = -1,
1595 },
1596 [ C(OP_PREFETCH) ] = {
1597 [ C(RESULT_ACCESS) ] = -1,
1598 [ C(RESULT_MISS) ] = -1,
1599 },
1600 },
1601 [ C(BPU ) ] = {
1602 [ C(OP_READ) ] = {
1603 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1604 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
1605 },
1606 [ C(OP_WRITE) ] = {
1607 [ C(RESULT_ACCESS) ] = -1,
1608 [ C(RESULT_MISS) ] = -1,
1609 },
1610 [ C(OP_PREFETCH) ] = {
1611 [ C(RESULT_ACCESS) ] = -1,
1612 [ C(RESULT_MISS) ] = -1,
1613 },
1614 },
1615 };
1616
1617 static __initconst const u64 atom_hw_cache_event_ids
1618 [PERF_COUNT_HW_CACHE_MAX]
1619 [PERF_COUNT_HW_CACHE_OP_MAX]
1620 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1621 {
1622 [ C(L1D) ] = {
1623 [ C(OP_READ) ] = {
1624 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
1625 [ C(RESULT_MISS) ] = 0,
1626 },
1627 [ C(OP_WRITE) ] = {
1628 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
1629 [ C(RESULT_MISS) ] = 0,
1630 },
1631 [ C(OP_PREFETCH) ] = {
1632 [ C(RESULT_ACCESS) ] = 0x0,
1633 [ C(RESULT_MISS) ] = 0,
1634 },
1635 },
1636 [ C(L1I ) ] = {
1637 [ C(OP_READ) ] = {
1638 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
1639 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
1640 },
1641 [ C(OP_WRITE) ] = {
1642 [ C(RESULT_ACCESS) ] = -1,
1643 [ C(RESULT_MISS) ] = -1,
1644 },
1645 [ C(OP_PREFETCH) ] = {
1646 [ C(RESULT_ACCESS) ] = 0,
1647 [ C(RESULT_MISS) ] = 0,
1648 },
1649 },
1650 [ C(LL ) ] = {
1651 [ C(OP_READ) ] = {
1652 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
1653 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
1654 },
1655 [ C(OP_WRITE) ] = {
1656 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
1657 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
1658 },
1659 [ C(OP_PREFETCH) ] = {
1660 [ C(RESULT_ACCESS) ] = 0,
1661 [ C(RESULT_MISS) ] = 0,
1662 },
1663 },
1664 [ C(DTLB) ] = {
1665 [ C(OP_READ) ] = {
1666 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
1667 [ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
1668 },
1669 [ C(OP_WRITE) ] = {
1670 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
1671 [ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
1672 },
1673 [ C(OP_PREFETCH) ] = {
1674 [ C(RESULT_ACCESS) ] = 0,
1675 [ C(RESULT_MISS) ] = 0,
1676 },
1677 },
1678 [ C(ITLB) ] = {
1679 [ C(OP_READ) ] = {
1680 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1681 [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
1682 },
1683 [ C(OP_WRITE) ] = {
1684 [ C(RESULT_ACCESS) ] = -1,
1685 [ C(RESULT_MISS) ] = -1,
1686 },
1687 [ C(OP_PREFETCH) ] = {
1688 [ C(RESULT_ACCESS) ] = -1,
1689 [ C(RESULT_MISS) ] = -1,
1690 },
1691 },
1692 [ C(BPU ) ] = {
1693 [ C(OP_READ) ] = {
1694 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1695 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
1696 },
1697 [ C(OP_WRITE) ] = {
1698 [ C(RESULT_ACCESS) ] = -1,
1699 [ C(RESULT_MISS) ] = -1,
1700 },
1701 [ C(OP_PREFETCH) ] = {
1702 [ C(RESULT_ACCESS) ] = -1,
1703 [ C(RESULT_MISS) ] = -1,
1704 },
1705 },
1706 };
1707
1708 EVENT_ATTR_STR(topdown-total-slots, td_total_slots_slm, "event=0x3c");
1709 EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_slm, "2");
1710 /* no_alloc_cycles.not_delivered */
1711 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_slm,
1712 "event=0xca,umask=0x50");
1713 EVENT_ATTR_STR(topdown-fetch-bubbles.scale, td_fetch_bubbles_scale_slm, "2");
1714 /* uops_retired.all */
1715 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_slm,
1716 "event=0xc2,umask=0x10");
1717 /* uops_retired.all */
1718 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_slm,
1719 "event=0xc2,umask=0x10");
1720
1721 static struct attribute *slm_events_attrs[] = {
1722 EVENT_PTR(td_total_slots_slm),
1723 EVENT_PTR(td_total_slots_scale_slm),
1724 EVENT_PTR(td_fetch_bubbles_slm),
1725 EVENT_PTR(td_fetch_bubbles_scale_slm),
1726 EVENT_PTR(td_slots_issued_slm),
1727 EVENT_PTR(td_slots_retired_slm),
1728 NULL
1729 };
1730
1731 static struct extra_reg intel_slm_extra_regs[] __read_mostly =
1732 {
1733 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
1734 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffffull, RSP_0),
1735 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x368005ffffull, RSP_1),
1736 EVENT_EXTRA_END
1737 };
1738
1739 #define SLM_DMND_READ SNB_DMND_DATA_RD
1740 #define SLM_DMND_WRITE SNB_DMND_RFO
1741 #define SLM_DMND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO)
1742
1743 #define SLM_SNP_ANY (SNB_SNP_NONE|SNB_SNP_MISS|SNB_NO_FWD|SNB_HITM)
1744 #define SLM_LLC_ACCESS SNB_RESP_ANY
1745 #define SLM_LLC_MISS (SLM_SNP_ANY|SNB_NON_DRAM)
1746
1747 static __initconst const u64 slm_hw_cache_extra_regs
1748 [PERF_COUNT_HW_CACHE_MAX]
1749 [PERF_COUNT_HW_CACHE_OP_MAX]
1750 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1751 {
1752 [ C(LL ) ] = {
1753 [ C(OP_READ) ] = {
1754 [ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS,
1755 [ C(RESULT_MISS) ] = 0,
1756 },
1757 [ C(OP_WRITE) ] = {
1758 [ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS,
1759 [ C(RESULT_MISS) ] = SLM_DMND_WRITE|SLM_LLC_MISS,
1760 },
1761 [ C(OP_PREFETCH) ] = {
1762 [ C(RESULT_ACCESS) ] = SLM_DMND_PREFETCH|SLM_LLC_ACCESS,
1763 [ C(RESULT_MISS) ] = SLM_DMND_PREFETCH|SLM_LLC_MISS,
1764 },
1765 },
1766 };
1767
1768 static __initconst const u64 slm_hw_cache_event_ids
1769 [PERF_COUNT_HW_CACHE_MAX]
1770 [PERF_COUNT_HW_CACHE_OP_MAX]
1771 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1772 {
1773 [ C(L1D) ] = {
1774 [ C(OP_READ) ] = {
1775 [ C(RESULT_ACCESS) ] = 0,
1776 [ C(RESULT_MISS) ] = 0x0104, /* LD_DCU_MISS */
1777 },
1778 [ C(OP_WRITE) ] = {
1779 [ C(RESULT_ACCESS) ] = 0,
1780 [ C(RESULT_MISS) ] = 0,
1781 },
1782 [ C(OP_PREFETCH) ] = {
1783 [ C(RESULT_ACCESS) ] = 0,
1784 [ C(RESULT_MISS) ] = 0,
1785 },
1786 },
1787 [ C(L1I ) ] = {
1788 [ C(OP_READ) ] = {
1789 [ C(RESULT_ACCESS) ] = 0x0380, /* ICACHE.ACCESSES */
1790 [ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */
1791 },
1792 [ C(OP_WRITE) ] = {
1793 [ C(RESULT_ACCESS) ] = -1,
1794 [ C(RESULT_MISS) ] = -1,
1795 },
1796 [ C(OP_PREFETCH) ] = {
1797 [ C(RESULT_ACCESS) ] = 0,
1798 [ C(RESULT_MISS) ] = 0,
1799 },
1800 },
1801 [ C(LL ) ] = {
1802 [ C(OP_READ) ] = {
1803 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1804 [ C(RESULT_ACCESS) ] = 0x01b7,
1805 [ C(RESULT_MISS) ] = 0,
1806 },
1807 [ C(OP_WRITE) ] = {
1808 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1809 [ C(RESULT_ACCESS) ] = 0x01b7,
1810 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1811 [ C(RESULT_MISS) ] = 0x01b7,
1812 },
1813 [ C(OP_PREFETCH) ] = {
1814 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1815 [ C(RESULT_ACCESS) ] = 0x01b7,
1816 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1817 [ C(RESULT_MISS) ] = 0x01b7,
1818 },
1819 },
1820 [ C(DTLB) ] = {
1821 [ C(OP_READ) ] = {
1822 [ C(RESULT_ACCESS) ] = 0,
1823 [ C(RESULT_MISS) ] = 0x0804, /* LD_DTLB_MISS */
1824 },
1825 [ C(OP_WRITE) ] = {
1826 [ C(RESULT_ACCESS) ] = 0,
1827 [ C(RESULT_MISS) ] = 0,
1828 },
1829 [ C(OP_PREFETCH) ] = {
1830 [ C(RESULT_ACCESS) ] = 0,
1831 [ C(RESULT_MISS) ] = 0,
1832 },
1833 },
1834 [ C(ITLB) ] = {
1835 [ C(OP_READ) ] = {
1836 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1837 [ C(RESULT_MISS) ] = 0x40205, /* PAGE_WALKS.I_SIDE_WALKS */
1838 },
1839 [ C(OP_WRITE) ] = {
1840 [ C(RESULT_ACCESS) ] = -1,
1841 [ C(RESULT_MISS) ] = -1,
1842 },
1843 [ C(OP_PREFETCH) ] = {
1844 [ C(RESULT_ACCESS) ] = -1,
1845 [ C(RESULT_MISS) ] = -1,
1846 },
1847 },
1848 [ C(BPU ) ] = {
1849 [ C(OP_READ) ] = {
1850 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1851 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
1852 },
1853 [ C(OP_WRITE) ] = {
1854 [ C(RESULT_ACCESS) ] = -1,
1855 [ C(RESULT_MISS) ] = -1,
1856 },
1857 [ C(OP_PREFETCH) ] = {
1858 [ C(RESULT_ACCESS) ] = -1,
1859 [ C(RESULT_MISS) ] = -1,
1860 },
1861 },
1862 };
1863
1864 EVENT_ATTR_STR(topdown-total-slots, td_total_slots_glm, "event=0x3c");
1865 EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_glm, "3");
1866 /* UOPS_NOT_DELIVERED.ANY */
1867 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_glm, "event=0x9c");
1868 /* ISSUE_SLOTS_NOT_CONSUMED.RECOVERY */
1869 EVENT_ATTR_STR(topdown-recovery-bubbles, td_recovery_bubbles_glm, "event=0xca,umask=0x02");
1870 /* UOPS_RETIRED.ANY */
1871 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_glm, "event=0xc2");
1872 /* UOPS_ISSUED.ANY */
1873 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_glm, "event=0x0e");
1874
1875 static struct attribute *glm_events_attrs[] = {
1876 EVENT_PTR(td_total_slots_glm),
1877 EVENT_PTR(td_total_slots_scale_glm),
1878 EVENT_PTR(td_fetch_bubbles_glm),
1879 EVENT_PTR(td_recovery_bubbles_glm),
1880 EVENT_PTR(td_slots_issued_glm),
1881 EVENT_PTR(td_slots_retired_glm),
1882 NULL
1883 };
1884
1885 static struct extra_reg intel_glm_extra_regs[] __read_mostly = {
1886 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
1887 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x760005ffbfull, RSP_0),
1888 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x360005ffbfull, RSP_1),
1889 EVENT_EXTRA_END
1890 };
1891
1892 #define GLM_DEMAND_DATA_RD BIT_ULL(0)
1893 #define GLM_DEMAND_RFO BIT_ULL(1)
1894 #define GLM_ANY_RESPONSE BIT_ULL(16)
1895 #define GLM_SNP_NONE_OR_MISS BIT_ULL(33)
1896 #define GLM_DEMAND_READ GLM_DEMAND_DATA_RD
1897 #define GLM_DEMAND_WRITE GLM_DEMAND_RFO
1898 #define GLM_DEMAND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO)
1899 #define GLM_LLC_ACCESS GLM_ANY_RESPONSE
1900 #define GLM_SNP_ANY (GLM_SNP_NONE_OR_MISS|SNB_NO_FWD|SNB_HITM)
1901 #define GLM_LLC_MISS (GLM_SNP_ANY|SNB_NON_DRAM)
1902
1903 static __initconst const u64 glm_hw_cache_event_ids
1904 [PERF_COUNT_HW_CACHE_MAX]
1905 [PERF_COUNT_HW_CACHE_OP_MAX]
1906 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1907 [C(L1D)] = {
1908 [C(OP_READ)] = {
1909 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1910 [C(RESULT_MISS)] = 0x0,
1911 },
1912 [C(OP_WRITE)] = {
1913 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1914 [C(RESULT_MISS)] = 0x0,
1915 },
1916 [C(OP_PREFETCH)] = {
1917 [C(RESULT_ACCESS)] = 0x0,
1918 [C(RESULT_MISS)] = 0x0,
1919 },
1920 },
1921 [C(L1I)] = {
1922 [C(OP_READ)] = {
1923 [C(RESULT_ACCESS)] = 0x0380, /* ICACHE.ACCESSES */
1924 [C(RESULT_MISS)] = 0x0280, /* ICACHE.MISSES */
1925 },
1926 [C(OP_WRITE)] = {
1927 [C(RESULT_ACCESS)] = -1,
1928 [C(RESULT_MISS)] = -1,
1929 },
1930 [C(OP_PREFETCH)] = {
1931 [C(RESULT_ACCESS)] = 0x0,
1932 [C(RESULT_MISS)] = 0x0,
1933 },
1934 },
1935 [C(LL)] = {
1936 [C(OP_READ)] = {
1937 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1938 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1939 },
1940 [C(OP_WRITE)] = {
1941 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1942 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1943 },
1944 [C(OP_PREFETCH)] = {
1945 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1946 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1947 },
1948 },
1949 [C(DTLB)] = {
1950 [C(OP_READ)] = {
1951 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1952 [C(RESULT_MISS)] = 0x0,
1953 },
1954 [C(OP_WRITE)] = {
1955 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1956 [C(RESULT_MISS)] = 0x0,
1957 },
1958 [C(OP_PREFETCH)] = {
1959 [C(RESULT_ACCESS)] = 0x0,
1960 [C(RESULT_MISS)] = 0x0,
1961 },
1962 },
1963 [C(ITLB)] = {
1964 [C(OP_READ)] = {
1965 [C(RESULT_ACCESS)] = 0x00c0, /* INST_RETIRED.ANY_P */
1966 [C(RESULT_MISS)] = 0x0481, /* ITLB.MISS */
1967 },
1968 [C(OP_WRITE)] = {
1969 [C(RESULT_ACCESS)] = -1,
1970 [C(RESULT_MISS)] = -1,
1971 },
1972 [C(OP_PREFETCH)] = {
1973 [C(RESULT_ACCESS)] = -1,
1974 [C(RESULT_MISS)] = -1,
1975 },
1976 },
1977 [C(BPU)] = {
1978 [C(OP_READ)] = {
1979 [C(RESULT_ACCESS)] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1980 [C(RESULT_MISS)] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
1981 },
1982 [C(OP_WRITE)] = {
1983 [C(RESULT_ACCESS)] = -1,
1984 [C(RESULT_MISS)] = -1,
1985 },
1986 [C(OP_PREFETCH)] = {
1987 [C(RESULT_ACCESS)] = -1,
1988 [C(RESULT_MISS)] = -1,
1989 },
1990 },
1991 };
1992
1993 static __initconst const u64 glm_hw_cache_extra_regs
1994 [PERF_COUNT_HW_CACHE_MAX]
1995 [PERF_COUNT_HW_CACHE_OP_MAX]
1996 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1997 [C(LL)] = {
1998 [C(OP_READ)] = {
1999 [C(RESULT_ACCESS)] = GLM_DEMAND_READ|
2000 GLM_LLC_ACCESS,
2001 [C(RESULT_MISS)] = GLM_DEMAND_READ|
2002 GLM_LLC_MISS,
2003 },
2004 [C(OP_WRITE)] = {
2005 [C(RESULT_ACCESS)] = GLM_DEMAND_WRITE|
2006 GLM_LLC_ACCESS,
2007 [C(RESULT_MISS)] = GLM_DEMAND_WRITE|
2008 GLM_LLC_MISS,
2009 },
2010 [C(OP_PREFETCH)] = {
2011 [C(RESULT_ACCESS)] = GLM_DEMAND_PREFETCH|
2012 GLM_LLC_ACCESS,
2013 [C(RESULT_MISS)] = GLM_DEMAND_PREFETCH|
2014 GLM_LLC_MISS,
2015 },
2016 },
2017 };
2018
2019 static __initconst const u64 glp_hw_cache_event_ids
2020 [PERF_COUNT_HW_CACHE_MAX]
2021 [PERF_COUNT_HW_CACHE_OP_MAX]
2022 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2023 [C(L1D)] = {
2024 [C(OP_READ)] = {
2025 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
2026 [C(RESULT_MISS)] = 0x0,
2027 },
2028 [C(OP_WRITE)] = {
2029 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
2030 [C(RESULT_MISS)] = 0x0,
2031 },
2032 [C(OP_PREFETCH)] = {
2033 [C(RESULT_ACCESS)] = 0x0,
2034 [C(RESULT_MISS)] = 0x0,
2035 },
2036 },
2037 [C(L1I)] = {
2038 [C(OP_READ)] = {
2039 [C(RESULT_ACCESS)] = 0x0380, /* ICACHE.ACCESSES */
2040 [C(RESULT_MISS)] = 0x0280, /* ICACHE.MISSES */
2041 },
2042 [C(OP_WRITE)] = {
2043 [C(RESULT_ACCESS)] = -1,
2044 [C(RESULT_MISS)] = -1,
2045 },
2046 [C(OP_PREFETCH)] = {
2047 [C(RESULT_ACCESS)] = 0x0,
2048 [C(RESULT_MISS)] = 0x0,
2049 },
2050 },
2051 [C(LL)] = {
2052 [C(OP_READ)] = {
2053 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
2054 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
2055 },
2056 [C(OP_WRITE)] = {
2057 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
2058 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
2059 },
2060 [C(OP_PREFETCH)] = {
2061 [C(RESULT_ACCESS)] = 0x0,
2062 [C(RESULT_MISS)] = 0x0,
2063 },
2064 },
2065 [C(DTLB)] = {
2066 [C(OP_READ)] = {
2067 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
2068 [C(RESULT_MISS)] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */
2069 },
2070 [C(OP_WRITE)] = {
2071 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
2072 [C(RESULT_MISS)] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */
2073 },
2074 [C(OP_PREFETCH)] = {
2075 [C(RESULT_ACCESS)] = 0x0,
2076 [C(RESULT_MISS)] = 0x0,
2077 },
2078 },
2079 [C(ITLB)] = {
2080 [C(OP_READ)] = {
2081 [C(RESULT_ACCESS)] = 0x00c0, /* INST_RETIRED.ANY_P */
2082 [C(RESULT_MISS)] = 0x0481, /* ITLB.MISS */
2083 },
2084 [C(OP_WRITE)] = {
2085 [C(RESULT_ACCESS)] = -1,
2086 [C(RESULT_MISS)] = -1,
2087 },
2088 [C(OP_PREFETCH)] = {
2089 [C(RESULT_ACCESS)] = -1,
2090 [C(RESULT_MISS)] = -1,
2091 },
2092 },
2093 [C(BPU)] = {
2094 [C(OP_READ)] = {
2095 [C(RESULT_ACCESS)] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
2096 [C(RESULT_MISS)] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
2097 },
2098 [C(OP_WRITE)] = {
2099 [C(RESULT_ACCESS)] = -1,
2100 [C(RESULT_MISS)] = -1,
2101 },
2102 [C(OP_PREFETCH)] = {
2103 [C(RESULT_ACCESS)] = -1,
2104 [C(RESULT_MISS)] = -1,
2105 },
2106 },
2107 };
2108
2109 static __initconst const u64 glp_hw_cache_extra_regs
2110 [PERF_COUNT_HW_CACHE_MAX]
2111 [PERF_COUNT_HW_CACHE_OP_MAX]
2112 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2113 [C(LL)] = {
2114 [C(OP_READ)] = {
2115 [C(RESULT_ACCESS)] = GLM_DEMAND_READ|
2116 GLM_LLC_ACCESS,
2117 [C(RESULT_MISS)] = GLM_DEMAND_READ|
2118 GLM_LLC_MISS,
2119 },
2120 [C(OP_WRITE)] = {
2121 [C(RESULT_ACCESS)] = GLM_DEMAND_WRITE|
2122 GLM_LLC_ACCESS,
2123 [C(RESULT_MISS)] = GLM_DEMAND_WRITE|
2124 GLM_LLC_MISS,
2125 },
2126 [C(OP_PREFETCH)] = {
2127 [C(RESULT_ACCESS)] = 0x0,
2128 [C(RESULT_MISS)] = 0x0,
2129 },
2130 },
2131 };
2132
2133 #define TNT_LOCAL_DRAM BIT_ULL(26)
2134 #define TNT_DEMAND_READ GLM_DEMAND_DATA_RD
2135 #define TNT_DEMAND_WRITE GLM_DEMAND_RFO
2136 #define TNT_LLC_ACCESS GLM_ANY_RESPONSE
2137 #define TNT_SNP_ANY (SNB_SNP_NOT_NEEDED|SNB_SNP_MISS| \
2138 SNB_NO_FWD|SNB_SNP_FWD|SNB_HITM)
2139 #define TNT_LLC_MISS (TNT_SNP_ANY|SNB_NON_DRAM|TNT_LOCAL_DRAM)
2140
2141 static __initconst const u64 tnt_hw_cache_extra_regs
2142 [PERF_COUNT_HW_CACHE_MAX]
2143 [PERF_COUNT_HW_CACHE_OP_MAX]
2144 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2145 [C(LL)] = {
2146 [C(OP_READ)] = {
2147 [C(RESULT_ACCESS)] = TNT_DEMAND_READ|
2148 TNT_LLC_ACCESS,
2149 [C(RESULT_MISS)] = TNT_DEMAND_READ|
2150 TNT_LLC_MISS,
2151 },
2152 [C(OP_WRITE)] = {
2153 [C(RESULT_ACCESS)] = TNT_DEMAND_WRITE|
2154 TNT_LLC_ACCESS,
2155 [C(RESULT_MISS)] = TNT_DEMAND_WRITE|
2156 TNT_LLC_MISS,
2157 },
2158 [C(OP_PREFETCH)] = {
2159 [C(RESULT_ACCESS)] = 0x0,
2160 [C(RESULT_MISS)] = 0x0,
2161 },
2162 },
2163 };
2164
2165 EVENT_ATTR_STR(topdown-fe-bound, td_fe_bound_tnt, "event=0x71,umask=0x0");
2166 EVENT_ATTR_STR(topdown-retiring, td_retiring_tnt, "event=0xc2,umask=0x0");
2167 EVENT_ATTR_STR(topdown-bad-spec, td_bad_spec_tnt, "event=0x73,umask=0x6");
2168 EVENT_ATTR_STR(topdown-be-bound, td_be_bound_tnt, "event=0x74,umask=0x0");
2169
2170 static struct attribute *tnt_events_attrs[] = {
2171 EVENT_PTR(td_fe_bound_tnt),
2172 EVENT_PTR(td_retiring_tnt),
2173 EVENT_PTR(td_bad_spec_tnt),
2174 EVENT_PTR(td_be_bound_tnt),
2175 NULL,
2176 };
2177
2178 static struct extra_reg intel_tnt_extra_regs[] __read_mostly = {
2179 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
2180 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x800ff0ffffff9fffull, RSP_0),
2181 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xff0ffffff9fffull, RSP_1),
2182 EVENT_EXTRA_END
2183 };
2184
2185 EVENT_ATTR_STR(mem-loads, mem_ld_grt, "event=0xd0,umask=0x5,ldlat=3");
2186 EVENT_ATTR_STR(mem-stores, mem_st_grt, "event=0xd0,umask=0x6");
2187
2188 static struct attribute *grt_mem_attrs[] = {
2189 EVENT_PTR(mem_ld_grt),
2190 EVENT_PTR(mem_st_grt),
2191 NULL
2192 };
2193
2194 static struct extra_reg intel_grt_extra_regs[] __read_mostly = {
2195 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
2196 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
2197 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
2198 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x5d0),
2199 EVENT_EXTRA_END
2200 };
2201
2202 EVENT_ATTR_STR(topdown-retiring, td_retiring_cmt, "event=0x72,umask=0x0");
2203 EVENT_ATTR_STR(topdown-bad-spec, td_bad_spec_cmt, "event=0x73,umask=0x0");
2204
2205 static struct attribute *cmt_events_attrs[] = {
2206 EVENT_PTR(td_fe_bound_tnt),
2207 EVENT_PTR(td_retiring_cmt),
2208 EVENT_PTR(td_bad_spec_cmt),
2209 EVENT_PTR(td_be_bound_tnt),
2210 NULL
2211 };
2212
2213 static struct extra_reg intel_cmt_extra_regs[] __read_mostly = {
2214 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
2215 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x800ff3ffffffffffull, RSP_0),
2216 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xff3ffffffffffull, RSP_1),
2217 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x5d0),
2218 INTEL_UEVENT_EXTRA_REG(0x0127, MSR_SNOOP_RSP_0, 0xffffffffffffffffull, SNOOP_0),
2219 INTEL_UEVENT_EXTRA_REG(0x0227, MSR_SNOOP_RSP_1, 0xffffffffffffffffull, SNOOP_1),
2220 EVENT_EXTRA_END
2221 };
2222
2223 #define KNL_OT_L2_HITE BIT_ULL(19) /* Other Tile L2 Hit */
2224 #define KNL_OT_L2_HITF BIT_ULL(20) /* Other Tile L2 Hit */
2225 #define KNL_MCDRAM_LOCAL BIT_ULL(21)
2226 #define KNL_MCDRAM_FAR BIT_ULL(22)
2227 #define KNL_DDR_LOCAL BIT_ULL(23)
2228 #define KNL_DDR_FAR BIT_ULL(24)
2229 #define KNL_DRAM_ANY (KNL_MCDRAM_LOCAL | KNL_MCDRAM_FAR | \
2230 KNL_DDR_LOCAL | KNL_DDR_FAR)
2231 #define KNL_L2_READ SLM_DMND_READ
2232 #define KNL_L2_WRITE SLM_DMND_WRITE
2233 #define KNL_L2_PREFETCH SLM_DMND_PREFETCH
2234 #define KNL_L2_ACCESS SLM_LLC_ACCESS
2235 #define KNL_L2_MISS (KNL_OT_L2_HITE | KNL_OT_L2_HITF | \
2236 KNL_DRAM_ANY | SNB_SNP_ANY | \
2237 SNB_NON_DRAM)
2238
2239 static __initconst const u64 knl_hw_cache_extra_regs
2240 [PERF_COUNT_HW_CACHE_MAX]
2241 [PERF_COUNT_HW_CACHE_OP_MAX]
2242 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2243 [C(LL)] = {
2244 [C(OP_READ)] = {
2245 [C(RESULT_ACCESS)] = KNL_L2_READ | KNL_L2_ACCESS,
2246 [C(RESULT_MISS)] = 0,
2247 },
2248 [C(OP_WRITE)] = {
2249 [C(RESULT_ACCESS)] = KNL_L2_WRITE | KNL_L2_ACCESS,
2250 [C(RESULT_MISS)] = KNL_L2_WRITE | KNL_L2_MISS,
2251 },
2252 [C(OP_PREFETCH)] = {
2253 [C(RESULT_ACCESS)] = KNL_L2_PREFETCH | KNL_L2_ACCESS,
2254 [C(RESULT_MISS)] = KNL_L2_PREFETCH | KNL_L2_MISS,
2255 },
2256 },
2257 };
2258
2259 /*
2260 * Used from PMIs where the LBRs are already disabled.
2261 *
2262 * This function may be called consecutively. The PMU must remain in the
2263 * disabled state across consecutive calls.
2264 *
2265 * During consecutive calls, the same disable value will be written to related
2266 * registers, so the PMU state remains unchanged.
2267 *
2268 * intel_bts events don't coexist with intel PMU's BTS events because of
2269 * x86_add_exclusive(x86_lbr_exclusive_lbr); there's no need to keep them
2270 * disabled around intel PMU's event batching etc, only inside the PMI handler.
2271 *
2272 * Avoid PEBS_ENABLE MSR access in PMIs.
2273 * The GLOBAL_CTRL has been disabled. All the counters do not count anymore.
2274 * It doesn't matter if the PEBS is enabled or not.
2275 * Usually, the PEBS status is not changed in PMIs, so it's unnecessary to
2276 * access the PEBS_ENABLE MSR in disable_all()/enable_all().
2277 * However, there are some cases which may change PEBS status, e.g. PMI
2278 * throttle. The PEBS_ENABLE should be updated where the status changes.
2279 */
2280 static __always_inline void __intel_pmu_disable_all(bool bts)
2281 {
2282 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2283
2284 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
2285
2286 if (bts && test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
2287 intel_pmu_disable_bts();
2288 }
2289
2290 static __always_inline void intel_pmu_disable_all(void)
2291 {
2292 __intel_pmu_disable_all(true);
2293 intel_pmu_pebs_disable_all();
2294 intel_pmu_lbr_disable_all();
2295 }
2296
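/*
 * Re-enable the PMU: restore the LBRs, rewrite the fixed counter control
 * MSR only when its cached value changed, and write GLOBAL_CTRL with the
 * counters currently assigned to a guest masked out. Re-arm BTS if a BTS
 * event is active.
 */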
2297 static void __intel_pmu_enable_all(int added, bool pmi)
2298 {
2299 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2300 u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
2301
2302 intel_pmu_lbr_enable_all(pmi);
2303
2304 if (cpuc->fixed_ctrl_val != cpuc->active_fixed_ctrl_val) {
2305 wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, cpuc->fixed_ctrl_val);
2306 cpuc->active_fixed_ctrl_val = cpuc->fixed_ctrl_val;
2307 }
2308
2309 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
2310 intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
2311
2312 if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
2313 struct perf_event *event =
2314 cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
2315
2316 if (WARN_ON_ONCE(!event))
2317 return;
2318
2319 intel_pmu_enable_bts(event->hw.config);
2320 }
2321 }
2322
2323 static void intel_pmu_enable_all(int added)
2324 {
2325 intel_pmu_pebs_enable_all();
2326 __intel_pmu_enable_all(added, false);
2327 }
2328
2329 static noinline int
2330 __intel_pmu_snapshot_branch_stack(struct perf_branch_entry *entries,
2331 unsigned int cnt, unsigned long flags)
2332 {
2333 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2334
2335 intel_pmu_lbr_read();
2336 cnt = min_t(unsigned int, cnt, x86_pmu.lbr_nr);
2337
2338 memcpy(entries, cpuc->lbr_entries, sizeof(struct perf_branch_entry) * cnt);
2339 intel_pmu_enable_all(0);
2340 local_irq_restore(flags);
2341 return cnt;
2342 }
2343
2344 static int
2345 intel_pmu_snapshot_branch_stack(struct perf_branch_entry *entries, unsigned int cnt)
2346 {
2347 unsigned long flags;
2348
2349 /* must not have branches... */
2350 local_irq_save(flags);
2351 __intel_pmu_disable_all(false); /* we don't care about BTS */
2352 __intel_pmu_lbr_disable();
2353 /* ... until here */
2354 return __intel_pmu_snapshot_branch_stack(entries, cnt, flags);
2355 }
2356
2357 static int
2358 intel_pmu_snapshot_arch_branch_stack(struct perf_branch_entry *entries, unsigned int cnt)
2359 {
2360 unsigned long flags;
2361
2362 /* must not have branches... */
2363 local_irq_save(flags);
2364 __intel_pmu_disable_all(false); /* we don't care about BTS */
2365 __intel_pmu_arch_lbr_disable();
2366 /* ... until here */
2367 return __intel_pmu_snapshot_branch_stack(entries, cnt, flags);
2368 }
2369
2370 /*
2371 * Workaround for:
2372 * Intel Errata AAK100 (model 26)
2373 * Intel Errata AAP53 (model 30)
2374 * Intel Errata BD53 (model 44)
2375 *
2376 * The official story:
2377 * These chips need to be 'reset' when adding counters by programming the
2378 * magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
2379 * in sequence on the same PMC or on different PMCs.
2380 *
2381 * In practice it appears some of these events do in fact count, and
2382 * we need to program all 4 events.
2383 */
2384 static void intel_pmu_nhm_workaround(void)
2385 {
2386 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2387 static const unsigned long nhm_magic[4] = {
2388 0x4300B5,
2389 0x4300D2,
2390 0x4300B1,
2391 0x4300B1
2392 };
2393 struct perf_event *event;
2394 int i;
2395
2396 /*
2397 * The erratum requires the steps below:
2398 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
2399 * 2) Configure 4 PERFEVTSELx with the magic events and clear
2400 * the corresponding PMCx;
2401 * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
2402 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
2403 * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
2404 */
2405
2406 /*
2407 * The real steps we choose are a little different from above.
2408 * A) To reduce MSR operations, we don't run step 1) as they
2409 * are already cleared before this function is called;
2410 * B) Call x86_perf_event_update to save PMCx before configuring
2411 * PERFEVTSELx with magic number;
2412 * C) For step 5), we clear PERFEVTSELx only when it is not
2413 * currently in use.
2414 * D) Call x86_perf_event_set_period to restore PMCx;
2415 */
2416
2417 /* We always operate 4 pairs of PERF Counters */
2418 for (i = 0; i < 4; i++) {
2419 event = cpuc->events[i];
2420 if (event)
2421 static_call(x86_pmu_update)(event);
2422 }
2423
2424 for (i = 0; i < 4; i++) {
2425 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
2426 wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
2427 }
2428
2429 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
2430 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
2431
2432 for (i = 0; i < 4; i++) {
2433 event = cpuc->events[i];
2434
2435 if (event) {
2436 static_call(x86_pmu_set_period)(event);
2437 __x86_pmu_enable_event(&event->hw,
2438 ARCH_PERFMON_EVENTSEL_ENABLE);
2439 } else
2440 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
2441 }
2442 }
2443
2444 static void intel_pmu_nhm_enable_all(int added)
2445 {
2446 if (added)
2447 intel_pmu_nhm_workaround();
2448 intel_pmu_enable_all(added);
2449 }
2450
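/*
 * TSX Force Abort (TFA): PMC3 is only usable while RTM transactions are
 * forced to abort. The last value written to MSR_TSX_FORCE_ABORT is
 * cached in cpuc->tfa_shadow so the MSR is only touched when the
 * requested state actually changes.
 */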
2451 static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on)
2452 {
2453 u64 val = on ? MSR_TFA_RTM_FORCE_ABORT : 0;
2454
2455 if (cpuc->tfa_shadow != val) {
2456 cpuc->tfa_shadow = val;
2457 wrmsrl(MSR_TSX_FORCE_ABORT, val);
2458 }
2459 }
2460
2461 static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
2462 {
2463 /*
2464 * We're going to use PMC3, make sure TFA is set before we touch it.
2465 */
2466 if (cntr == 3)
2467 intel_set_tfa(cpuc, true);
2468 }
2469
2470 static void intel_tfa_pmu_enable_all(int added)
2471 {
2472 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2473
2474 /*
2475 * If we find PMC3 is no longer used when we enable the PMU, we can
2476 * clear TFA.
2477 */
2478 if (!test_bit(3, cpuc->active_mask))
2479 intel_set_tfa(cpuc, false);
2480
2481 intel_pmu_enable_all(added);
2482 }
2483
2484 static inline u64 intel_pmu_get_status(void)
2485 {
2486 u64 status;
2487
2488 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
2489
2490 return status;
2491 }
2492
2493 static inline void intel_pmu_ack_status(u64 ack)
2494 {
2495 wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
2496 }
2497
2498 static inline bool event_is_checkpointed(struct perf_event *event)
2499 {
2500 return unlikely(event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
2501 }
2502
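/*
 * Maintain the per-counter bitmasks in cpuc: which counters carry
 * exclude_host/exclude_guest events and which ones are checkpointed.
 * These masks are consumed when GLOBAL_CTRL is rewritten and when the
 * overflow status is processed in handle_pmi_common().
 */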
2503 static inline void intel_set_masks(struct perf_event *event, int idx)
2504 {
2505 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2506
2507 if (event->attr.exclude_host)
2508 __set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask);
2509 if (event->attr.exclude_guest)
2510 __set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask);
2511 if (event_is_checkpointed(event))
2512 __set_bit(idx, (unsigned long *)&cpuc->intel_cp_status);
2513 }
2514
2515 static inline void intel_clear_masks(struct perf_event *event, int idx)
2516 {
2517 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2518
2519 __clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask);
2520 __clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask);
2521 __clear_bit(idx, (unsigned long *)&cpuc->intel_cp_status);
2522 }
2523
2524 static void intel_pmu_disable_fixed(struct perf_event *event)
2525 {
2526 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2527 struct hw_perf_event *hwc = &event->hw;
2528 int idx = hwc->idx;
2529 u64 mask;
2530
2531 if (is_topdown_idx(idx)) {
2532 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2533
2534 /*
2535 * When there are other active TopDown events,
2536 * don't disable the fixed counter 3.
2537 */
2538 if (*(u64 *)cpuc->active_mask & INTEL_PMC_OTHER_TOPDOWN_BITS(idx))
2539 return;
2540 idx = INTEL_PMC_IDX_FIXED_SLOTS;
2541 }
2542
2543 intel_clear_masks(event, idx);
2544
2545 mask = intel_fixed_bits_by_idx(idx - INTEL_PMC_IDX_FIXED, INTEL_FIXED_BITS_MASK);
2546 cpuc->fixed_ctrl_val &= ~mask;
2547 }
2548
2549 static void intel_pmu_disable_event(struct perf_event *event)
2550 {
2551 struct hw_perf_event *hwc = &event->hw;
2552 int idx = hwc->idx;
2553
2554 switch (idx) {
2555 case 0 ... INTEL_PMC_IDX_FIXED - 1:
2556 intel_clear_masks(event, idx);
2557 x86_pmu_disable_event(event);
2558 break;
2559 case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1:
2560 case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END:
2561 intel_pmu_disable_fixed(event);
2562 break;
2563 case INTEL_PMC_IDX_FIXED_BTS:
2564 intel_pmu_disable_bts();
2565 intel_pmu_drain_bts_buffer();
2566 return;
2567 case INTEL_PMC_IDX_FIXED_VLBR:
2568 intel_clear_masks(event, idx);
2569 break;
2570 default:
2571 intel_clear_masks(event, idx);
2572 pr_warn("Failed to disable the event with invalid index %d\n",
2573 idx);
2574 return;
2575 }
2576
2577 /*
2578 * Needs to be called after x86_pmu_disable_event,
2579 * so we don't trigger the event without PEBS bit set.
2580 */
2581 if (unlikely(event->attr.precise_ip))
2582 intel_pmu_pebs_disable(event);
2583 }
2584
2585 static void intel_pmu_assign_event(struct perf_event *event, int idx)
2586 {
2587 if (is_pebs_pt(event))
2588 perf_report_aux_output_id(event, idx);
2589 }
2590
2591 static __always_inline bool intel_pmu_needs_branch_stack(struct perf_event *event)
2592 {
2593 return event->hw.flags & PERF_X86_EVENT_NEEDS_BRANCH_STACK;
2594 }
2595
2596 static void intel_pmu_del_event(struct perf_event *event)
2597 {
2598 if (intel_pmu_needs_branch_stack(event))
2599 intel_pmu_lbr_del(event);
2600 if (event->attr.precise_ip)
2601 intel_pmu_pebs_del(event);
2602 }
2603
2604 static int icl_set_topdown_event_period(struct perf_event *event)
2605 {
2606 struct hw_perf_event *hwc = &event->hw;
2607 s64 left = local64_read(&hwc->period_left);
2608
2609 /*
2610 * The values in PERF_METRICS MSR are derived from fixed counter 3.
2611 * Software should start both registers, PERF_METRICS and fixed
2612 * counter 3, from zero.
2613 * Clear PERF_METRICS and fixed counter 3 at initialization.
2614 * After that, both MSRs will be cleared for each read, so there is
2615 * no need to clear them again.
2616 */
2617 if (left == x86_pmu.max_period) {
2618 wrmsrl(MSR_CORE_PERF_FIXED_CTR3, 0);
2619 wrmsrl(MSR_PERF_METRICS, 0);
2620 hwc->saved_slots = 0;
2621 hwc->saved_metric = 0;
2622 }
2623
2624 if ((hwc->saved_slots) && is_slots_event(event)) {
2625 wrmsrl(MSR_CORE_PERF_FIXED_CTR3, hwc->saved_slots);
2626 wrmsrl(MSR_PERF_METRICS, hwc->saved_metric);
2627 }
2628
2629 perf_event_update_userpage(event);
2630
2631 return 0;
2632 }
2633
2634 DEFINE_STATIC_CALL(intel_pmu_set_topdown_event_period, x86_perf_event_set_period);
2635
2636 static inline u64 icl_get_metrics_event_value(u64 metric, u64 slots, int idx)
2637 {
2638 u32 val;
2639
2640 /*
2641 * The metric is reported as an 8bit integer fraction
2642 * summing up to 0xff.
2643 * slots-in-metric = (Metric / 0xff) * slots
2644 */
2645 val = (metric >> ((idx - INTEL_PMC_IDX_METRIC_BASE) * 8)) & 0xff;
2646 return mul_u64_u32_div(slots, val, 0xff);
2647 }
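/*
 * For example, with purely illustrative numbers: slots = 1000 and a
 * metric byte of 0x80 credit the event with 1000 * 0x80 / 0xff = 501
 * slots, i.e. roughly half of the measured slots.
 */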
2648
2649 static u64 icl_get_topdown_value(struct perf_event *event,
2650 u64 slots, u64 metrics)
2651 {
2652 int idx = event->hw.idx;
2653 u64 delta;
2654
2655 if (is_metric_idx(idx))
2656 delta = icl_get_metrics_event_value(metrics, slots, idx);
2657 else
2658 delta = slots;
2659
2660 return delta;
2661 }
2662
2663 static void __icl_update_topdown_event(struct perf_event *event,
2664 u64 slots, u64 metrics,
2665 u64 last_slots, u64 last_metrics)
2666 {
2667 u64 delta, last = 0;
2668
2669 delta = icl_get_topdown_value(event, slots, metrics);
2670 if (last_slots)
2671 last = icl_get_topdown_value(event, last_slots, last_metrics);
2672
2673 /*
2674 * The 8bit integer fraction of the metric may not be accurate,
2675 * especially when the change is very small.
2676 * For example, if only a few bad_spec happens, the fraction
2677 * may be reduced from 1 to 0. If so, the bad_spec event value
2678 * will be 0 which is definitely less than the last value.
2679 * Avoid update event->count for this case.
2680 */
2681 if (delta > last) {
2682 delta -= last;
2683 local64_add(delta, &event->count);
2684 }
2685 }
2686
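/*
 * Propagate the just-read slots/metrics values to every active topdown
 * event, so that a later schedule-in can restore a consistent pair.
 */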
2687 static void update_saved_topdown_regs(struct perf_event *event, u64 slots,
2688 u64 metrics, int metric_end)
2689 {
2690 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2691 struct perf_event *other;
2692 int idx;
2693
2694 event->hw.saved_slots = slots;
2695 event->hw.saved_metric = metrics;
2696
2697 for_each_set_bit(idx, cpuc->active_mask, metric_end + 1) {
2698 if (!is_topdown_idx(idx))
2699 continue;
2700 other = cpuc->events[idx];
2701 other->hw.saved_slots = slots;
2702 other->hw.saved_metric = metrics;
2703 }
2704 }
2705
2706 /*
2707 * Update all active Topdown events.
2708 *
2709 * The PERF_METRICS and Fixed counter 3 are read separately. The values may be
2710 * modified by an NMI. The PMU has to be disabled before calling this function.
2711 */
2712
2713 static u64 intel_update_topdown_event(struct perf_event *event, int metric_end)
2714 {
2715 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2716 struct perf_event *other;
2717 u64 slots, metrics;
2718 bool reset = true;
2719 int idx;
2720
2721 /* read Fixed counter 3 */
2722 rdpmcl((3 | INTEL_PMC_FIXED_RDPMC_BASE), slots);
2723 if (!slots)
2724 return 0;
2725
2726 /* read PERF_METRICS */
2727 rdpmcl(INTEL_PMC_FIXED_RDPMC_METRICS, metrics);
2728
2729 for_each_set_bit(idx, cpuc->active_mask, metric_end + 1) {
2730 if (!is_topdown_idx(idx))
2731 continue;
2732 other = cpuc->events[idx];
2733 __icl_update_topdown_event(other, slots, metrics,
2734 event ? event->hw.saved_slots : 0,
2735 event ? event->hw.saved_metric : 0);
2736 }
2737
2738 /*
2739 * Check and update this event, which may have been cleared
2740 * in active_mask, e.g. by x86_pmu_stop().
2741 */
2742 if (event && !test_bit(event->hw.idx, cpuc->active_mask)) {
2743 __icl_update_topdown_event(event, slots, metrics,
2744 event->hw.saved_slots,
2745 event->hw.saved_metric);
2746
2747 /*
2748 * In x86_pmu_stop(), the event is cleared in active_mask first,
2749 * then the delta is drained, which indicates a context switch for
2750 * counting.
2751 * Save the metric and slots for the context switch.
2752 * There is no need to reset PERF_METRICS and fixed counter 3,
2753 * because the values will be restored on the next schedule-in.
2754 */
2755 update_saved_topdown_regs(event, slots, metrics, metric_end);
2756 reset = false;
2757 }
2758
2759 if (reset) {
2760 /* The fixed counter 3 has to be written before the PERF_METRICS. */
2761 wrmsrl(MSR_CORE_PERF_FIXED_CTR3, 0);
2762 wrmsrl(MSR_PERF_METRICS, 0);
2763 if (event)
2764 update_saved_topdown_regs(event, 0, 0, metric_end);
2765 }
2766
2767 return slots;
2768 }
2769
2770 static u64 icl_update_topdown_event(struct perf_event *event)
2771 {
2772 return intel_update_topdown_event(event, INTEL_PMC_IDX_METRIC_BASE +
2773 x86_pmu.num_topdown_events - 1);
2774 }
2775
2776 DEFINE_STATIC_CALL(intel_pmu_update_topdown_event, x86_perf_event_update);
2777
2778 static void intel_pmu_read_topdown_event(struct perf_event *event)
2779 {
2780 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2781
2782 /* Only need to call update_topdown_event() once for group read. */
2783 if ((cpuc->txn_flags & PERF_PMU_TXN_READ) &&
2784 !is_slots_event(event))
2785 return;
2786
2787 perf_pmu_disable(event->pmu);
2788 static_call(intel_pmu_update_topdown_event)(event);
2789 perf_pmu_enable(event->pmu);
2790 }
2791
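/*
 * Read path: auto-reload (PEBS) events drain the PEBS buffer, topdown
 * events go through the PERF_METRICS/slots update, everything else is a
 * plain counter read.
 */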
2792 static void intel_pmu_read_event(struct perf_event *event)
2793 {
2794 if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
2795 intel_pmu_auto_reload_read(event);
2796 else if (is_topdown_count(event))
2797 intel_pmu_read_topdown_event(event);
2798 else
2799 x86_perf_event_update(event);
2800 }
2801
2802 static void intel_pmu_enable_fixed(struct perf_event *event)
2803 {
2804 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2805 struct hw_perf_event *hwc = &event->hw;
2806 u64 mask, bits = 0;
2807 int idx = hwc->idx;
2808
2809 if (is_topdown_idx(idx)) {
2810 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2811 /*
2812 * When there are other active TopDown events,
2813 * don't enable the fixed counter 3 again.
2814 */
2815 if (*(u64 *)cpuc->active_mask & INTEL_PMC_OTHER_TOPDOWN_BITS(idx))
2816 return;
2817
2818 idx = INTEL_PMC_IDX_FIXED_SLOTS;
2819 }
2820
2821 intel_set_masks(event, idx);
2822
2823 /*
2824 * Enable IRQ generation (0x8), if not PEBS,
2825 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
2826 * if requested:
2827 */
2828 if (!event->attr.precise_ip)
2829 bits |= INTEL_FIXED_0_ENABLE_PMI;
2830 if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
2831 bits |= INTEL_FIXED_0_USER;
2832 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
2833 bits |= INTEL_FIXED_0_KERNEL;
2834
2835 /*
2836 * ANY bit is supported in v3 and up
2837 */
2838 if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
2839 bits |= INTEL_FIXED_0_ANYTHREAD;
2840
2841 idx -= INTEL_PMC_IDX_FIXED;
2842 bits = intel_fixed_bits_by_idx(idx, bits);
2843 mask = intel_fixed_bits_by_idx(idx, INTEL_FIXED_BITS_MASK);
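/*
 * As an illustration: a non-PEBS event counting user and kernel on
 * fixed counter 1 ends up with 0x8|0x2|0x1 = 0xb shifted into the
 * second 4-bit field, i.e. 0xb0 in fixed_ctrl_val.
 */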
2844
2845 if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip) {
2846 bits |= intel_fixed_bits_by_idx(idx, ICL_FIXED_0_ADAPTIVE);
2847 mask |= intel_fixed_bits_by_idx(idx, ICL_FIXED_0_ADAPTIVE);
2848 }
2849
2850 cpuc->fixed_ctrl_val &= ~mask;
2851 cpuc->fixed_ctrl_val |= bits;
2852 }
2853
2854 static void intel_pmu_enable_event(struct perf_event *event)
2855 {
2856 u64 enable_mask = ARCH_PERFMON_EVENTSEL_ENABLE;
2857 struct hw_perf_event *hwc = &event->hw;
2858 int idx = hwc->idx;
2859
2860 if (unlikely(event->attr.precise_ip))
2861 intel_pmu_pebs_enable(event);
2862
2863 switch (idx) {
2864 case 0 ... INTEL_PMC_IDX_FIXED - 1:
2865 if (branch_sample_counters(event))
2866 enable_mask |= ARCH_PERFMON_EVENTSEL_BR_CNTR;
2867 intel_set_masks(event, idx);
2868 __x86_pmu_enable_event(hwc, enable_mask);
2869 break;
2870 case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1:
2871 case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END:
2872 intel_pmu_enable_fixed(event);
2873 break;
2874 case INTEL_PMC_IDX_FIXED_BTS:
2875 if (!__this_cpu_read(cpu_hw_events.enabled))
2876 return;
2877 intel_pmu_enable_bts(hwc->config);
2878 break;
2879 case INTEL_PMC_IDX_FIXED_VLBR:
2880 intel_set_masks(event, idx);
2881 break;
2882 default:
2883 pr_warn("Failed to enable the event with invalid index %d\n",
2884 idx);
2885 }
2886 }
2887
2888 static void intel_pmu_add_event(struct perf_event *event)
2889 {
2890 if (event->attr.precise_ip)
2891 intel_pmu_pebs_add(event);
2892 if (intel_pmu_needs_branch_stack(event))
2893 intel_pmu_lbr_add(event);
2894 }
2895
2896 /*
2897 * Save and restart an expired event. Called by NMI contexts,
2898 * so it has to be careful about preempting normal event ops:
2899 */
2900 int intel_pmu_save_and_restart(struct perf_event *event)
2901 {
2902 static_call(x86_pmu_update)(event);
2903 /*
2904 * For a checkpointed counter always reset back to 0. This
2905 * avoids a situation where the counter overflows, aborts the
2906 * transaction and is then set back to shortly before the
2907 * overflow, and overflows and aborts again.
2908 */
2909 if (unlikely(event_is_checkpointed(event))) {
2910 /* No race with NMIs because the counter should not be armed */
2911 wrmsrl(event->hw.event_base, 0);
2912 local64_set(&event->hw.prev_count, 0);
2913 }
2914 return static_call(x86_pmu_set_period)(event);
2915 }
2916
2917 static int intel_pmu_set_period(struct perf_event *event)
2918 {
2919 if (unlikely(is_topdown_count(event)))
2920 return static_call(intel_pmu_set_topdown_event_period)(event);
2921
2922 return x86_perf_event_set_period(event);
2923 }
2924
2925 static u64 intel_pmu_update(struct perf_event *event)
2926 {
2927 if (unlikely(is_topdown_count(event)))
2928 return static_call(intel_pmu_update_topdown_event)(event);
2929
2930 return x86_perf_event_update(event);
2931 }
2932
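/*
 * Clear all generic and fixed counters on this CPU, reset the BTS index,
 * ack pending overflows and turn off LBR freezing. Used as a last
 * resort, e.g. when the PMI handler detects a stuck interrupt loop.
 */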
2933 static void intel_pmu_reset(void)
2934 {
2935 struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
2936 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2937 unsigned long *cntr_mask = hybrid(cpuc->pmu, cntr_mask);
2938 unsigned long *fixed_cntr_mask = hybrid(cpuc->pmu, fixed_cntr_mask);
2939 unsigned long flags;
2940 int idx;
2941
2942 if (!*(u64 *)cntr_mask)
2943 return;
2944
2945 local_irq_save(flags);
2946
2947 pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());
2948
2949 for_each_set_bit(idx, cntr_mask, INTEL_PMC_MAX_GENERIC) {
2950 wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
2951 wrmsrl_safe(x86_pmu_event_addr(idx), 0ull);
2952 }
2953 for_each_set_bit(idx, fixed_cntr_mask, INTEL_PMC_MAX_FIXED) {
2954 if (fixed_counter_disabled(idx, cpuc->pmu))
2955 continue;
2956 wrmsrl_safe(x86_pmu_fixed_ctr_addr(idx), 0ull);
2957 }
2958
2959 if (ds)
2960 ds->bts_index = ds->bts_buffer_base;
2961
2962 /* Ack all overflows and disable fixed counters */
2963 if (x86_pmu.version >= 2) {
2964 intel_pmu_ack_status(intel_pmu_get_status());
2965 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
2966 }
2967
2968 /* Reset LBRs and LBR freezing */
2969 if (x86_pmu.lbr_nr) {
2970 update_debugctlmsr(get_debugctlmsr() &
2971 ~(DEBUGCTLMSR_FREEZE_LBRS_ON_PMI|DEBUGCTLMSR_LBR));
2972 }
2973
2974 local_irq_restore(flags);
2975 }
2976
2977 /*
2978 * We may be running with guest PEBS events created by KVM, and the
2979 * PEBS records are logged into the guest's DS and invisible to host.
2980 *
2981 * In the case of guest PEBS overflow, we only trigger a fake event
2982 * to emulate the PEBS overflow PMI for guest PEBS counters in KVM.
2983 * The guest will then vm-entry and check the guest DS area to read
2984 * the guest PEBS records.
2985 *
2986 * The contents and other behavior of the guest event do not matter.
2987 */
2988 static void x86_pmu_handle_guest_pebs(struct pt_regs *regs,
2989 struct perf_sample_data *data)
2990 {
2991 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2992 u64 guest_pebs_idxs = cpuc->pebs_enabled & ~cpuc->intel_ctrl_host_mask;
2993 struct perf_event *event = NULL;
2994 int bit;
2995
2996 if (!unlikely(perf_guest_state()))
2997 return;
2998
2999 if (!x86_pmu.pebs_ept || !x86_pmu.pebs_active ||
3000 !guest_pebs_idxs)
3001 return;
3002
3003 for_each_set_bit(bit, (unsigned long *)&guest_pebs_idxs, X86_PMC_IDX_MAX) {
3004 event = cpuc->events[bit];
3005 if (!event->attr.precise_ip)
3006 continue;
3007
3008 perf_sample_data_init(data, 0, event->hw.last_period);
3009 if (perf_event_overflow(event, data, regs))
3010 x86_pmu_stop(event, 0);
3011
3012 /* Injecting one fake event is enough. */
3013 break;
3014 }
3015 }
3016
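/*
 * Core of the PMI handler: drain PEBS if the buffer-overflow bit is set,
 * handle Intel PT and PERF_METRICS overflows, then walk the remaining
 * overflow bits and push a sample for each overflowed counter.
 */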
3017 static int handle_pmi_common(struct pt_regs *regs, u64 status)
3018 {
3019 struct perf_sample_data data;
3020 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3021 int bit;
3022 int handled = 0;
3023 u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
3024
3025 inc_irq_stat(apic_perf_irqs);
3026
3027 /*
3028 * Ignore a range of extra bits in status that do not indicate
3029 * overflow by themselves.
3030 */
3031 status &= ~(GLOBAL_STATUS_COND_CHG |
3032 GLOBAL_STATUS_ASIF |
3033 GLOBAL_STATUS_LBRS_FROZEN);
3034 if (!status)
3035 return 0;
3036 /*
3037 * In case multiple PEBS events are sampled at the same time,
3038 * it is possible to have GLOBAL_STATUS bit 62 set indicating
3039 * PEBS buffer overflow and also seeing at most 3 PEBS counters
3040 * having their bits set in the status register. This is a sign
3041 * that there was at least one PEBS record pending at the time
3042 * of the PMU interrupt. PEBS counters must only be processed
3043 * via the drain_pebs() calls and not via the regular sample
3044 * processing loop that follows in this function, otherwise
3045 * phony regular samples may be generated in the sampling buffer
3046 * not marked with the EXACT tag. Another possibility is to have
3047 * one PEBS event and at least one non-PEBS event which overflows
3048 * while PEBS is armed. In this case, bit 62 of GLOBAL_STATUS will
3049 * not be set, yet the overflow status bit for the PEBS counter will
3050 * be on Skylake.
3051 *
3052 * To avoid this problem, we systematically ignore the PEBS-enabled
3053 * counters from the GLOBAL_STATUS mask and we always process PEBS
3054 * events via drain_pebs().
3055 */
3056 status &= ~(cpuc->pebs_enabled & x86_pmu.pebs_capable);
3057
3058 /*
3059 * PEBS overflow sets bit 62 in the global status register
3060 */
3061 if (__test_and_clear_bit(GLOBAL_STATUS_BUFFER_OVF_BIT, (unsigned long *)&status)) {
3062 u64 pebs_enabled = cpuc->pebs_enabled;
3063
3064 handled++;
3065 x86_pmu_handle_guest_pebs(regs, &data);
3066 x86_pmu.drain_pebs(regs, &data);
3067 status &= intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
3068
3069 /*
3070 * PMI throttle may be triggered, which stops the PEBS event.
3071 * Although cpuc->pebs_enabled is updated accordingly, the
3072 * MSR_IA32_PEBS_ENABLE is not updated, because
3073 * cpuc->enabled has been forced to 0 in the PMI.
3074 * Update the MSR if pebs_enabled has changed.
3075 */
3076 if (pebs_enabled != cpuc->pebs_enabled)
3077 wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
3078 }
3079
3080 /*
3081 * Intel PT
3082 */
3083 if (__test_and_clear_bit(GLOBAL_STATUS_TRACE_TOPAPMI_BIT, (unsigned long *)&status)) {
3084 handled++;
3085 if (!perf_guest_handle_intel_pt_intr())
3086 intel_pt_interrupt();
3087 }
3088
3089 /*
3090 * Intel Perf metrics
3091 */
3092 if (__test_and_clear_bit(GLOBAL_STATUS_PERF_METRICS_OVF_BIT, (unsigned long *)&status)) {
3093 handled++;
3094 static_call(intel_pmu_update_topdown_event)(NULL);
3095 }
3096
3097 /*
3098 * Checkpointed counters can lead to 'spurious' PMIs because the
3099 * rollback caused by the PMI will have cleared the overflow status
3100 * bit. Therefore always force probe these counters.
3101 */
3102 status |= cpuc->intel_cp_status;
3103
3104 for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
3105 struct perf_event *event = cpuc->events[bit];
3106
3107 handled++;
3108
3109 if (!test_bit(bit, cpuc->active_mask))
3110 continue;
3111
3112 if (!intel_pmu_save_and_restart(event))
3113 continue;
3114
3115 perf_sample_data_init(&data, 0, event->hw.last_period);
3116
3117 if (has_branch_stack(event))
3118 intel_pmu_lbr_save_brstack(&data, cpuc, event);
3119
3120 if (perf_event_overflow(event, &data, regs))
3121 x86_pmu_stop(event, 0);
3122 }
3123
3124 return handled;
3125 }
3126
3127 /*
3128 * This handler is triggered by the local APIC, so the APIC IRQ handling
3129 * rules apply:
3130 */
3131 static int intel_pmu_handle_irq(struct pt_regs *regs)
3132 {
3133 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3134 bool late_ack = hybrid_bit(cpuc->pmu, late_ack);
3135 bool mid_ack = hybrid_bit(cpuc->pmu, mid_ack);
3136 int loops;
3137 u64 status;
3138 int handled;
3139 int pmu_enabled;
3140
3141 /*
3142 * Save the PMU state.
3143 * It needs to be restored when leaving the handler.
3144 */
3145 pmu_enabled = cpuc->enabled;
3146 /*
3147 * In general, the early ACK is only applied for old platforms.
3148 * For big cores starting with Haswell, the late ACK should be
3149 * applied.
3150 * For small cores after Tremont, we have to do the ACK right
3151 * before re-enabling counters, which is in the middle of the
3152 * NMI handler.
3153 */
3154 if (!late_ack && !mid_ack)
3155 apic_write(APIC_LVTPC, APIC_DM_NMI);
3156 intel_bts_disable_local();
3157 cpuc->enabled = 0;
3158 __intel_pmu_disable_all(true);
3159 handled = intel_pmu_drain_bts_buffer();
3160 handled += intel_bts_interrupt();
3161 status = intel_pmu_get_status();
3162 if (!status)
3163 goto done;
3164
3165 loops = 0;
3166 again:
3167 intel_pmu_lbr_read();
3168 intel_pmu_ack_status(status);
3169 if (++loops > 100) {
3170 static bool warned;
3171
3172 if (!warned) {
3173 WARN(1, "perfevents: irq loop stuck!\n");
3174 perf_event_print_debug();
3175 warned = true;
3176 }
3177 intel_pmu_reset();
3178 goto done;
3179 }
3180
3181 handled += handle_pmi_common(regs, status);
3182
3183 /*
3184 * Repeat if there is more work to be done:
3185 */
3186 status = intel_pmu_get_status();
3187 if (status)
3188 goto again;
3189
3190 done:
3191 if (mid_ack)
3192 apic_write(APIC_LVTPC, APIC_DM_NMI);
3193 /* Only restore PMU state when it's active. See x86_pmu_disable(). */
3194 cpuc->enabled = pmu_enabled;
3195 if (pmu_enabled)
3196 __intel_pmu_enable_all(0, true);
3197 intel_bts_enable_local();
3198
3199 /*
3200 * Only unmask the NMI after the overflow counters
3201 * have been reset. This avoids spurious NMIs on
3202 * Haswell CPUs.
3203 */
3204 if (late_ack)
3205 apic_write(APIC_LVTPC, APIC_DM_NMI);
3206 return handled;
3207 }
3208
3209 static struct event_constraint *
3210 intel_bts_constraints(struct perf_event *event)
3211 {
3212 if (unlikely(intel_pmu_has_bts(event)))
3213 return &bts_constraint;
3214
3215 return NULL;
3216 }
3217
3218 /*
3219 * Note: matches a fake event, like Fixed2.
3220 */
3221 static struct event_constraint *
3222 intel_vlbr_constraints(struct perf_event *event)
3223 {
3224 struct event_constraint *c = &vlbr_constraint;
3225
3226 if (unlikely(constraint_match(c, event->hw.config))) {
3227 event->hw.flags |= c->flags;
3228 return c;
3229 }
3230
3231 return NULL;
3232 }
3233
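/*
 * When both OFFCORE_RSP MSRs are available (PMU_FL_HAS_RSP_1), an
 * OFFCORE_RESPONSE event that cannot get its preferred MSR may use the
 * sibling MSR instead, provided the requested config fits the sibling's
 * valid mask.
 */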
3234 static int intel_alt_er(struct cpu_hw_events *cpuc,
3235 int idx, u64 config)
3236 {
3237 struct extra_reg *extra_regs = hybrid(cpuc->pmu, extra_regs);
3238 int alt_idx = idx;
3239
3240 if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1))
3241 return idx;
3242
3243 if (idx == EXTRA_REG_RSP_0)
3244 alt_idx = EXTRA_REG_RSP_1;
3245
3246 if (idx == EXTRA_REG_RSP_1)
3247 alt_idx = EXTRA_REG_RSP_0;
3248
3249 if (config & ~extra_regs[alt_idx].valid_mask)
3250 return idx;
3251
3252 return alt_idx;
3253 }
3254
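/*
 * Once intel_alt_er() has picked the sibling MSR, rewrite the event so
 * its event code selects that MSR (OFFCORE_RSP_0 vs OFFCORE_RSP_1).
 * This is why the extra_regs tables must define OFFCORE_RSP_X first.
 */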
3255 static void intel_fixup_er(struct perf_event *event, int idx)
3256 {
3257 struct extra_reg *extra_regs = hybrid(event->pmu, extra_regs);
3258 event->hw.extra_reg.idx = idx;
3259
3260 if (idx == EXTRA_REG_RSP_0) {
3261 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
3262 event->hw.config |= extra_regs[EXTRA_REG_RSP_0].event;
3263 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
3264 } else if (idx == EXTRA_REG_RSP_1) {
3265 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
3266 event->hw.config |= extra_regs[EXTRA_REG_RSP_1].event;
3267 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
3268 }
3269 }
3270
3271 /*
3272 * manage allocation of shared extra msr for certain events
3273 *
3274 * sharing can be:
3275 * per-cpu: to be shared between the various events on a single PMU
3276 * per-core: per-cpu + shared by HT threads
3277 */
3278 static struct event_constraint *
3279 __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
3280 struct perf_event *event,
3281 struct hw_perf_event_extra *reg)
3282 {
3283 struct event_constraint *c = &emptyconstraint;
3284 struct er_account *era;
3285 unsigned long flags;
3286 int idx = reg->idx;
3287
3288 /*
3289 * reg->alloc can be set due to existing state, so for fake cpuc we
3290 * need to ignore this, otherwise we might fail to allocate proper fake
3291 * state for this extra reg constraint. Also see the comment below.
3292 */
3293 if (reg->alloc && !cpuc->is_fake)
3294 return NULL; /* call x86_get_event_constraint() */
3295
3296 again:
3297 era = &cpuc->shared_regs->regs[idx];
3298 /*
3299 * we use spin_lock_irqsave() to avoid lockdep issues when
3300 * passing a fake cpuc
3301 */
3302 raw_spin_lock_irqsave(&era->lock, flags);
3303
3304 if (!atomic_read(&era->ref) || era->config == reg->config) {
3305
3306 /*
3307 * If it's a fake cpuc -- as per validate_{group,event}() we
3308 * shouldn't touch event state and we can avoid doing so
3309 * since both will only call get_event_constraints() once
3310 * on each event, this avoids the need for reg->alloc.
3311 *
3312 * Not doing the ER fixup will only result in era->reg being
3313 * wrong, but since we won't actually try and program hardware
3314 * this isn't a problem either.
3315 */
3316 if (!cpuc->is_fake) {
3317 if (idx != reg->idx)
3318 intel_fixup_er(event, idx);
3319
3320 /*
3321 * x86_schedule_events() can call get_event_constraints()
3322 * multiple times on events in the case of incremental
3323 * scheduling. reg->alloc ensures we only do the ER
3324 * allocation once.
3325 */
3326 reg->alloc = 1;
3327 }
3328
3329 /* lock in msr value */
3330 era->config = reg->config;
3331 era->reg = reg->reg;
3332
3333 /* one more user */
3334 atomic_inc(&era->ref);
3335
3336 /*
3337 * need to call x86_get_event_constraint()
3338 * to check if associated event has constraints
3339 */
3340 c = NULL;
3341 } else {
3342 idx = intel_alt_er(cpuc, idx, reg->config);
3343 if (idx != reg->idx) {
3344 raw_spin_unlock_irqrestore(&era->lock, flags);
3345 goto again;
3346 }
3347 }
3348 raw_spin_unlock_irqrestore(&era->lock, flags);
3349
3350 return c;
3351 }
3352
3353 static void
3354 __intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
3355 struct hw_perf_event_extra *reg)
3356 {
3357 struct er_account *era;
3358
3359 /*
3360 * Only put constraint if extra reg was actually allocated. Also takes
3361 * care of events which do not use an extra shared reg.
3362 *
3363 * Also, if this is a fake cpuc we shouldn't touch any event state
3364 * (reg->alloc) and we don't care about leaving inconsistent cpuc state
3365 * either since it'll be thrown out.
3366 */
3367 if (!reg->alloc || cpuc->is_fake)
3368 return;
3369
3370 era = &cpuc->shared_regs->regs[reg->idx];
3371
3372 /* one fewer user */
3373 atomic_dec(&era->ref);
3374
3375 /* allocate again next time */
3376 reg->alloc = 0;
3377 }
3378
3379 static struct event_constraint *
3380 intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
3381 struct perf_event *event)
3382 {
3383 struct event_constraint *c = NULL, *d;
3384 struct hw_perf_event_extra *xreg, *breg;
3385
3386 xreg = &event->hw.extra_reg;
3387 if (xreg->idx != EXTRA_REG_NONE) {
3388 c = __intel_shared_reg_get_constraints(cpuc, event, xreg);
3389 if (c == &emptyconstraint)
3390 return c;
3391 }
3392 breg = &event->hw.branch_reg;
3393 if (breg->idx != EXTRA_REG_NONE) {
3394 d = __intel_shared_reg_get_constraints(cpuc, event, breg);
3395 if (d == &emptyconstraint) {
3396 __intel_shared_reg_put_constraints(cpuc, xreg);
3397 c = d;
3398 }
3399 }
3400 return c;
3401 }
3402
3403 struct event_constraint *
3404 x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3405 struct perf_event *event)
3406 {
3407 struct event_constraint *event_constraints = hybrid(cpuc->pmu, event_constraints);
3408 struct event_constraint *c;
3409
3410 if (event_constraints) {
3411 for_each_event_constraint(c, event_constraints) {
3412 if (constraint_match(c, event->hw.config)) {
3413 event->hw.flags |= c->flags;
3414 return c;
3415 }
3416 }
3417 }
3418
3419 return &hybrid_var(cpuc->pmu, unconstrained);
3420 }
3421
3422 static struct event_constraint *
3423 __intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3424 struct perf_event *event)
3425 {
3426 struct event_constraint *c;
3427
3428 c = intel_vlbr_constraints(event);
3429 if (c)
3430 return c;
3431
3432 c = intel_bts_constraints(event);
3433 if (c)
3434 return c;
3435
3436 c = intel_shared_regs_constraints(cpuc, event);
3437 if (c)
3438 return c;
3439
3440 c = intel_pebs_constraints(event);
3441 if (c)
3442 return c;
3443
3444 return x86_get_event_constraints(cpuc, idx, event);
3445 }
3446
3447 static void
3448 intel_start_scheduling(struct cpu_hw_events *cpuc)
3449 {
3450 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3451 struct intel_excl_states *xl;
3452 int tid = cpuc->excl_thread_id;
3453
3454 /*
3455 * nothing needed if in group validation mode
3456 */
3457 if (cpuc->is_fake || !is_ht_workaround_enabled())
3458 return;
3459
3460 /*
3461 * no exclusion needed
3462 */
3463 if (WARN_ON_ONCE(!excl_cntrs))
3464 return;
3465
3466 xl = &excl_cntrs->states[tid];
3467
3468 xl->sched_started = true;
3469 /*
3470 * lock shared state until we are done scheduling
3471 * in stop_event_scheduling()
3472 * makes scheduling appear as a transaction
3473 */
3474 raw_spin_lock(&excl_cntrs->lock);
3475 }
3476
3477 static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
3478 {
3479 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3480 struct event_constraint *c = cpuc->event_constraint[idx];
3481 struct intel_excl_states *xl;
3482 int tid = cpuc->excl_thread_id;
3483
3484 if (cpuc->is_fake || !is_ht_workaround_enabled())
3485 return;
3486
3487 if (WARN_ON_ONCE(!excl_cntrs))
3488 return;
3489
3490 if (!(c->flags & PERF_X86_EVENT_DYNAMIC))
3491 return;
3492
3493 xl = &excl_cntrs->states[tid];
3494
3495 lockdep_assert_held(&excl_cntrs->lock);
3496
3497 if (c->flags & PERF_X86_EVENT_EXCL)
3498 xl->state[cntr] = INTEL_EXCL_EXCLUSIVE;
3499 else
3500 xl->state[cntr] = INTEL_EXCL_SHARED;
3501 }
3502
3503 static void
3504 intel_stop_scheduling(struct cpu_hw_events *cpuc)
3505 {
3506 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3507 struct intel_excl_states *xl;
3508 int tid = cpuc->excl_thread_id;
3509
3510 /*
3511 * nothing needed if in group validation mode
3512 */
3513 if (cpuc->is_fake || !is_ht_workaround_enabled())
3514 return;
3515 /*
3516 * no exclusion needed
3517 */
3518 if (WARN_ON_ONCE(!excl_cntrs))
3519 return;
3520
3521 xl = &excl_cntrs->states[tid];
3522
3523 xl->sched_started = false;
3524 /*
3525 * release shared state lock (acquired in intel_start_scheduling())
3526 */
3527 raw_spin_unlock(&excl_cntrs->lock);
3528 }
3529
3530 static struct event_constraint *
3531 dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx)
3532 {
3533 WARN_ON_ONCE(!cpuc->constraint_list);
3534
3535 if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
3536 struct event_constraint *cx;
3537
3538 /*
3539 * grab pre-allocated constraint entry
3540 */
3541 cx = &cpuc->constraint_list[idx];
3542
3543 /*
3544 * initialize dynamic constraint
3545 * with static constraint
3546 */
3547 *cx = *c;
3548
3549 /*
3550 * mark constraint as dynamic
3551 */
3552 cx->flags |= PERF_X86_EVENT_DYNAMIC;
3553 c = cx;
3554 }
3555
3556 return c;
3557 }
3558
3559 static struct event_constraint *
3560 intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
3561 int idx, struct event_constraint *c)
3562 {
3563 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3564 struct intel_excl_states *xlo;
3565 int tid = cpuc->excl_thread_id;
3566 int is_excl, i, w;
3567
3568 /*
3569 * validating a group does not require
3570 * enforcing cross-thread exclusion
3571 */
3572 if (cpuc->is_fake || !is_ht_workaround_enabled())
3573 return c;
3574
3575 /*
3576 * no exclusion needed
3577 */
3578 if (WARN_ON_ONCE(!excl_cntrs))
3579 return c;
3580
3581 /*
3582 * because we modify the constraint, we need
3583 * to make a copy. Static constraints come
3584 * from static const tables.
3585 *
3586 * only needed when constraint has not yet
3587 * been cloned (marked dynamic)
3588 */
3589 c = dyn_constraint(cpuc, c, idx);
3590
3591 /*
3592 * From here on, the constraint is dynamic.
3593 * Either it was just allocated above, or it
3594 * was allocated during an earlier invocation
3595 * of this function
3596 */
3597
3598 /*
3599 * state of sibling HT
3600 */
3601 xlo = &excl_cntrs->states[tid ^ 1];
3602
3603 /*
3604 * event requires exclusive counter access
3605 * across HT threads
3606 */
3607 is_excl = c->flags & PERF_X86_EVENT_EXCL;
3608 if (is_excl && !(event->hw.flags & PERF_X86_EVENT_EXCL_ACCT)) {
3609 event->hw.flags |= PERF_X86_EVENT_EXCL_ACCT;
3610 if (!cpuc->n_excl++)
3611 WRITE_ONCE(excl_cntrs->has_exclusive[tid], 1);
3612 }
3613
3614 /*
3615 * Modify static constraint with current dynamic
3616 * state of thread
3617 *
3618 * EXCLUSIVE: sibling counter measuring exclusive event
3619 * SHARED : sibling counter measuring non-exclusive event
3620 * UNUSED : sibling counter unused
3621 */
3622 w = c->weight;
3623 for_each_set_bit(i, c->idxmsk, X86_PMC_IDX_MAX) {
3624 /*
3625 * exclusive event in sibling counter
3626 * our corresponding counter cannot be used
3627 * regardless of our event
3628 */
3629 if (xlo->state[i] == INTEL_EXCL_EXCLUSIVE) {
3630 __clear_bit(i, c->idxmsk);
3631 w--;
3632 continue;
3633 }
3634 /*
3635 * if measuring an exclusive event, sibling
3636 * measuring non-exclusive, then counter cannot
3637 * be used
3638 */
3639 if (is_excl && xlo->state[i] == INTEL_EXCL_SHARED) {
3640 __clear_bit(i, c->idxmsk);
3641 w--;
3642 continue;
3643 }
3644 }
3645
3646 /*
3647 * if we return an empty mask, then switch
3648 * back to static empty constraint to avoid
3649 * the cost of freeing later on
3650 */
3651 if (!w)
3652 c = &emptyconstraint;
3653
3654 c->weight = w;
3655
3656 return c;
3657 }
3658
3659 static struct event_constraint *
3660 intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3661 struct perf_event *event)
3662 {
3663 struct event_constraint *c1, *c2;
3664
3665 c1 = cpuc->event_constraint[idx];
3666
3667 /*
3668 * first time only
3669 * - static constraint: no change across incremental scheduling calls
3670 * - dynamic constraint: handled by intel_get_excl_constraints()
3671 */
3672 c2 = __intel_get_event_constraints(cpuc, idx, event);
3673 if (c1) {
3674 WARN_ON_ONCE(!(c1->flags & PERF_X86_EVENT_DYNAMIC));
3675 bitmap_copy(c1->idxmsk, c2->idxmsk, X86_PMC_IDX_MAX);
3676 c1->weight = c2->weight;
3677 c2 = c1;
3678 }
3679
3680 if (cpuc->excl_cntrs)
3681 return intel_get_excl_constraints(cpuc, event, idx, c2);
3682
3683 /* Not all counters support the branch counter feature. */
3684 if (branch_sample_counters(event)) {
3685 c2 = dyn_constraint(cpuc, c2, idx);
3686 c2->idxmsk64 &= x86_pmu.lbr_counters;
3687 c2->weight = hweight64(c2->idxmsk64);
3688 }
3689
3690 return c2;
3691 }
3692
3693 static void intel_put_excl_constraints(struct cpu_hw_events *cpuc,
3694 struct perf_event *event)
3695 {
3696 struct hw_perf_event *hwc = &event->hw;
3697 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3698 int tid = cpuc->excl_thread_id;
3699 struct intel_excl_states *xl;
3700
3701 /*
3702 * nothing needed if in group validation mode
3703 */
3704 if (cpuc->is_fake)
3705 return;
3706
3707 if (WARN_ON_ONCE(!excl_cntrs))
3708 return;
3709
3710 if (hwc->flags & PERF_X86_EVENT_EXCL_ACCT) {
3711 hwc->flags &= ~PERF_X86_EVENT_EXCL_ACCT;
3712 if (!--cpuc->n_excl)
3713 WRITE_ONCE(excl_cntrs->has_exclusive[tid], 0);
3714 }
3715
3716 /*
3717 * If event was actually assigned, then mark the counter state as
3718 * unused now.
3719 */
3720 if (hwc->idx >= 0) {
3721 xl = &excl_cntrs->states[tid];
3722
3723 /*
3724 * put_constraint may be called from x86_schedule_events()
3725 * which already has the lock held so here make locking
3726 * conditional.
3727 */
3728 if (!xl->sched_started)
3729 raw_spin_lock(&excl_cntrs->lock);
3730
3731 xl->state[hwc->idx] = INTEL_EXCL_UNUSED;
3732
3733 if (!xl->sched_started)
3734 raw_spin_unlock(&excl_cntrs->lock);
3735 }
3736 }
3737
3738 static void
3739 intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
3740 struct perf_event *event)
3741 {
3742 struct hw_perf_event_extra *reg;
3743
3744 reg = &event->hw.extra_reg;
3745 if (reg->idx != EXTRA_REG_NONE)
3746 __intel_shared_reg_put_constraints(cpuc, reg);
3747
3748 reg = &event->hw.branch_reg;
3749 if (reg->idx != EXTRA_REG_NONE)
3750 __intel_shared_reg_put_constraints(cpuc, reg);
3751 }
3752
3753 static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
3754 struct perf_event *event)
3755 {
3756 intel_put_shared_regs_event_constraints(cpuc, event);
3757
3758 /*
3759 * if the PMU has exclusive counter restrictions, then
3760 * all events are subject to them and must call the
3761 * put_excl_constraints() routine
3762 */
3763 if (cpuc->excl_cntrs)
3764 intel_put_excl_constraints(cpuc, event);
3765 }
3766
3767 static void intel_pebs_aliases_core2(struct perf_event *event)
3768 {
3769 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3770 /*
3771 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3772 * (0x003c) so that we can use it with PEBS.
3773 *
3774 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3775 * PEBS capable. However we can use INST_RETIRED.ANY_P
3776 * (0x00c0), which is a PEBS capable event, to get the same
3777 * count.
3778 *
3779 * INST_RETIRED.ANY_P counts the number of cycles that retire
3780 * CNTMASK instructions. By setting CNTMASK to a value (16)
3781 * larger than the maximum number of instructions that can be
3782 * retired per cycle (4) and then inverting the condition, we
3783 * count all cycles that retire 16 or less instructions, which
3784 * is every cycle.
3785 *
3786 * Thereby we gain a PEBS capable cycle counter.
3787 */
3788 u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
3789
3790 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3791 event->hw.config = alt_config;
3792 }
3793 }
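
/*
 * Illustrative sketch, not part of the driver: the rewrite above produces
 * the same encoding a user could request by hand with a raw event. Assuming
 * the usual perf tool raw syntax, "event=0xc0,inv=1,cmask=16" packs into
 * config 0x108000c0 (event select 0xc0, INV at bit 23, CMASK 0x10 in bits
 * 31:24), which is what intel_pebs_aliases_core2() turns 0x003c into, plus
 * any flag bits outside X86_RAW_EVENT_MASK that the original config carried.
 */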
3794
3795 static void intel_pebs_aliases_snb(struct perf_event *event)
3796 {
3797 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3798 /*
3799 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3800 * (0x003c) so that we can use it with PEBS.
3801 *
3802 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3803 * PEBS capable. However we can use UOPS_RETIRED.ALL
3804 * (0x01c2), which is a PEBS capable event, to get the same
3805 * count.
3806 *
3807 * UOPS_RETIRED.ALL counts the number of cycles that retire
3808 * CNTMASK micro-ops. By setting CNTMASK to a value (16)
3809 * larger than the maximum number of micro-ops that can be
3810 * retired per cycle (4) and then inverting the condition, we
3811 * count all cycles that retire 16 or less micro-ops, which
3812 * is every cycle.
3813 *
3814 * Thereby we gain a PEBS capable cycle counter.
3815 */
3816 u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);
3817
3818 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3819 event->hw.config = alt_config;
3820 }
3821 }
3822
3823 static void intel_pebs_aliases_precdist(struct perf_event *event)
3824 {
3825 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3826 /*
3827 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3828 * (0x003c) so that we can use it with PEBS.
3829 *
3830 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3831 * PEBS capable. However we can use INST_RETIRED.PREC_DIST
3832 * (0x01c0), which is a PEBS capable event, to get the same
3833 * count.
3834 *
3835 * The PREC_DIST event has special support to minimize sample
3836 * shadowing effects. One drawback is that it can only be
3837 * programmed on counter 1, but that seems like an
3838 * acceptable trade off.
3839 */
3840 u64 alt_config = X86_CONFIG(.event=0xc0, .umask=0x01, .inv=1, .cmask=16);
3841
3842 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3843 event->hw.config = alt_config;
3844 }
3845 }
3846
3847 static void intel_pebs_aliases_ivb(struct perf_event *event)
3848 {
3849 if (event->attr.precise_ip < 3)
3850 return intel_pebs_aliases_snb(event);
3851 return intel_pebs_aliases_precdist(event);
3852 }
3853
3854 static void intel_pebs_aliases_skl(struct perf_event *event)
3855 {
3856 if (event->attr.precise_ip < 3)
3857 return intel_pebs_aliases_core2(event);
3858 return intel_pebs_aliases_precdist(event);
3859 }
3860
3861 static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event)
3862 {
3863 unsigned long flags = x86_pmu.large_pebs_flags;
3864
3865 if (event->attr.use_clockid)
3866 flags &= ~PERF_SAMPLE_TIME;
3867 if (!event->attr.exclude_kernel)
3868 flags &= ~PERF_SAMPLE_REGS_USER;
3869 if (event->attr.sample_regs_user & ~PEBS_GP_REGS)
3870 flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR);
3871 return flags;
3872 }
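
/*
 * How this filter is used (see intel_pmu_hw_config() below): only when every
 * bit in attr.sample_type survives intel_pmu_large_pebs_flags() can the event
 * run in the multi-record (large) PEBS mode; otherwise the PEBS threshold
 * stays at one record per interrupt and the batching benefit is lost.
 */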
3873
3874 static int intel_pmu_bts_config(struct perf_event *event)
3875 {
3876 struct perf_event_attr *attr = &event->attr;
3877
3878 if (unlikely(intel_pmu_has_bts(event))) {
3879 /* BTS is not supported by this architecture. */
3880 if (!x86_pmu.bts_active)
3881 return -EOPNOTSUPP;
3882
3883 /* BTS is currently only allowed for user-mode. */
3884 if (!attr->exclude_kernel)
3885 return -EOPNOTSUPP;
3886
3887 /* BTS is not allowed for precise events. */
3888 if (attr->precise_ip)
3889 return -EOPNOTSUPP;
3890
3891 /* disallow bts if conflicting events are present */
3892 if (x86_add_exclusive(x86_lbr_exclusive_lbr))
3893 return -EBUSY;
3894
3895 event->destroy = hw_perf_lbr_event_destroy;
3896 }
3897
3898 return 0;
3899 }
3900
3901 static int core_pmu_hw_config(struct perf_event *event)
3902 {
3903 int ret = x86_pmu_hw_config(event);
3904
3905 if (ret)
3906 return ret;
3907
3908 return intel_pmu_bts_config(event);
3909 }
3910
3911 #define INTEL_TD_METRIC_AVAILABLE_MAX (INTEL_TD_METRIC_RETIRING + \
3912 ((x86_pmu.num_topdown_events - 1) << 8))
3913
3914 static bool is_available_metric_event(struct perf_event *event)
3915 {
3916 return is_metric_event(event) &&
3917 event->attr.config <= INTEL_TD_METRIC_AVAILABLE_MAX;
3918 }
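
/*
 * Worked example (assuming INTEL_TD_METRIC_RETIRING is 0x8000 with a 0x100
 * stride per metric, as defined in perf_event.h): with four topdown events
 * (the level-1 metrics) the cut-off is 0x8000 + (3 << 8) = 0x8300, so metric
 * configs 0x8000..0x8300 are accepted; with eight events (level-2 metrics
 * added) the cut-off becomes 0x8700.
 */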
3919
3920 static inline bool is_mem_loads_event(struct perf_event *event)
3921 {
3922 return (event->attr.config & INTEL_ARCH_EVENT_MASK) == X86_CONFIG(.event=0xcd, .umask=0x01);
3923 }
3924
3925 static inline bool is_mem_loads_aux_event(struct perf_event *event)
3926 {
3927 return (event->attr.config & INTEL_ARCH_EVENT_MASK) == X86_CONFIG(.event=0x03, .umask=0x82);
3928 }
3929
3930 static inline bool require_mem_loads_aux_event(struct perf_event *event)
3931 {
3932 if (!(x86_pmu.flags & PMU_FL_MEM_LOADS_AUX))
3933 return false;
3934
3935 if (is_hybrid())
3936 return hybrid_pmu(event->pmu)->pmu_type == hybrid_big;
3937
3938 return true;
3939 }
3940
3941 static inline bool intel_pmu_has_cap(struct perf_event *event, int idx)
3942 {
3943 union perf_capabilities *intel_cap = &hybrid(event->pmu, intel_cap);
3944
3945 return test_bit(idx, (unsigned long *)&intel_cap->capabilities);
3946 }
3947
3948 static int intel_pmu_hw_config(struct perf_event *event)
3949 {
3950 int ret = x86_pmu_hw_config(event);
3951
3952 if (ret)
3953 return ret;
3954
3955 ret = intel_pmu_bts_config(event);
3956 if (ret)
3957 return ret;
3958
3959 if (event->attr.precise_ip) {
3960 if ((event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_FIXED_VLBR_EVENT)
3961 return -EINVAL;
3962
3963 if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) {
3964 event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
3965 if (!(event->attr.sample_type &
3966 ~intel_pmu_large_pebs_flags(event))) {
3967 event->hw.flags |= PERF_X86_EVENT_LARGE_PEBS;
3968 event->attach_state |= PERF_ATTACH_SCHED_CB;
3969 }
3970 }
3971 if (x86_pmu.pebs_aliases)
3972 x86_pmu.pebs_aliases(event);
3973 }
3974
3975 if (needs_branch_stack(event) && is_sampling_event(event))
3976 event->hw.flags |= PERF_X86_EVENT_NEEDS_BRANCH_STACK;
3977
3978 if (branch_sample_counters(event)) {
3979 struct perf_event *leader, *sibling;
3980 int num = 0;
3981
3982 if (!(x86_pmu.flags & PMU_FL_BR_CNTR) ||
3983 (event->attr.config & ~INTEL_ARCH_EVENT_MASK))
3984 return -EINVAL;
3985
3986 /*
3987 * The branch counter logging is not supported in the call stack
3988 * mode yet, since we cannot simply flush the LBR during e.g.,
3989 * multiplexing. Also, there is no obvious usage with the call
3990 * stack mode. Simply forbid it for now.
3991 *
3992 * If any events in the group enable the branch counter logging
3993 * feature, the group is treated as a branch counter logging
3994 * group, which requires the extra space to store the counters.
3995 */
3996 leader = event->group_leader;
3997 if (branch_sample_call_stack(leader))
3998 return -EINVAL;
3999 if (branch_sample_counters(leader))
4000 num++;
4001 leader->hw.flags |= PERF_X86_EVENT_BRANCH_COUNTERS;
4002
4003 for_each_sibling_event(sibling, leader) {
4004 if (branch_sample_call_stack(sibling))
4005 return -EINVAL;
4006 if (branch_sample_counters(sibling))
4007 num++;
4008 }
4009
4010 if (num > fls(x86_pmu.lbr_counters))
4011 return -EINVAL;
4012 /*
4013 * Only applying the PERF_SAMPLE_BRANCH_COUNTERS doesn't
4014 * require any branch stack setup.
4015 * Clear the bit to avoid unnecessary branch stack setup.
4016 */
4017 if (0 == (event->attr.branch_sample_type &
4018 ~(PERF_SAMPLE_BRANCH_PLM_ALL |
4019 PERF_SAMPLE_BRANCH_COUNTERS)))
4020 event->hw.flags &= ~PERF_X86_EVENT_NEEDS_BRANCH_STACK;
4021
4022 /*
4023 * Force the leader to be a LBR event. So LBRs can be reset
4024 * with the leader event. See intel_pmu_lbr_del() for details.
4025 */
4026 if (!intel_pmu_needs_branch_stack(leader))
4027 return -EINVAL;
4028 }
4029
4030 if (intel_pmu_needs_branch_stack(event)) {
4031 ret = intel_pmu_setup_lbr_filter(event);
4032 if (ret)
4033 return ret;
4034 event->attach_state |= PERF_ATTACH_SCHED_CB;
4035
4036 /*
4037 * BTS is set up earlier in this path, so don't account twice
4038 */
4039 if (!unlikely(intel_pmu_has_bts(event))) {
4040 /* disallow lbr if conflicting events are present */
4041 if (x86_add_exclusive(x86_lbr_exclusive_lbr))
4042 return -EBUSY;
4043
4044 event->destroy = hw_perf_lbr_event_destroy;
4045 }
4046 }
4047
4048 if (event->attr.aux_output) {
4049 if (!event->attr.precise_ip)
4050 return -EINVAL;
4051
4052 event->hw.flags |= PERF_X86_EVENT_PEBS_VIA_PT;
4053 }
4054
4055 if ((event->attr.type == PERF_TYPE_HARDWARE) ||
4056 (event->attr.type == PERF_TYPE_HW_CACHE))
4057 return 0;
4058
4059 /*
4060 * Config Topdown slots and metric events
4061 *
4062 * The slots event on Fixed Counter 3 can support sampling,
4063 * which will be handled normally in x86_perf_event_update().
4064 *
4065 * Metric events don't support sampling and require being paired
4066 * with a slots event as group leader. When the slots event
4067 * is used in a metrics group, it too cannot support sampling.
4068 */
4069 if (intel_pmu_has_cap(event, PERF_CAP_METRICS_IDX) && is_topdown_event(event)) {
4070 if (event->attr.config1 || event->attr.config2)
4071 return -EINVAL;
4072
4073 /*
4074 * The TopDown metrics events and slots event don't
4075 * support any filters.
4076 */
4077 if (event->attr.config & X86_ALL_EVENT_FLAGS)
4078 return -EINVAL;
4079
4080 if (is_available_metric_event(event)) {
4081 struct perf_event *leader = event->group_leader;
4082
4083 /* The metric events don't support sampling. */
4084 if (is_sampling_event(event))
4085 return -EINVAL;
4086
4087 /* The metric events require a slots group leader. */
4088 if (!is_slots_event(leader))
4089 return -EINVAL;
4090
4091 /*
4092 * The leader/SLOTS must not be a sampling event for
4093 * metric use; hardware requires it starts at 0 when used
4094 * in conjunction with MSR_PERF_METRICS.
4095 */
4096 if (is_sampling_event(leader))
4097 return -EINVAL;
4098
4099 event->event_caps |= PERF_EV_CAP_SIBLING;
4100 /*
4101 * Only once we have a METRICs sibling do we
4102 * need TopDown magic.
4103 */
4104 leader->hw.flags |= PERF_X86_EVENT_TOPDOWN;
4105 event->hw.flags |= PERF_X86_EVENT_TOPDOWN;
4106 }
4107 }
4108
4109 /*
4110 * The load latency event X86_CONFIG(.event=0xcd, .umask=0x01) on SPR
4111 * doesn't function quite right. As a work-around it needs to always be
4112 * co-scheduled with an auxiliary event X86_CONFIG(.event=0x03, .umask=0x82).
4113 * The actual count of this second event is irrelevant; it just needs
4114 * to be active to make the first event function correctly.
4115 *
4116 * In a group, the auxiliary event must be in front of the load latency
4117 * event. This ordering rule keeps the check simple, because perf
4118 * does not yet see the complete group at this point.
4119 */
4120 if (require_mem_loads_aux_event(event) &&
4121 (event->attr.sample_type & PERF_SAMPLE_DATA_SRC) &&
4122 is_mem_loads_event(event)) {
4123 struct perf_event *leader = event->group_leader;
4124 struct perf_event *sibling = NULL;
4125
4126 /*
4127 * When this memload event is also the first event (no group
4128 * exists yet), then there is no aux event before it.
4129 */
4130 if (leader == event)
4131 return -ENODATA;
4132
4133 if (!is_mem_loads_aux_event(leader)) {
4134 for_each_sibling_event(sibling, leader) {
4135 if (is_mem_loads_aux_event(sibling))
4136 break;
4137 }
4138 if (list_entry_is_head(sibling, &leader->sibling_list, sibling_list))
4139 return -ENODATA;
4140 }
4141 }
4142
4143 if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
4144 return 0;
4145
4146 if (x86_pmu.version < 3)
4147 return -EINVAL;
4148
4149 ret = perf_allow_cpu(&event->attr);
4150 if (ret)
4151 return ret;
4152
4153 event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;
4154
4155 return 0;
4156 }
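
/*
 * Usage sketch for the two group-ordering rules enforced above (hedged: the
 * exact event names depend on the perf tool and CPU generation):
 *
 *   # topdown metrics: the slots event must lead the group
 *   perf stat -e '{slots,topdown-retiring,topdown-bad-spec}' -a sleep 1
 *
 *   # SPR load latency: the auxiliary event must come before mem-loads
 *   perf record -e '{cpu/mem-loads-aux/,cpu/mem-loads,ldlat=30/}' -d -- ./a.out
 *
 * Reversing either order trips the checks above (-EINVAL or -ENODATA).
 */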
4157
4158 /*
4159 * Currently, the only caller of this function is the atomic_switch_perf_msrs().
4160 * The host perf context helps to prepare the values of the real hardware for
4161 * a set of msrs that need to be switched atomically in a vmx transaction.
4162 *
4163 * For example, the pseudocode needed to add a new msr should look like:
4164 *
4165 * arr[(*nr)++] = (struct perf_guest_switch_msr){
4166 * .msr = the hardware msr address,
4167 * .host = the value the hardware has when it doesn't run a guest,
4168 * .guest = the value the hardware has when it runs a guest,
4169 * };
4170 *
4171 * These values have nothing to do with the emulated values the guest sees
4172 * when it uses {RD,WR}MSR, which should be handled by the KVM context,
4173 * specifically in the intel_pmu_{get,set}_msr().
4174 */
4175 static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data)
4176 {
4177 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
4178 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
4179 struct kvm_pmu *kvm_pmu = (struct kvm_pmu *)data;
4180 u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
4181 u64 pebs_mask = cpuc->pebs_enabled & x86_pmu.pebs_capable;
4182 int global_ctrl, pebs_enable;
4183
4184 /*
4185 * In addition to obeying exclude_guest/exclude_host, remove bits being
4186 * used for PEBS when running a guest, because PEBS writes to virtual
4187 * addresses (not physical addresses).
4188 */
4189 *nr = 0;
4190 global_ctrl = (*nr)++;
4191 arr[global_ctrl] = (struct perf_guest_switch_msr){
4192 .msr = MSR_CORE_PERF_GLOBAL_CTRL,
4193 .host = intel_ctrl & ~cpuc->intel_ctrl_guest_mask,
4194 .guest = intel_ctrl & ~cpuc->intel_ctrl_host_mask & ~pebs_mask,
4195 };
4196
4197 if (!x86_pmu.pebs)
4198 return arr;
4199
4200 /*
4201 * If PMU counter has PEBS enabled it is not enough to
4202 * disable counter on a guest entry since PEBS memory
4203 * write can overshoot guest entry and corrupt guest
4204 * memory. Disabling PEBS solves the problem.
4205 *
4206 * Don't do this if the CPU already enforces it.
4207 */
4208 if (x86_pmu.pebs_no_isolation) {
4209 arr[(*nr)++] = (struct perf_guest_switch_msr){
4210 .msr = MSR_IA32_PEBS_ENABLE,
4211 .host = cpuc->pebs_enabled,
4212 .guest = 0,
4213 };
4214 return arr;
4215 }
4216
4217 if (!kvm_pmu || !x86_pmu.pebs_ept)
4218 return arr;
4219
4220 arr[(*nr)++] = (struct perf_guest_switch_msr){
4221 .msr = MSR_IA32_DS_AREA,
4222 .host = (unsigned long)cpuc->ds,
4223 .guest = kvm_pmu->ds_area,
4224 };
4225
4226 if (x86_pmu.intel_cap.pebs_baseline) {
4227 arr[(*nr)++] = (struct perf_guest_switch_msr){
4228 .msr = MSR_PEBS_DATA_CFG,
4229 .host = cpuc->active_pebs_data_cfg,
4230 .guest = kvm_pmu->pebs_data_cfg,
4231 };
4232 }
4233
4234 pebs_enable = (*nr)++;
4235 arr[pebs_enable] = (struct perf_guest_switch_msr){
4236 .msr = MSR_IA32_PEBS_ENABLE,
4237 .host = cpuc->pebs_enabled & ~cpuc->intel_ctrl_guest_mask,
4238 .guest = pebs_mask & ~cpuc->intel_ctrl_host_mask,
4239 };
4240
4241 if (arr[pebs_enable].host) {
4242 /* Disable guest PEBS if host PEBS is enabled. */
4243 arr[pebs_enable].guest = 0;
4244 } else {
4245 /* Disable guest PEBS thoroughly for cross-mapped PEBS counters. */
4246 arr[pebs_enable].guest &= ~kvm_pmu->host_cross_mapped_mask;
4247 arr[global_ctrl].guest &= ~kvm_pmu->host_cross_mapped_mask;
4248 /* Set hw GLOBAL_CTRL bits for PEBS counter when it runs for guest */
4249 arr[global_ctrl].guest |= arr[pebs_enable].guest;
4250 }
4251
4252 return arr;
4253 }
4254
4255 static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr, void *data)
4256 {
4257 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
4258 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
4259 int idx;
4260
4261 for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
4262 struct perf_event *event = cpuc->events[idx];
4263
4264 arr[idx].msr = x86_pmu_config_addr(idx);
4265 arr[idx].host = arr[idx].guest = 0;
4266
4267 if (!test_bit(idx, cpuc->active_mask))
4268 continue;
4269
4270 arr[idx].host = arr[idx].guest =
4271 event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;
4272
4273 if (event->attr.exclude_host)
4274 arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
4275 else if (event->attr.exclude_guest)
4276 arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
4277 }
4278
4279 *nr = x86_pmu_max_num_counters(cpuc->pmu);
4280 return arr;
4281 }
4282
4283 static void core_pmu_enable_event(struct perf_event *event)
4284 {
4285 if (!event->attr.exclude_host)
4286 x86_pmu_enable_event(event);
4287 }
4288
4289 static void core_pmu_enable_all(int added)
4290 {
4291 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
4292 int idx;
4293
4294 for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
4295 struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
4296
4297 if (!test_bit(idx, cpuc->active_mask) ||
4298 cpuc->events[idx]->attr.exclude_host)
4299 continue;
4300
4301 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
4302 }
4303 }
4304
4305 static int hsw_hw_config(struct perf_event *event)
4306 {
4307 int ret = intel_pmu_hw_config(event);
4308
4309 if (ret)
4310 return ret;
4311 if (!boot_cpu_has(X86_FEATURE_RTM) && !boot_cpu_has(X86_FEATURE_HLE))
4312 return 0;
4313 event->hw.config |= event->attr.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
4314
4315 /*
4316 * IN_TX/IN_TX-CP filters are not supported by the Haswell PMU with
4317 * PEBS or in ANY thread mode. Since the results are non-sensical, forbid
4318 * this combination.
4319 */
4320 if ((event->hw.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)) &&
4321 ((event->hw.config & ARCH_PERFMON_EVENTSEL_ANY) ||
4322 event->attr.precise_ip > 0))
4323 return -EOPNOTSUPP;
4324
4325 if (event_is_checkpointed(event)) {
4326 /*
4327 * Sampling of checkpointed events can cause situations where
4328 * the CPU constantly aborts because of an overflow, which is
4329 * then checkpointed back and ignored. Forbid checkpointing
4330 * for sampling.
4331 *
4332 * But still allow a long sampling period, so that perf stat
4333 * from KVM works.
4334 */
4335 if (event->attr.sample_period > 0 &&
4336 event->attr.sample_period < 0x7fffffff)
4337 return -EOPNOTSUPP;
4338 }
4339 return 0;
4340 }
4341
4342 static struct event_constraint counter0_constraint =
4343 INTEL_ALL_EVENT_CONSTRAINT(0, 0x1);
4344
4345 static struct event_constraint counter1_constraint =
4346 INTEL_ALL_EVENT_CONSTRAINT(0, 0x2);
4347
4348 static struct event_constraint counter0_1_constraint =
4349 INTEL_ALL_EVENT_CONSTRAINT(0, 0x3);
4350
4351 static struct event_constraint counter2_constraint =
4352 EVENT_CONSTRAINT(0, 0x4, 0);
4353
4354 static struct event_constraint fixed0_constraint =
4355 FIXED_EVENT_CONSTRAINT(0x00c0, 0);
4356
4357 static struct event_constraint fixed0_counter0_constraint =
4358 INTEL_ALL_EVENT_CONSTRAINT(0, 0x100000001ULL);
4359
4360 static struct event_constraint fixed0_counter0_1_constraint =
4361 INTEL_ALL_EVENT_CONSTRAINT(0, 0x100000003ULL);
4362
4363 static struct event_constraint counters_1_7_constraint =
4364 INTEL_ALL_EVENT_CONSTRAINT(0, 0xfeULL);
4365
4366 static struct event_constraint *
4367 hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4368 struct perf_event *event)
4369 {
4370 struct event_constraint *c;
4371
4372 c = intel_get_event_constraints(cpuc, idx, event);
4373
4374 /* Handle special quirk on in_tx_checkpointed only in counter 2 */
4375 if (event->hw.config & HSW_IN_TX_CHECKPOINTED) {
4376 if (c->idxmsk64 & (1U << 2))
4377 return &counter2_constraint;
4378 return &emptyconstraint;
4379 }
4380
4381 return c;
4382 }
4383
4384 static struct event_constraint *
4385 icl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4386 struct perf_event *event)
4387 {
4388 /*
4389 * Fixed counter 0 has less skid.
4390 * Force instruction:ppp in Fixed counter 0
4391 */
4392 if ((event->attr.precise_ip == 3) &&
4393 constraint_match(&fixed0_constraint, event->hw.config))
4394 return &fixed0_constraint;
4395
4396 return hsw_get_event_constraints(cpuc, idx, event);
4397 }
4398
4399 static struct event_constraint *
4400 glc_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4401 struct perf_event *event)
4402 {
4403 struct event_constraint *c;
4404
4405 c = icl_get_event_constraints(cpuc, idx, event);
4406
4407 /*
4408 * The :ppp indicates the Precise Distribution (PDist) facility, which
4409 * is only supported on GP counter 0. If a :ppp event is not
4410 * available on GP counter 0, error out.
4411 * Exception: Instruction PDIR is only available on the fixed counter 0.
4412 */
4413 if ((event->attr.precise_ip == 3) &&
4414 !constraint_match(&fixed0_constraint, event->hw.config)) {
4415 if (c->idxmsk64 & BIT_ULL(0))
4416 return &counter0_constraint;
4417
4418 return &emptyconstraint;
4419 }
4420
4421 return c;
4422 }
4423
4424 static struct event_constraint *
4425 glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4426 struct perf_event *event)
4427 {
4428 struct event_constraint *c;
4429
4430 /* :ppp means to do reduced skid PEBS which is PMC0 only. */
4431 if (event->attr.precise_ip == 3)
4432 return &counter0_constraint;
4433
4434 c = intel_get_event_constraints(cpuc, idx, event);
4435
4436 return c;
4437 }
4438
4439 static struct event_constraint *
4440 tnt_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4441 struct perf_event *event)
4442 {
4443 struct event_constraint *c;
4444
4445 c = intel_get_event_constraints(cpuc, idx, event);
4446
4447 /*
4448 * :ppp means to do reduced skid PEBS,
4449 * which is available on PMC0 and fixed counter 0.
4450 */
4451 if (event->attr.precise_ip == 3) {
4452 /* Force instruction:ppp on PMC0 and Fixed counter 0 */
4453 if (constraint_match(&fixed0_constraint, event->hw.config))
4454 return &fixed0_counter0_constraint;
4455
4456 return &counter0_constraint;
4457 }
4458
4459 return c;
4460 }
4461
4462 static bool allow_tsx_force_abort = true;
4463
4464 static struct event_constraint *
4465 tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4466 struct perf_event *event)
4467 {
4468 struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event);
4469
4470 /*
4471 * Without TFA we must not use PMC3.
4472 */
4473 if (!allow_tsx_force_abort && test_bit(3, c->idxmsk)) {
4474 c = dyn_constraint(cpuc, c, idx);
4475 c->idxmsk64 &= ~(1ULL << 3);
4476 c->weight--;
4477 }
4478
4479 return c;
4480 }
4481
4482 static struct event_constraint *
4483 adl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4484 struct perf_event *event)
4485 {
4486 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
4487
4488 if (pmu->pmu_type == hybrid_big)
4489 return glc_get_event_constraints(cpuc, idx, event);
4490 else if (pmu->pmu_type == hybrid_small)
4491 return tnt_get_event_constraints(cpuc, idx, event);
4492
4493 WARN_ON(1);
4494 return &emptyconstraint;
4495 }
4496
4497 static struct event_constraint *
4498 cmt_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4499 struct perf_event *event)
4500 {
4501 struct event_constraint *c;
4502
4503 c = intel_get_event_constraints(cpuc, idx, event);
4504
4505 /*
4506 * The :ppp indicates the Precise Distribution (PDist) facility, which
4507 * is only supported on the GP counter 0 & 1 and Fixed counter 0.
4508 * If a :ppp event is not available on the above eligible counters,
4509 * error out.
4510 */
4511 if (event->attr.precise_ip == 3) {
4512 /* Force instruction:ppp on PMC0, 1 and Fixed counter 0 */
4513 if (constraint_match(&fixed0_constraint, event->hw.config)) {
4514 /* The fixed counter 0 doesn't support LBR event logging. */
4515 if (branch_sample_counters(event))
4516 return &counter0_1_constraint;
4517 else
4518 return &fixed0_counter0_1_constraint;
4519 }
4520
4521 switch (c->idxmsk64 & 0x3ull) {
4522 case 0x1:
4523 return &counter0_constraint;
4524 case 0x2:
4525 return &counter1_constraint;
4526 case 0x3:
4527 return &counter0_1_constraint;
4528 }
4529 return &emptyconstraint;
4530 }
4531
4532 return c;
4533 }
4534
4535 static struct event_constraint *
4536 rwc_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4537 struct perf_event *event)
4538 {
4539 struct event_constraint *c;
4540
4541 c = glc_get_event_constraints(cpuc, idx, event);
4542
4543 /* The Retire Latency is not supported by the fixed counter 0. */
4544 if (event->attr.precise_ip &&
4545 (event->attr.sample_type & PERF_SAMPLE_WEIGHT_TYPE) &&
4546 constraint_match(&fixed0_constraint, event->hw.config)) {
4547 /*
4548 * The Instruction PDIR is only available
4549 * on the fixed counter 0. Error out for this case.
4550 */
4551 if (event->attr.precise_ip == 3)
4552 return &emptyconstraint;
4553 return &counters_1_7_constraint;
4554 }
4555
4556 return c;
4557 }
4558
4559 static struct event_constraint *
4560 mtl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4561 struct perf_event *event)
4562 {
4563 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
4564
4565 if (pmu->pmu_type == hybrid_big)
4566 return rwc_get_event_constraints(cpuc, idx, event);
4567 if (pmu->pmu_type == hybrid_small)
4568 return cmt_get_event_constraints(cpuc, idx, event);
4569
4570 WARN_ON(1);
4571 return &emptyconstraint;
4572 }
4573
4574 static int adl_hw_config(struct perf_event *event)
4575 {
4576 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
4577
4578 if (pmu->pmu_type == hybrid_big)
4579 return hsw_hw_config(event);
4580 else if (pmu->pmu_type == hybrid_small)
4581 return intel_pmu_hw_config(event);
4582
4583 WARN_ON(1);
4584 return -EOPNOTSUPP;
4585 }
4586
4587 static enum hybrid_cpu_type adl_get_hybrid_cpu_type(void)
4588 {
4589 return HYBRID_INTEL_CORE;
4590 }
4591
4592 /*
4593 * Broadwell:
4594 *
4595 * The INST_RETIRED.ALL period always needs to have lowest 6 bits cleared
4596 * (BDM55) and it must not use a period smaller than 100 (BDM11). We combine
4597 * the two to enforce a minimum period of 128 (the smallest value that has bits
4598 * 0-5 cleared and >= 100).
4599 *
4600 * Because of how the code in x86_perf_event_set_period() works, the truncation
4601 * of the lower 6 bits is 'harmless' as we'll occasionally add a longer period
4602 * to make up for the 'lost' events due to carrying the 'error' in period_left.
4603 *
4604 * Therefore the effective (average) period matches the requested period,
4605 * despite coarser hardware granularity.
4606 */
4607 static void bdw_limit_period(struct perf_event *event, s64 *left)
4608 {
4609 if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
4610 X86_CONFIG(.event=0xc0, .umask=0x01)) {
4611 if (*left < 128)
4612 *left = 128;
4613 *left &= ~0x3fULL;
4614 }
4615 }
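
/*
 * Worked example for the quirk above: a requested period of 1000 becomes
 * 1000 & ~0x3f = 960, and anything below 128 is first raised to 128, so the
 * effective period is always a multiple of 64 and >= 128, satisfying both
 * BDM11 and BDM55.
 */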
4616
4617 static void nhm_limit_period(struct perf_event *event, s64 *left)
4618 {
4619 *left = max(*left, 32LL);
4620 }
4621
4622 static void glc_limit_period(struct perf_event *event, s64 *left)
4623 {
4624 if (event->attr.precise_ip == 3)
4625 *left = max(*left, 128LL);
4626 }
4627
4628 PMU_FORMAT_ATTR(event, "config:0-7" );
4629 PMU_FORMAT_ATTR(umask, "config:8-15" );
4630 PMU_FORMAT_ATTR(edge, "config:18" );
4631 PMU_FORMAT_ATTR(pc, "config:19" );
4632 PMU_FORMAT_ATTR(any, "config:21" ); /* v3 + */
4633 PMU_FORMAT_ATTR(inv, "config:23" );
4634 PMU_FORMAT_ATTR(cmask, "config:24-31" );
4635 PMU_FORMAT_ATTR(in_tx, "config:32" );
4636 PMU_FORMAT_ATTR(in_tx_cp, "config:33" );
4637 PMU_FORMAT_ATTR(eq, "config:36" ); /* v6 + */
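
/*
 * These format attributes are exported as files under
 * /sys/bus/event_source/devices/cpu/format/ (cpu_core/cpu_atom on hybrid
 * parts); e.g. "cmask" contains "config:24-31". The perf tool parses them so
 * that a term like cpu/event=0xc0,inv,cmask=16/ lands in the right config
 * bits; this note is illustrative, the strings above are authoritative.
 */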
4638
4639 static ssize_t umask2_show(struct device *dev,
4640 struct device_attribute *attr,
4641 char *page)
4642 {
4643 u64 mask = hybrid(dev_get_drvdata(dev), config_mask) & ARCH_PERFMON_EVENTSEL_UMASK2;
4644
4645 if (mask == ARCH_PERFMON_EVENTSEL_UMASK2)
4646 return sprintf(page, "config:8-15,40-47\n");
4647
4648 /* Roll back to the old format if umask2 is not supported. */
4649 return sprintf(page, "config:8-15\n");
4650 }
4651
4652 static struct device_attribute format_attr_umask2 =
4653 __ATTR(umask, 0444, umask2_show, NULL);
4654
4655 static struct attribute *format_evtsel_ext_attrs[] = {
4656 &format_attr_umask2.attr,
4657 &format_attr_eq.attr,
4658 NULL
4659 };
4660
4661 static umode_t
4662 evtsel_ext_is_visible(struct kobject *kobj, struct attribute *attr, int i)
4663 {
4664 struct device *dev = kobj_to_dev(kobj);
4665 u64 mask;
4666
4667 /*
4668 * The umask and umask2 have different formats but share the
4669 * same attr name. In update mode, the previous value of the
4670 * umask is unconditionally removed before is_visible. If
4671 * umask2 format is not enumerated, it's impossible to roll
4672 * back to the old format.
4673 * Do the check in umask2_show() rather than in is_visible().
4674 */
4675 if (i == 0)
4676 return attr->mode;
4677
4678 mask = hybrid(dev_get_drvdata(dev), config_mask);
4679 if (i == 1)
4680 return (mask & ARCH_PERFMON_EVENTSEL_EQ) ? attr->mode : 0;
4681
4682 return 0;
4683 }
4684
4685 static struct attribute *intel_arch_formats_attr[] = {
4686 &format_attr_event.attr,
4687 &format_attr_umask.attr,
4688 &format_attr_edge.attr,
4689 &format_attr_pc.attr,
4690 &format_attr_inv.attr,
4691 &format_attr_cmask.attr,
4692 NULL,
4693 };
4694
4695 ssize_t intel_event_sysfs_show(char *page, u64 config)
4696 {
4697 u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT);
4698
4699 return x86_event_sysfs_show(page, config, event);
4700 }
4701
4702 static struct intel_shared_regs *allocate_shared_regs(int cpu)
4703 {
4704 struct intel_shared_regs *regs;
4705 int i;
4706
4707 regs = kzalloc_node(sizeof(struct intel_shared_regs),
4708 GFP_KERNEL, cpu_to_node(cpu));
4709 if (regs) {
4710 /*
4711 * initialize the locks to keep lockdep happy
4712 */
4713 for (i = 0; i < EXTRA_REG_MAX; i++)
4714 raw_spin_lock_init(&regs->regs[i].lock);
4715
4716 regs->core_id = -1;
4717 }
4718 return regs;
4719 }
4720
4721 static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
4722 {
4723 struct intel_excl_cntrs *c;
4724
4725 c = kzalloc_node(sizeof(struct intel_excl_cntrs),
4726 GFP_KERNEL, cpu_to_node(cpu));
4727 if (c) {
4728 raw_spin_lock_init(&c->lock);
4729 c->core_id = -1;
4730 }
4731 return c;
4732 }
4733
4734
4735 int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
4736 {
4737 cpuc->pebs_record_size = x86_pmu.pebs_record_size;
4738
4739 if (is_hybrid() || x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
4740 cpuc->shared_regs = allocate_shared_regs(cpu);
4741 if (!cpuc->shared_regs)
4742 goto err;
4743 }
4744
4745 if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA | PMU_FL_BR_CNTR)) {
4746 size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
4747
4748 cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
4749 if (!cpuc->constraint_list)
4750 goto err_shared_regs;
4751 }
4752
4753 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
4754 cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
4755 if (!cpuc->excl_cntrs)
4756 goto err_constraint_list;
4757
4758 cpuc->excl_thread_id = 0;
4759 }
4760
4761 return 0;
4762
4763 err_constraint_list:
4764 kfree(cpuc->constraint_list);
4765 cpuc->constraint_list = NULL;
4766
4767 err_shared_regs:
4768 kfree(cpuc->shared_regs);
4769 cpuc->shared_regs = NULL;
4770
4771 err:
4772 return -ENOMEM;
4773 }
4774
4775 static int intel_pmu_cpu_prepare(int cpu)
4776 {
4777 return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu);
4778 }
4779
4780 static void flip_smm_bit(void *data)
4781 {
4782 unsigned long set = *(unsigned long *)data;
4783
4784 if (set > 0) {
4785 msr_set_bit(MSR_IA32_DEBUGCTLMSR,
4786 DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
4787 } else {
4788 msr_clear_bit(MSR_IA32_DEBUGCTLMSR,
4789 DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
4790 }
4791 }
4792
4793 static void intel_pmu_check_counters_mask(u64 *cntr_mask,
4794 u64 *fixed_cntr_mask,
4795 u64 *intel_ctrl)
4796 {
4797 unsigned int bit;
4798
4799 bit = fls64(*cntr_mask);
4800 if (bit > INTEL_PMC_MAX_GENERIC) {
4801 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
4802 bit, INTEL_PMC_MAX_GENERIC);
4803 *cntr_mask &= GENMASK_ULL(INTEL_PMC_MAX_GENERIC - 1, 0);
4804 }
4805 *intel_ctrl = *cntr_mask;
4806
4807 bit = fls64(*fixed_cntr_mask);
4808 if (bit > INTEL_PMC_MAX_FIXED) {
4809 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
4810 bit, INTEL_PMC_MAX_FIXED);
4811 *fixed_cntr_mask &= GENMASK_ULL(INTEL_PMC_MAX_FIXED - 1, 0);
4812 }
4813
4814 *intel_ctrl |= *fixed_cntr_mask << INTEL_PMC_IDX_FIXED;
4815 }
4816
4817 static void intel_pmu_check_event_constraints(struct event_constraint *event_constraints,
4818 u64 cntr_mask,
4819 u64 fixed_cntr_mask,
4820 u64 intel_ctrl);
4821
4822 static void intel_pmu_check_extra_regs(struct extra_reg *extra_regs);
4823
4824 static inline bool intel_pmu_broken_perf_cap(void)
4825 {
4826 /* The Perf Metric (Bit 15) is always cleared */
4827 if (boot_cpu_data.x86_vfm == INTEL_METEORLAKE ||
4828 boot_cpu_data.x86_vfm == INTEL_METEORLAKE_L)
4829 return true;
4830
4831 return false;
4832 }
4833
4834 static void update_pmu_cap(struct x86_hybrid_pmu *pmu)
4835 {
4836 unsigned int sub_bitmaps, eax, ebx, ecx, edx;
4837
4838 cpuid(ARCH_PERFMON_EXT_LEAF, &sub_bitmaps, &ebx, &ecx, &edx);
4839
4840 if (ebx & ARCH_PERFMON_EXT_UMASK2)
4841 pmu->config_mask |= ARCH_PERFMON_EVENTSEL_UMASK2;
4842 if (ebx & ARCH_PERFMON_EXT_EQ)
4843 pmu->config_mask |= ARCH_PERFMON_EVENTSEL_EQ;
4844
4845 if (sub_bitmaps & ARCH_PERFMON_NUM_COUNTER_LEAF_BIT) {
4846 cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_NUM_COUNTER_LEAF,
4847 &eax, &ebx, &ecx, &edx);
4848 pmu->cntr_mask64 = eax;
4849 pmu->fixed_cntr_mask64 = ebx;
4850 }
4851
4852 if (!intel_pmu_broken_perf_cap()) {
4853 /* Perf Metric (Bit 15) and PEBS via PT (Bit 16) are hybrid enumeration */
4854 rdmsrl(MSR_IA32_PERF_CAPABILITIES, pmu->intel_cap.capabilities);
4855 }
4856 }
4857
4858 static void intel_pmu_check_hybrid_pmus(struct x86_hybrid_pmu *pmu)
4859 {
4860 intel_pmu_check_counters_mask(&pmu->cntr_mask64, &pmu->fixed_cntr_mask64,
4861 &pmu->intel_ctrl);
4862 pmu->pebs_events_mask = intel_pmu_pebs_mask(pmu->cntr_mask64);
4863 pmu->unconstrained = (struct event_constraint)
4864 __EVENT_CONSTRAINT(0, pmu->cntr_mask64,
4865 0, x86_pmu_num_counters(&pmu->pmu), 0, 0);
4866
4867 if (pmu->intel_cap.perf_metrics)
4868 pmu->intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS;
4869 else
4870 pmu->intel_ctrl &= ~(1ULL << GLOBAL_CTRL_EN_PERF_METRICS);
4871
4872 if (pmu->intel_cap.pebs_output_pt_available)
4873 pmu->pmu.capabilities |= PERF_PMU_CAP_AUX_OUTPUT;
4874 else
4875 pmu->pmu.capabilities &= ~PERF_PMU_CAP_AUX_OUTPUT;
4876
4877 intel_pmu_check_event_constraints(pmu->event_constraints,
4878 pmu->cntr_mask64,
4879 pmu->fixed_cntr_mask64,
4880 pmu->intel_ctrl);
4881
4882 intel_pmu_check_extra_regs(pmu->extra_regs);
4883 }
4884
4885 static struct x86_hybrid_pmu *find_hybrid_pmu_for_cpu(void)
4886 {
4887 u8 cpu_type = get_this_hybrid_cpu_type();
4888 int i;
4889
4890 /*
4891 * This is running on a CPU model that is known to have hybrid
4892 * configurations. But the CPU told us it is not hybrid, shame
4893 * on it. There should be a fixup function provided for these
4894 * troublesome CPUs (->get_hybrid_cpu_type).
4895 */
4896 if (cpu_type == HYBRID_INTEL_NONE) {
4897 if (x86_pmu.get_hybrid_cpu_type)
4898 cpu_type = x86_pmu.get_hybrid_cpu_type();
4899 else
4900 return NULL;
4901 }
4902
4903 /*
4904 * This essentially just maps between the 'hybrid_cpu_type'
4905 * and 'hybrid_pmu_type' enums:
4906 */
4907 for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) {
4908 enum hybrid_pmu_type pmu_type = x86_pmu.hybrid_pmu[i].pmu_type;
4909
4910 if (cpu_type == HYBRID_INTEL_CORE &&
4911 pmu_type == hybrid_big)
4912 return &x86_pmu.hybrid_pmu[i];
4913 if (cpu_type == HYBRID_INTEL_ATOM &&
4914 pmu_type == hybrid_small)
4915 return &x86_pmu.hybrid_pmu[i];
4916 }
4917
4918 return NULL;
4919 }
4920
4921 static bool init_hybrid_pmu(int cpu)
4922 {
4923 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
4924 struct x86_hybrid_pmu *pmu = find_hybrid_pmu_for_cpu();
4925
4926 if (WARN_ON_ONCE(!pmu || (pmu->pmu.type == -1))) {
4927 cpuc->pmu = NULL;
4928 return false;
4929 }
4930
4931 /* Only check and dump the PMU information for the first CPU */
4932 if (!cpumask_empty(&pmu->supported_cpus))
4933 goto end;
4934
4935 if (this_cpu_has(X86_FEATURE_ARCH_PERFMON_EXT))
4936 update_pmu_cap(pmu);
4937
4938 intel_pmu_check_hybrid_pmus(pmu);
4939
4940 if (!check_hw_exists(&pmu->pmu, pmu->cntr_mask, pmu->fixed_cntr_mask))
4941 return false;
4942
4943 pr_info("%s PMU driver: ", pmu->name);
4944
4945 if (pmu->intel_cap.pebs_output_pt_available)
4946 pr_cont("PEBS-via-PT ");
4947
4948 pr_cont("\n");
4949
4950 x86_pmu_show_pmu_cap(&pmu->pmu);
4951
4952 end:
4953 cpumask_set_cpu(cpu, &pmu->supported_cpus);
4954 cpuc->pmu = &pmu->pmu;
4955
4956 return true;
4957 }
4958
4959 static void intel_pmu_cpu_starting(int cpu)
4960 {
4961 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
4962 int core_id = topology_core_id(cpu);
4963 int i;
4964
4965 if (is_hybrid() && !init_hybrid_pmu(cpu))
4966 return;
4967
4968 init_debug_store_on_cpu(cpu);
4969 /*
4970 * Deal with CPUs that don't clear their LBRs on power-up.
4971 */
4972 intel_pmu_lbr_reset();
4973
4974 cpuc->lbr_sel = NULL;
4975
4976 if (x86_pmu.flags & PMU_FL_TFA) {
4977 WARN_ON_ONCE(cpuc->tfa_shadow);
4978 cpuc->tfa_shadow = ~0ULL;
4979 intel_set_tfa(cpuc, false);
4980 }
4981
4982 if (x86_pmu.version > 1)
4983 flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
4984
4985 /*
4986 * Disable perf metrics if any added CPU doesn't support it.
4987 *
4988 * Turn off the check for a hybrid architecture, because the
4989 * architectural MSR, MSR_IA32_PERF_CAPABILITIES, only indicates
4990 * the architectural features. Perf metrics is a model-specific
4991 * feature for now. The corresponding bit should always be 0 on
4992 * a hybrid platform, e.g., Alder Lake.
4993 */
4994 if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics) {
4995 union perf_capabilities perf_cap;
4996
4997 rdmsrl(MSR_IA32_PERF_CAPABILITIES, perf_cap.capabilities);
4998 if (!perf_cap.perf_metrics) {
4999 x86_pmu.intel_cap.perf_metrics = 0;
5000 x86_pmu.intel_ctrl &= ~(1ULL << GLOBAL_CTRL_EN_PERF_METRICS);
5001 }
5002 }
5003
5004 if (!cpuc->shared_regs)
5005 return;
5006
5007 if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) {
5008 for_each_cpu(i, topology_sibling_cpumask(cpu)) {
5009 struct intel_shared_regs *pc;
5010
5011 pc = per_cpu(cpu_hw_events, i).shared_regs;
5012 if (pc && pc->core_id == core_id) {
5013 cpuc->kfree_on_online[0] = cpuc->shared_regs;
5014 cpuc->shared_regs = pc;
5015 break;
5016 }
5017 }
5018 cpuc->shared_regs->core_id = core_id;
5019 cpuc->shared_regs->refcnt++;
5020 }
5021
5022 if (x86_pmu.lbr_sel_map)
5023 cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
5024
5025 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
5026 for_each_cpu(i, topology_sibling_cpumask(cpu)) {
5027 struct cpu_hw_events *sibling;
5028 struct intel_excl_cntrs *c;
5029
5030 sibling = &per_cpu(cpu_hw_events, i);
5031 c = sibling->excl_cntrs;
5032 if (c && c->core_id == core_id) {
5033 cpuc->kfree_on_online[1] = cpuc->excl_cntrs;
5034 cpuc->excl_cntrs = c;
5035 if (!sibling->excl_thread_id)
5036 cpuc->excl_thread_id = 1;
5037 break;
5038 }
5039 }
5040 cpuc->excl_cntrs->core_id = core_id;
5041 cpuc->excl_cntrs->refcnt++;
5042 }
5043 }
5044
5045 static void free_excl_cntrs(struct cpu_hw_events *cpuc)
5046 {
5047 struct intel_excl_cntrs *c;
5048
5049 c = cpuc->excl_cntrs;
5050 if (c) {
5051 if (c->core_id == -1 || --c->refcnt == 0)
5052 kfree(c);
5053 cpuc->excl_cntrs = NULL;
5054 }
5055
5056 kfree(cpuc->constraint_list);
5057 cpuc->constraint_list = NULL;
5058 }
5059
5060 static void intel_pmu_cpu_dying(int cpu)
5061 {
5062 fini_debug_store_on_cpu(cpu);
5063 }
5064
5065 void intel_cpuc_finish(struct cpu_hw_events *cpuc)
5066 {
5067 struct intel_shared_regs *pc;
5068
5069 pc = cpuc->shared_regs;
5070 if (pc) {
5071 if (pc->core_id == -1 || --pc->refcnt == 0)
5072 kfree(pc);
5073 cpuc->shared_regs = NULL;
5074 }
5075
5076 free_excl_cntrs(cpuc);
5077 }
5078
5079 static void intel_pmu_cpu_dead(int cpu)
5080 {
5081 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
5082
5083 intel_cpuc_finish(cpuc);
5084
5085 if (is_hybrid() && cpuc->pmu)
5086 cpumask_clear_cpu(cpu, &hybrid_pmu(cpuc->pmu)->supported_cpus);
5087 }
5088
5089 static void intel_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx,
5090 bool sched_in)
5091 {
5092 intel_pmu_pebs_sched_task(pmu_ctx, sched_in);
5093 intel_pmu_lbr_sched_task(pmu_ctx, sched_in);
5094 }
5095
5096 static void intel_pmu_swap_task_ctx(struct perf_event_pmu_context *prev_epc,
5097 struct perf_event_pmu_context *next_epc)
5098 {
5099 intel_pmu_lbr_swap_task_ctx(prev_epc, next_epc);
5100 }
5101
5102 static int intel_pmu_check_period(struct perf_event *event, u64 value)
5103 {
5104 return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
5105 }
5106
5107 static void intel_aux_output_init(void)
5108 {
5109 /* Refer also intel_pmu_aux_output_match() */
5110 if (x86_pmu.intel_cap.pebs_output_pt_available)
5111 x86_pmu.assign = intel_pmu_assign_event;
5112 }
5113
5114 static int intel_pmu_aux_output_match(struct perf_event *event)
5115 {
5116 /* intel_pmu_assign_event() is needed, refer intel_aux_output_init() */
5117 if (!x86_pmu.intel_cap.pebs_output_pt_available)
5118 return 0;
5119
5120 return is_intel_pt_event(event);
5121 }
5122
5123 static void intel_pmu_filter(struct pmu *pmu, int cpu, bool *ret)
5124 {
5125 struct x86_hybrid_pmu *hpmu = hybrid_pmu(pmu);
5126
5127 *ret = !cpumask_test_cpu(cpu, &hpmu->supported_cpus);
5128 }
5129
5130 PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
5131
5132 PMU_FORMAT_ATTR(ldlat, "config1:0-15");
5133
5134 PMU_FORMAT_ATTR(frontend, "config1:0-23");
5135
5136 PMU_FORMAT_ATTR(snoop_rsp, "config1:0-63");
5137
5138 static struct attribute *intel_arch3_formats_attr[] = {
5139 &format_attr_event.attr,
5140 &format_attr_umask.attr,
5141 &format_attr_edge.attr,
5142 &format_attr_pc.attr,
5143 &format_attr_any.attr,
5144 &format_attr_inv.attr,
5145 &format_attr_cmask.attr,
5146 NULL,
5147 };
5148
5149 static struct attribute *hsw_format_attr[] = {
5150 &format_attr_in_tx.attr,
5151 &format_attr_in_tx_cp.attr,
5152 &format_attr_offcore_rsp.attr,
5153 &format_attr_ldlat.attr,
5154 NULL
5155 };
5156
5157 static struct attribute *nhm_format_attr[] = {
5158 &format_attr_offcore_rsp.attr,
5159 &format_attr_ldlat.attr,
5160 NULL
5161 };
5162
5163 static struct attribute *slm_format_attr[] = {
5164 &format_attr_offcore_rsp.attr,
5165 NULL
5166 };
5167
5168 static struct attribute *cmt_format_attr[] = {
5169 &format_attr_offcore_rsp.attr,
5170 &format_attr_ldlat.attr,
5171 &format_attr_snoop_rsp.attr,
5172 NULL
5173 };
5174
5175 static struct attribute *skl_format_attr[] = {
5176 &format_attr_frontend.attr,
5177 NULL,
5178 };
5179
5180 static __initconst const struct x86_pmu core_pmu = {
5181 .name = "core",
5182 .handle_irq = x86_pmu_handle_irq,
5183 .disable_all = x86_pmu_disable_all,
5184 .enable_all = core_pmu_enable_all,
5185 .enable = core_pmu_enable_event,
5186 .disable = x86_pmu_disable_event,
5187 .hw_config = core_pmu_hw_config,
5188 .schedule_events = x86_schedule_events,
5189 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
5190 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
5191 .fixedctr = MSR_ARCH_PERFMON_FIXED_CTR0,
5192 .event_map = intel_pmu_event_map,
5193 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
5194 .apic = 1,
5195 .large_pebs_flags = LARGE_PEBS_FLAGS,
5196
5197 /*
5198 * Intel PMCs cannot be accessed sanely above 32-bit width,
5199 * so we install an artificial 1<<31 period regardless of
5200 * the generic event period:
5201 */
5202 .max_period = (1ULL<<31) - 1,
5203 .get_event_constraints = intel_get_event_constraints,
5204 .put_event_constraints = intel_put_event_constraints,
5205 .event_constraints = intel_core_event_constraints,
5206 .guest_get_msrs = core_guest_get_msrs,
5207 .format_attrs = intel_arch_formats_attr,
5208 .events_sysfs_show = intel_event_sysfs_show,
5209
5210 /*
5211 * Virtual (or funny metal) CPU can define x86_pmu.extra_regs
5212 * together with PMU version 1 and thus be using core_pmu with
5213 * shared_regs. We need following callbacks here to allocate
5214 * it properly.
5215 */
5216 .cpu_prepare = intel_pmu_cpu_prepare,
5217 .cpu_starting = intel_pmu_cpu_starting,
5218 .cpu_dying = intel_pmu_cpu_dying,
5219 .cpu_dead = intel_pmu_cpu_dead,
5220
5221 .check_period = intel_pmu_check_period,
5222
5223 .lbr_reset = intel_pmu_lbr_reset_64,
5224 .lbr_read = intel_pmu_lbr_read_64,
5225 .lbr_save = intel_pmu_lbr_save,
5226 .lbr_restore = intel_pmu_lbr_restore,
5227 };
5228
5229 static __initconst const struct x86_pmu intel_pmu = {
5230 .name = "Intel",
5231 .handle_irq = intel_pmu_handle_irq,
5232 .disable_all = intel_pmu_disable_all,
5233 .enable_all = intel_pmu_enable_all,
5234 .enable = intel_pmu_enable_event,
5235 .disable = intel_pmu_disable_event,
5236 .add = intel_pmu_add_event,
5237 .del = intel_pmu_del_event,
5238 .read = intel_pmu_read_event,
5239 .set_period = intel_pmu_set_period,
5240 .update = intel_pmu_update,
5241 .hw_config = intel_pmu_hw_config,
5242 .schedule_events = x86_schedule_events,
5243 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
5244 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
5245 .fixedctr = MSR_ARCH_PERFMON_FIXED_CTR0,
5246 .event_map = intel_pmu_event_map,
5247 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
5248 .apic = 1,
5249 .large_pebs_flags = LARGE_PEBS_FLAGS,
5250 /*
5251 * Intel PMCs cannot be accessed sanely above 32 bit width,
5252 * so we install an artificial 1<<31 period regardless of
5253 * the generic event period:
5254 */
5255 .max_period = (1ULL << 31) - 1,
5256 .get_event_constraints = intel_get_event_constraints,
5257 .put_event_constraints = intel_put_event_constraints,
5258 .pebs_aliases = intel_pebs_aliases_core2,
5259
5260 .format_attrs = intel_arch3_formats_attr,
5261 .events_sysfs_show = intel_event_sysfs_show,
5262
5263 .cpu_prepare = intel_pmu_cpu_prepare,
5264 .cpu_starting = intel_pmu_cpu_starting,
5265 .cpu_dying = intel_pmu_cpu_dying,
5266 .cpu_dead = intel_pmu_cpu_dead,
5267
5268 .guest_get_msrs = intel_guest_get_msrs,
5269 .sched_task = intel_pmu_sched_task,
5270 .swap_task_ctx = intel_pmu_swap_task_ctx,
5271
5272 .check_period = intel_pmu_check_period,
5273
5274 .aux_output_match = intel_pmu_aux_output_match,
5275
5276 .lbr_reset = intel_pmu_lbr_reset_64,
5277 .lbr_read = intel_pmu_lbr_read_64,
5278 .lbr_save = intel_pmu_lbr_save,
5279 .lbr_restore = intel_pmu_lbr_restore,
5280
5281 /*
5282 * SMM has access to all 4 rings and while traditionally SMM code only
5283 * ran in CPL0, 2021-era firmware is starting to make use of CPL3 in SMM.
5284 *
5285 * Since the EVENTSEL.{USR,OS} CPL filtering makes no distinction
5286 * between SMM or not, this results in what should be pure userspace
5287 * counters including SMM data.
5288 *
5289 * This is a clear privilege issue, therefore globally disable
5290 * counting SMM by default.
5291 */
5292 .attr_freeze_on_smi = 1,
5293 };
5294
5295 static __init void intel_clovertown_quirk(void)
5296 {
5297 /*
5298 * PEBS is unreliable due to:
5299 *
5300 * AJ67 - PEBS may experience CPL leaks
5301 * AJ68 - PEBS PMI may be delayed by one event
5302 * AJ69 - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
5303 * AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
5304 *
5305 * AJ67 could be worked around by restricting the OS/USR flags.
5306 * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
5307 *
5308 * AJ106 could possibly be worked around by not allowing LBR
5309 * usage from PEBS, including the fixup.
5310 * AJ68 could possibly be worked around by always programming
5311 * a pebs_event_reset[0] value and coping with the lost events.
5312 *
5313 * But taken together it might just make sense to not enable PEBS on
5314 * these chips.
5315 */
5316 pr_warn("PEBS disabled due to CPU errata\n");
5317 x86_pmu.pebs = 0;
5318 x86_pmu.pebs_constraints = NULL;
5319 }
5320
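/*
 * Microcode revisions at or above which PEBS isolation is considered
 * working; see intel_check_pebs_isolation().
 */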
5321 static const struct x86_cpu_desc isolation_ucodes[] = {
5322 INTEL_CPU_DESC(INTEL_HASWELL, 3, 0x0000001f),
5323 INTEL_CPU_DESC(INTEL_HASWELL_L, 1, 0x0000001e),
5324 INTEL_CPU_DESC(INTEL_HASWELL_G, 1, 0x00000015),
5325 INTEL_CPU_DESC(INTEL_HASWELL_X, 2, 0x00000037),
5326 INTEL_CPU_DESC(INTEL_HASWELL_X, 4, 0x0000000a),
5327 INTEL_CPU_DESC(INTEL_BROADWELL, 4, 0x00000023),
5328 INTEL_CPU_DESC(INTEL_BROADWELL_G, 1, 0x00000014),
5329 INTEL_CPU_DESC(INTEL_BROADWELL_D, 2, 0x00000010),
5330 INTEL_CPU_DESC(INTEL_BROADWELL_D, 3, 0x07000009),
5331 INTEL_CPU_DESC(INTEL_BROADWELL_D, 4, 0x0f000009),
5332 INTEL_CPU_DESC(INTEL_BROADWELL_D, 5, 0x0e000002),
5333 INTEL_CPU_DESC(INTEL_BROADWELL_X, 1, 0x0b000014),
5334 INTEL_CPU_DESC(INTEL_SKYLAKE_X, 3, 0x00000021),
5335 INTEL_CPU_DESC(INTEL_SKYLAKE_X, 4, 0x00000000),
5336 INTEL_CPU_DESC(INTEL_SKYLAKE_X, 5, 0x00000000),
5337 INTEL_CPU_DESC(INTEL_SKYLAKE_X, 6, 0x00000000),
5338 INTEL_CPU_DESC(INTEL_SKYLAKE_X, 7, 0x00000000),
5339 INTEL_CPU_DESC(INTEL_SKYLAKE_X, 11, 0x00000000),
5340 INTEL_CPU_DESC(INTEL_SKYLAKE_L, 3, 0x0000007c),
5341 INTEL_CPU_DESC(INTEL_SKYLAKE, 3, 0x0000007c),
5342 INTEL_CPU_DESC(INTEL_KABYLAKE, 9, 0x0000004e),
5343 INTEL_CPU_DESC(INTEL_KABYLAKE_L, 9, 0x0000004e),
5344 INTEL_CPU_DESC(INTEL_KABYLAKE_L, 10, 0x0000004e),
5345 INTEL_CPU_DESC(INTEL_KABYLAKE_L, 11, 0x0000004e),
5346 INTEL_CPU_DESC(INTEL_KABYLAKE_L, 12, 0x0000004e),
5347 INTEL_CPU_DESC(INTEL_KABYLAKE, 10, 0x0000004e),
5348 INTEL_CPU_DESC(INTEL_KABYLAKE, 11, 0x0000004e),
5349 INTEL_CPU_DESC(INTEL_KABYLAKE, 12, 0x0000004e),
5350 INTEL_CPU_DESC(INTEL_KABYLAKE, 13, 0x0000004e),
5351 {}
5352 };
5353
5354 static void intel_check_pebs_isolation(void)
5355 {
5356 x86_pmu.pebs_no_isolation = !x86_cpu_has_min_microcode_rev(isolation_ucodes);
5357 }
5358
5359 static __init void intel_pebs_isolation_quirk(void)
5360 {
5361 WARN_ON_ONCE(x86_pmu.check_microcode);
5362 x86_pmu.check_microcode = intel_check_pebs_isolation;
5363 intel_check_pebs_isolation();
5364 }
5365
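/*
 * Minimum SandyBridge microcode revisions for which PEBS is not considered
 * broken; see intel_snb_pebs_broken().
 */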
5366 static const struct x86_cpu_desc pebs_ucodes[] = {
5367 INTEL_CPU_DESC(INTEL_SANDYBRIDGE, 7, 0x00000028),
5368 INTEL_CPU_DESC(INTEL_SANDYBRIDGE_X, 6, 0x00000618),
5369 INTEL_CPU_DESC(INTEL_SANDYBRIDGE_X, 7, 0x0000070c),
5370 {}
5371 };
5372
5373 static bool intel_snb_pebs_broken(void)
5374 {
5375 return !x86_cpu_has_min_microcode_rev(pebs_ucodes);
5376 }
5377
5378 static void intel_snb_check_microcode(void)
5379 {
5380 if (intel_snb_pebs_broken() == x86_pmu.pebs_broken)
5381 return;
5382
5383 /*
5384 * Serialized by the microcode lock.
5385 */
5386 if (x86_pmu.pebs_broken) {
5387 pr_info("PEBS enabled due to microcode update\n");
5388 x86_pmu.pebs_broken = 0;
5389 } else {
5390 pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
5391 x86_pmu.pebs_broken = 1;
5392 }
5393 }
5394
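/* Return true if @msr lies within the LBR "from" MSR range. */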
5395 static bool is_lbr_from(unsigned long msr)
5396 {
5397 unsigned long lbr_from_nr = x86_pmu.lbr_from + x86_pmu.lbr_nr;
5398
5399 return x86_pmu.lbr_from <= msr && msr < lbr_from_nr;
5400 }
5401
5402 /*
5403 * Under certain circumstances, accessing certain MSRs may cause a #GP.
5404 * This function tests whether the input MSR can be safely accessed.
5405 */
5406 static bool check_msr(unsigned long msr, u64 mask)
5407 {
5408 u64 val_old, val_new, val_tmp;
5409
5410 /*
5411 * Disable the check for real HW, so we don't
5412 * mess with potentially enabled registers:
5413 */
5414 if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
5415 return true;
5416
5417 /*
5418 * Read the current value, change it and read it back to see if it
5419 * matches; this is needed to detect certain hardware emulators
5420 * (qemu/kvm) that don't trap on the MSR access and always return 0s.
5421 */
5422 if (rdmsrl_safe(msr, &val_old))
5423 return false;
5424
5425 /*
5426 * Only change the bits which can be updated by wrmsrl.
5427 */
5428 val_tmp = val_old ^ mask;
5429
5430 if (is_lbr_from(msr))
5431 val_tmp = lbr_from_signext_quirk_wr(val_tmp);
5432
5433 if (wrmsrl_safe(msr, val_tmp) ||
5434 rdmsrl_safe(msr, &val_new))
5435 return false;
5436
5437 /*
5438 * Quirk only affects validation in wrmsr(), so wrmsrl()'s value
5439 * should equal rdmsrl()'s even with the quirk.
5440 */
5441 if (val_new != val_tmp)
5442 return false;
5443
5444 if (is_lbr_from(msr))
5445 val_old = lbr_from_signext_quirk_wr(val_old);
5446
5447 /* At this point the MSR is known to be safely accessible.
5448 * Restore the old value and return.
5449 */
5450 wrmsrl(msr, val_old);
5451
5452 return true;
5453 }
5454
5455 static __init void intel_sandybridge_quirk(void)
5456 {
5457 x86_pmu.check_microcode = intel_snb_check_microcode;
5458 cpus_read_lock();
5459 intel_snb_check_microcode();
5460 cpus_read_unlock();
5461 }
5462
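/*
 * Architectural events indexed by their CPUID.0AH:EBX bit position; a set
 * bit in x86_pmu.events_mask marks the event as unavailable (see
 * intel_arch_events_quirk() below).
 */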
5463 static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
5464 { PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
5465 { PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
5466 { PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
5467 { PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
5468 { PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
5469 { PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
5470 { PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
5471 };
5472
5473 static __init void intel_arch_events_quirk(void)
5474 {
5475 int bit;
5476
5477 /* disable events that are reported as not present by cpuid */
5478 for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
5479 intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
5480 pr_warn("CPUID marked event: \'%s\' unavailable\n",
5481 intel_arch_events_map[bit].name);
5482 }
5483 }
5484
5485 static __init void intel_nehalem_quirk(void)
5486 {
5487 union cpuid10_ebx ebx;
5488
5489 ebx.full = x86_pmu.events_maskl;
5490 if (ebx.split.no_branch_misses_retired) {
5491 /*
5492 * Erratum AAJ80 detected, we work it around by using
5493 * the BR_MISP_EXEC.ANY event. This will over-count
5494 * branch-misses, but it's still much better than the
5495 * architectural event which is often completely bogus:
5496 */
5497 intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
5498 ebx.split.no_branch_misses_retired = 0;
5499 x86_pmu.events_maskl = ebx.full;
5500 pr_info("CPU erratum AAJ80 worked around\n");
5501 }
5502 }
5503
5504 /*
5505 * enable software workaround for errata:
5506 * SNB: BJ122
5507 * IVB: BV98
5508 * HSW: HSD29
5509 *
5510 * Only needed when HT is enabled. However, detecting
5511 * whether HT is enabled is difficult (model specific). So instead,
5512 * we enable the workaround early in boot, and verify whether
5513 * it is needed in a later initcall phase once we have valid
5514 * topology information to check if HT is actually enabled.
5515 */
5516 static __init void intel_ht_bug(void)
5517 {
5518 x86_pmu.flags |= PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED;
5519
5520 x86_pmu.start_scheduling = intel_start_scheduling;
5521 x86_pmu.commit_scheduling = intel_commit_scheduling;
5522 x86_pmu.stop_scheduling = intel_stop_scheduling;
5523 }
5524
5525 EVENT_ATTR_STR(mem-loads, mem_ld_hsw, "event=0xcd,umask=0x1,ldlat=3");
5526 EVENT_ATTR_STR(mem-stores, mem_st_hsw, "event=0xd0,umask=0x82")
5527
5528 /* Haswell special events */
5529 EVENT_ATTR_STR(tx-start, tx_start, "event=0xc9,umask=0x1");
5530 EVENT_ATTR_STR(tx-commit, tx_commit, "event=0xc9,umask=0x2");
5531 EVENT_ATTR_STR(tx-abort, tx_abort, "event=0xc9,umask=0x4");
5532 EVENT_ATTR_STR(tx-capacity, tx_capacity, "event=0x54,umask=0x2");
5533 EVENT_ATTR_STR(tx-conflict, tx_conflict, "event=0x54,umask=0x1");
5534 EVENT_ATTR_STR(el-start, el_start, "event=0xc8,umask=0x1");
5535 EVENT_ATTR_STR(el-commit, el_commit, "event=0xc8,umask=0x2");
5536 EVENT_ATTR_STR(el-abort, el_abort, "event=0xc8,umask=0x4");
5537 EVENT_ATTR_STR(el-capacity, el_capacity, "event=0x54,umask=0x2");
5538 EVENT_ATTR_STR(el-conflict, el_conflict, "event=0x54,umask=0x1");
5539 EVENT_ATTR_STR(cycles-t, cycles_t, "event=0x3c,in_tx=1");
5540 EVENT_ATTR_STR(cycles-ct, cycles_ct, "event=0x3c,in_tx=1,in_tx_cp=1");
5541
5542 static struct attribute *hsw_events_attrs[] = {
5543 EVENT_PTR(td_slots_issued),
5544 EVENT_PTR(td_slots_retired),
5545 EVENT_PTR(td_fetch_bubbles),
5546 EVENT_PTR(td_total_slots),
5547 EVENT_PTR(td_total_slots_scale),
5548 EVENT_PTR(td_recovery_bubbles),
5549 EVENT_PTR(td_recovery_bubbles_scale),
5550 NULL
5551 };
5552
5553 static struct attribute *hsw_mem_events_attrs[] = {
5554 EVENT_PTR(mem_ld_hsw),
5555 EVENT_PTR(mem_st_hsw),
5556 NULL,
5557 };
5558
5559 static struct attribute *hsw_tsx_events_attrs[] = {
5560 EVENT_PTR(tx_start),
5561 EVENT_PTR(tx_commit),
5562 EVENT_PTR(tx_abort),
5563 EVENT_PTR(tx_capacity),
5564 EVENT_PTR(tx_conflict),
5565 EVENT_PTR(el_start),
5566 EVENT_PTR(el_commit),
5567 EVENT_PTR(el_abort),
5568 EVENT_PTR(el_capacity),
5569 EVENT_PTR(el_conflict),
5570 EVENT_PTR(cycles_t),
5571 EVENT_PTR(cycles_ct),
5572 NULL
5573 };
5574
5575 EVENT_ATTR_STR(tx-capacity-read, tx_capacity_read, "event=0x54,umask=0x80");
5576 EVENT_ATTR_STR(tx-capacity-write, tx_capacity_write, "event=0x54,umask=0x2");
5577 EVENT_ATTR_STR(el-capacity-read, el_capacity_read, "event=0x54,umask=0x80");
5578 EVENT_ATTR_STR(el-capacity-write, el_capacity_write, "event=0x54,umask=0x2");
5579
5580 static struct attribute *icl_events_attrs[] = {
5581 EVENT_PTR(mem_ld_hsw),
5582 EVENT_PTR(mem_st_hsw),
5583 NULL,
5584 };
5585
5586 static struct attribute *icl_td_events_attrs[] = {
5587 EVENT_PTR(slots),
5588 EVENT_PTR(td_retiring),
5589 EVENT_PTR(td_bad_spec),
5590 EVENT_PTR(td_fe_bound),
5591 EVENT_PTR(td_be_bound),
5592 NULL,
5593 };
5594
5595 static struct attribute *icl_tsx_events_attrs[] = {
5596 EVENT_PTR(tx_start),
5597 EVENT_PTR(tx_abort),
5598 EVENT_PTR(tx_commit),
5599 EVENT_PTR(tx_capacity_read),
5600 EVENT_PTR(tx_capacity_write),
5601 EVENT_PTR(tx_conflict),
5602 EVENT_PTR(el_start),
5603 EVENT_PTR(el_abort),
5604 EVENT_PTR(el_commit),
5605 EVENT_PTR(el_capacity_read),
5606 EVENT_PTR(el_capacity_write),
5607 EVENT_PTR(el_conflict),
5608 EVENT_PTR(cycles_t),
5609 EVENT_PTR(cycles_ct),
5610 NULL,
5611 };
5612
5613
5614 EVENT_ATTR_STR(mem-stores, mem_st_spr, "event=0xcd,umask=0x2");
5615 EVENT_ATTR_STR(mem-loads-aux, mem_ld_aux, "event=0x03,umask=0x82");
5616
5617 static struct attribute *glc_events_attrs[] = {
5618 EVENT_PTR(mem_ld_hsw),
5619 EVENT_PTR(mem_st_spr),
5620 EVENT_PTR(mem_ld_aux),
5621 NULL,
5622 };
5623
5624 static struct attribute *glc_td_events_attrs[] = {
5625 EVENT_PTR(slots),
5626 EVENT_PTR(td_retiring),
5627 EVENT_PTR(td_bad_spec),
5628 EVENT_PTR(td_fe_bound),
5629 EVENT_PTR(td_be_bound),
5630 EVENT_PTR(td_heavy_ops),
5631 EVENT_PTR(td_br_mispredict),
5632 EVENT_PTR(td_fetch_lat),
5633 EVENT_PTR(td_mem_bound),
5634 NULL,
5635 };
5636
5637 static struct attribute *glc_tsx_events_attrs[] = {
5638 EVENT_PTR(tx_start),
5639 EVENT_PTR(tx_abort),
5640 EVENT_PTR(tx_commit),
5641 EVENT_PTR(tx_capacity_read),
5642 EVENT_PTR(tx_capacity_write),
5643 EVENT_PTR(tx_conflict),
5644 EVENT_PTR(cycles_t),
5645 EVENT_PTR(cycles_ct),
5646 NULL,
5647 };
5648
5649 static ssize_t freeze_on_smi_show(struct device *cdev,
5650 struct device_attribute *attr,
5651 char *buf)
5652 {
5653 return sprintf(buf, "%lu\n", x86_pmu.attr_freeze_on_smi);
5654 }
5655
5656 static DEFINE_MUTEX(freeze_on_smi_mutex);
5657
5658 static ssize_t freeze_on_smi_store(struct device *cdev,
5659 struct device_attribute *attr,
5660 const char *buf, size_t count)
5661 {
5662 unsigned long val;
5663 ssize_t ret;
5664
5665 ret = kstrtoul(buf, 0, &val);
5666 if (ret)
5667 return ret;
5668
5669 if (val > 1)
5670 return -EINVAL;
5671
5672 mutex_lock(&freeze_on_smi_mutex);
5673
5674 if (x86_pmu.attr_freeze_on_smi == val)
5675 goto done;
5676
5677 x86_pmu.attr_freeze_on_smi = val;
5678
5679 cpus_read_lock();
5680 on_each_cpu(flip_smm_bit, &val, 1);
5681 cpus_read_unlock();
5682 done:
5683 mutex_unlock(&freeze_on_smi_mutex);
5684
5685 return count;
5686 }
5687
5688 static void update_tfa_sched(void *ignored)
5689 {
5690 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
5691
5692 /*
5693 * check if PMC3 is used
5694 * and if so force schedule out for all event types in all contexts
5695 */
5696 if (test_bit(3, cpuc->active_mask))
5697 perf_pmu_resched(x86_get_pmu(smp_processor_id()));
5698 }
5699
5700 static ssize_t show_sysctl_tfa(struct device *cdev,
5701 struct device_attribute *attr,
5702 char *buf)
5703 {
5704 return snprintf(buf, 40, "%d\n", allow_tsx_force_abort);
5705 }
5706
5707 static ssize_t set_sysctl_tfa(struct device *cdev,
5708 struct device_attribute *attr,
5709 const char *buf, size_t count)
5710 {
5711 bool val;
5712 ssize_t ret;
5713
5714 ret = kstrtobool(buf, &val);
5715 if (ret)
5716 return ret;
5717
5718 /* no change */
5719 if (val == allow_tsx_force_abort)
5720 return count;
5721
5722 allow_tsx_force_abort = val;
5723
5724 cpus_read_lock();
5725 on_each_cpu(update_tfa_sched, NULL, 1);
5726 cpus_read_unlock();
5727
5728 return count;
5729 }
5730
5731
5732 static DEVICE_ATTR_RW(freeze_on_smi);
5733
5734 static ssize_t branches_show(struct device *cdev,
5735 struct device_attribute *attr,
5736 char *buf)
5737 {
5738 return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu.lbr_nr);
5739 }
5740
5741 static DEVICE_ATTR_RO(branches);
5742
5743 static ssize_t branch_counter_nr_show(struct device *cdev,
5744 struct device_attribute *attr,
5745 char *buf)
5746 {
5747 return snprintf(buf, PAGE_SIZE, "%d\n", fls(x86_pmu.lbr_counters));
5748 }
5749
5750 static DEVICE_ATTR_RO(branch_counter_nr);
5751
5752 static ssize_t branch_counter_width_show(struct device *cdev,
5753 struct device_attribute *attr,
5754 char *buf)
5755 {
5756 return snprintf(buf, PAGE_SIZE, "%d\n", LBR_INFO_BR_CNTR_BITS);
5757 }
5758
5759 static DEVICE_ATTR_RO(branch_counter_width);
5760
5761 static struct attribute *lbr_attrs[] = {
5762 &dev_attr_branches.attr,
5763 &dev_attr_branch_counter_nr.attr,
5764 &dev_attr_branch_counter_width.attr,
5765 NULL
5766 };
5767
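/*
 * Index 0 is the "branches" attribute, visible whenever LBRs exist; the
 * branch-counter attributes additionally require PMU_FL_BR_CNTR.
 */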
5768 static umode_t
5769 lbr_is_visible(struct kobject *kobj, struct attribute *attr, int i)
5770 {
5771 /* branches */
5772 if (i == 0)
5773 return x86_pmu.lbr_nr ? attr->mode : 0;
5774
5775 return (x86_pmu.flags & PMU_FL_BR_CNTR) ? attr->mode : 0;
5776 }
5777
5778 static char pmu_name_str[30];
5779
5780 static DEVICE_STRING_ATTR_RO(pmu_name, 0444, pmu_name_str);
5781
5782 static struct attribute *intel_pmu_caps_attrs[] = {
5783 &dev_attr_pmu_name.attr.attr,
5784 NULL
5785 };
5786
5787 static DEVICE_ATTR(allow_tsx_force_abort, 0644,
5788 show_sysctl_tfa,
5789 set_sysctl_tfa);
5790
5791 static struct attribute *intel_pmu_attrs[] = {
5792 &dev_attr_freeze_on_smi.attr,
5793 &dev_attr_allow_tsx_force_abort.attr,
5794 NULL,
5795 };
5796
5797 static umode_t
5798 default_is_visible(struct kobject *kobj, struct attribute *attr, int i)
5799 {
5800 if (attr == &dev_attr_allow_tsx_force_abort.attr)
5801 return x86_pmu.flags & PMU_FL_TFA ? attr->mode : 0;
5802
5803 return attr->mode;
5804 }
5805
5806 static umode_t
5807 tsx_is_visible(struct kobject *kobj, struct attribute *attr, int i)
5808 {
5809 return boot_cpu_has(X86_FEATURE_RTM) ? attr->mode : 0;
5810 }
5811
5812 static umode_t
5813 pebs_is_visible(struct kobject *kobj, struct attribute *attr, int i)
5814 {
5815 return x86_pmu.pebs ? attr->mode : 0;
5816 }
5817
5818 static umode_t
5819 mem_is_visible(struct kobject *kobj, struct attribute *attr, int i)
5820 {
5821 if (attr == &event_attr_mem_ld_aux.attr.attr)
5822 return x86_pmu.flags & PMU_FL_MEM_LOADS_AUX ? attr->mode : 0;
5823
5824 return pebs_is_visible(kobj, attr, i);
5825 }
5826
5827 static umode_t
5828 exra_is_visible(struct kobject *kobj, struct attribute *attr, int i)
5829 {
5830 return x86_pmu.version >= 2 ? attr->mode : 0;
5831 }
5832
5833 static umode_t
5834 td_is_visible(struct kobject *kobj, struct attribute *attr, int i)
5835 {
5836 /*
5837 * Hide the perf metrics topdown events
5838 * if the feature is not enumerated.
5839 */
5840 if (x86_pmu.num_topdown_events)
5841 return x86_pmu.intel_cap.perf_metrics ? attr->mode : 0;
5842
5843 return attr->mode;
5844 }
5845
5846 static struct attribute_group group_events_td = {
5847 .name = "events",
5848 .is_visible = td_is_visible,
5849 };
5850
5851 static struct attribute_group group_events_mem = {
5852 .name = "events",
5853 .is_visible = mem_is_visible,
5854 };
5855
5856 static struct attribute_group group_events_tsx = {
5857 .name = "events",
5858 .is_visible = tsx_is_visible,
5859 };
5860
5861 static struct attribute_group group_caps_gen = {
5862 .name = "caps",
5863 .attrs = intel_pmu_caps_attrs,
5864 };
5865
5866 static struct attribute_group group_caps_lbr = {
5867 .name = "caps",
5868 .attrs = lbr_attrs,
5869 .is_visible = lbr_is_visible,
5870 };
5871
5872 static struct attribute_group group_format_extra = {
5873 .name = "format",
5874 .is_visible = exra_is_visible,
5875 };
5876
5877 static struct attribute_group group_format_extra_skl = {
5878 .name = "format",
5879 .is_visible = exra_is_visible,
5880 };
5881
5882 static struct attribute_group group_format_evtsel_ext = {
5883 .name = "format",
5884 .attrs = format_evtsel_ext_attrs,
5885 .is_visible = evtsel_ext_is_visible,
5886 };
5887
5888 static struct attribute_group group_default = {
5889 .attrs = intel_pmu_attrs,
5890 .is_visible = default_is_visible,
5891 };
5892
5893 static const struct attribute_group *attr_update[] = {
5894 &group_events_td,
5895 &group_events_mem,
5896 &group_events_tsx,
5897 &group_caps_gen,
5898 &group_caps_lbr,
5899 &group_format_extra,
5900 &group_format_extra_skl,
5901 &group_format_evtsel_ext,
5902 &group_default,
5903 NULL,
5904 };
5905
5906 EVENT_ATTR_STR_HYBRID(slots, slots_adl, "event=0x00,umask=0x4", hybrid_big);
5907 EVENT_ATTR_STR_HYBRID(topdown-retiring, td_retiring_adl, "event=0xc2,umask=0x0;event=0x00,umask=0x80", hybrid_big_small);
5908 EVENT_ATTR_STR_HYBRID(topdown-bad-spec, td_bad_spec_adl, "event=0x73,umask=0x0;event=0x00,umask=0x81", hybrid_big_small);
5909 EVENT_ATTR_STR_HYBRID(topdown-fe-bound, td_fe_bound_adl, "event=0x71,umask=0x0;event=0x00,umask=0x82", hybrid_big_small);
5910 EVENT_ATTR_STR_HYBRID(topdown-be-bound, td_be_bound_adl, "event=0x74,umask=0x0;event=0x00,umask=0x83", hybrid_big_small);
5911 EVENT_ATTR_STR_HYBRID(topdown-heavy-ops, td_heavy_ops_adl, "event=0x00,umask=0x84", hybrid_big);
5912 EVENT_ATTR_STR_HYBRID(topdown-br-mispredict, td_br_mis_adl, "event=0x00,umask=0x85", hybrid_big);
5913 EVENT_ATTR_STR_HYBRID(topdown-fetch-lat, td_fetch_lat_adl, "event=0x00,umask=0x86", hybrid_big);
5914 EVENT_ATTR_STR_HYBRID(topdown-mem-bound, td_mem_bound_adl, "event=0x00,umask=0x87", hybrid_big);
5915
5916 static struct attribute *adl_hybrid_events_attrs[] = {
5917 EVENT_PTR(slots_adl),
5918 EVENT_PTR(td_retiring_adl),
5919 EVENT_PTR(td_bad_spec_adl),
5920 EVENT_PTR(td_fe_bound_adl),
5921 EVENT_PTR(td_be_bound_adl),
5922 EVENT_PTR(td_heavy_ops_adl),
5923 EVENT_PTR(td_br_mis_adl),
5924 EVENT_PTR(td_fetch_lat_adl),
5925 EVENT_PTR(td_mem_bound_adl),
5926 NULL,
5927 };
5928
5929 EVENT_ATTR_STR_HYBRID(topdown-retiring, td_retiring_lnl, "event=0xc2,umask=0x02;event=0x00,umask=0x80", hybrid_big_small);
5930 EVENT_ATTR_STR_HYBRID(topdown-fe-bound, td_fe_bound_lnl, "event=0x9c,umask=0x01;event=0x00,umask=0x82", hybrid_big_small);
5931 EVENT_ATTR_STR_HYBRID(topdown-be-bound, td_be_bound_lnl, "event=0xa4,umask=0x02;event=0x00,umask=0x83", hybrid_big_small);
5932
5933 static struct attribute *lnl_hybrid_events_attrs[] = {
5934 EVENT_PTR(slots_adl),
5935 EVENT_PTR(td_retiring_lnl),
5936 EVENT_PTR(td_bad_spec_adl),
5937 EVENT_PTR(td_fe_bound_lnl),
5938 EVENT_PTR(td_be_bound_lnl),
5939 EVENT_PTR(td_heavy_ops_adl),
5940 EVENT_PTR(td_br_mis_adl),
5941 EVENT_PTR(td_fetch_lat_adl),
5942 EVENT_PTR(td_mem_bound_adl),
5943 NULL
5944 };
5945
5946 /* Must be in IDX order */
5947 EVENT_ATTR_STR_HYBRID(mem-loads, mem_ld_adl, "event=0xd0,umask=0x5,ldlat=3;event=0xcd,umask=0x1,ldlat=3", hybrid_big_small);
5948 EVENT_ATTR_STR_HYBRID(mem-stores, mem_st_adl, "event=0xd0,umask=0x6;event=0xcd,umask=0x2", hybrid_big_small);
5949 EVENT_ATTR_STR_HYBRID(mem-loads-aux, mem_ld_aux_adl, "event=0x03,umask=0x82", hybrid_big);
5950
5951 static struct attribute *adl_hybrid_mem_attrs[] = {
5952 EVENT_PTR(mem_ld_adl),
5953 EVENT_PTR(mem_st_adl),
5954 EVENT_PTR(mem_ld_aux_adl),
5955 NULL,
5956 };
5957
5958 static struct attribute *mtl_hybrid_mem_attrs[] = {
5959 EVENT_PTR(mem_ld_adl),
5960 EVENT_PTR(mem_st_adl),
5961 NULL
5962 };
5963
5964 EVENT_ATTR_STR_HYBRID(tx-start, tx_start_adl, "event=0xc9,umask=0x1", hybrid_big);
5965 EVENT_ATTR_STR_HYBRID(tx-commit, tx_commit_adl, "event=0xc9,umask=0x2", hybrid_big);
5966 EVENT_ATTR_STR_HYBRID(tx-abort, tx_abort_adl, "event=0xc9,umask=0x4", hybrid_big);
5967 EVENT_ATTR_STR_HYBRID(tx-conflict, tx_conflict_adl, "event=0x54,umask=0x1", hybrid_big);
5968 EVENT_ATTR_STR_HYBRID(cycles-t, cycles_t_adl, "event=0x3c,in_tx=1", hybrid_big);
5969 EVENT_ATTR_STR_HYBRID(cycles-ct, cycles_ct_adl, "event=0x3c,in_tx=1,in_tx_cp=1", hybrid_big);
5970 EVENT_ATTR_STR_HYBRID(tx-capacity-read, tx_capacity_read_adl, "event=0x54,umask=0x80", hybrid_big);
5971 EVENT_ATTR_STR_HYBRID(tx-capacity-write, tx_capacity_write_adl, "event=0x54,umask=0x2", hybrid_big);
5972
5973 static struct attribute *adl_hybrid_tsx_attrs[] = {
5974 EVENT_PTR(tx_start_adl),
5975 EVENT_PTR(tx_abort_adl),
5976 EVENT_PTR(tx_commit_adl),
5977 EVENT_PTR(tx_capacity_read_adl),
5978 EVENT_PTR(tx_capacity_write_adl),
5979 EVENT_PTR(tx_conflict_adl),
5980 EVENT_PTR(cycles_t_adl),
5981 EVENT_PTR(cycles_ct_adl),
5982 NULL,
5983 };
5984
5985 FORMAT_ATTR_HYBRID(in_tx, hybrid_big);
5986 FORMAT_ATTR_HYBRID(in_tx_cp, hybrid_big);
5987 FORMAT_ATTR_HYBRID(offcore_rsp, hybrid_big_small);
5988 FORMAT_ATTR_HYBRID(ldlat, hybrid_big_small);
5989 FORMAT_ATTR_HYBRID(frontend, hybrid_big);
5990
5991 #define ADL_HYBRID_RTM_FORMAT_ATTR \
5992 FORMAT_HYBRID_PTR(in_tx), \
5993 FORMAT_HYBRID_PTR(in_tx_cp)
5994
5995 #define ADL_HYBRID_FORMAT_ATTR \
5996 FORMAT_HYBRID_PTR(offcore_rsp), \
5997 FORMAT_HYBRID_PTR(ldlat), \
5998 FORMAT_HYBRID_PTR(frontend)
5999
6000 static struct attribute *adl_hybrid_extra_attr_rtm[] = {
6001 ADL_HYBRID_RTM_FORMAT_ATTR,
6002 ADL_HYBRID_FORMAT_ATTR,
6003 NULL
6004 };
6005
6006 static struct attribute *adl_hybrid_extra_attr[] = {
6007 ADL_HYBRID_FORMAT_ATTR,
6008 NULL
6009 };
6010
6011 FORMAT_ATTR_HYBRID(snoop_rsp, hybrid_small);
6012
6013 static struct attribute *mtl_hybrid_extra_attr_rtm[] = {
6014 ADL_HYBRID_RTM_FORMAT_ATTR,
6015 ADL_HYBRID_FORMAT_ATTR,
6016 FORMAT_HYBRID_PTR(snoop_rsp),
6017 NULL
6018 };
6019
6020 static struct attribute *mtl_hybrid_extra_attr[] = {
6021 ADL_HYBRID_FORMAT_ATTR,
6022 FORMAT_HYBRID_PTR(snoop_rsp),
6023 NULL
6024 };
6025
6026 static bool is_attr_for_this_pmu(struct kobject *kobj, struct attribute *attr)
6027 {
6028 struct device *dev = kobj_to_dev(kobj);
6029 struct x86_hybrid_pmu *pmu =
6030 container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
6031 struct perf_pmu_events_hybrid_attr *pmu_attr =
6032 container_of(attr, struct perf_pmu_events_hybrid_attr, attr.attr);
6033
6034 return pmu->pmu_type & pmu_attr->pmu_type;
6035 }
6036
6037 static umode_t hybrid_events_is_visible(struct kobject *kobj,
6038 struct attribute *attr, int i)
6039 {
6040 return is_attr_for_this_pmu(kobj, attr) ? attr->mode : 0;
6041 }
6042
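/* Pick a CPU from this hybrid PMU's supported mask, or -1 if it is empty. */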
6043 static inline int hybrid_find_supported_cpu(struct x86_hybrid_pmu *pmu)
6044 {
6045 int cpu = cpumask_first(&pmu->supported_cpus);
6046
6047 return (cpu >= nr_cpu_ids) ? -1 : cpu;
6048 }
6049
6050 static umode_t hybrid_tsx_is_visible(struct kobject *kobj,
6051 struct attribute *attr, int i)
6052 {
6053 struct device *dev = kobj_to_dev(kobj);
6054 struct x86_hybrid_pmu *pmu =
6055 container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
6056 int cpu = hybrid_find_supported_cpu(pmu);
6057
6058 return (cpu >= 0) && is_attr_for_this_pmu(kobj, attr) && cpu_has(&cpu_data(cpu), X86_FEATURE_RTM) ? attr->mode : 0;
6059 }
6060
6061 static umode_t hybrid_format_is_visible(struct kobject *kobj,
6062 struct attribute *attr, int i)
6063 {
6064 struct device *dev = kobj_to_dev(kobj);
6065 struct x86_hybrid_pmu *pmu =
6066 container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
6067 struct perf_pmu_format_hybrid_attr *pmu_attr =
6068 container_of(attr, struct perf_pmu_format_hybrid_attr, attr.attr);
6069 int cpu = hybrid_find_supported_cpu(pmu);
6070
6071 return (cpu >= 0) && (pmu->pmu_type & pmu_attr->pmu_type) ? attr->mode : 0;
6072 }
6073
6074 static umode_t hybrid_td_is_visible(struct kobject *kobj,
6075 struct attribute *attr, int i)
6076 {
6077 struct device *dev = kobj_to_dev(kobj);
6078 struct x86_hybrid_pmu *pmu =
6079 container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
6080
6081 if (!is_attr_for_this_pmu(kobj, attr))
6082 return 0;
6083
6084
6085 /* Only the big core supports perf metrics */
6086 if (pmu->pmu_type == hybrid_big)
6087 return pmu->intel_cap.perf_metrics ? attr->mode : 0;
6088
6089 return attr->mode;
6090 }
6091
6092 static struct attribute_group hybrid_group_events_td = {
6093 .name = "events",
6094 .is_visible = hybrid_td_is_visible,
6095 };
6096
6097 static struct attribute_group hybrid_group_events_mem = {
6098 .name = "events",
6099 .is_visible = hybrid_events_is_visible,
6100 };
6101
6102 static struct attribute_group hybrid_group_events_tsx = {
6103 .name = "events",
6104 .is_visible = hybrid_tsx_is_visible,
6105 };
6106
6107 static struct attribute_group hybrid_group_format_extra = {
6108 .name = "format",
6109 .is_visible = hybrid_format_is_visible,
6110 };
6111
6112 static ssize_t intel_hybrid_get_attr_cpus(struct device *dev,
6113 struct device_attribute *attr,
6114 char *buf)
6115 {
6116 struct x86_hybrid_pmu *pmu =
6117 container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
6118
6119 return cpumap_print_to_pagebuf(true, buf, &pmu->supported_cpus);
6120 }
6121
6122 static DEVICE_ATTR(cpus, S_IRUGO, intel_hybrid_get_attr_cpus, NULL);
6123 static struct attribute *intel_hybrid_cpus_attrs[] = {
6124 &dev_attr_cpus.attr,
6125 NULL,
6126 };
6127
6128 static struct attribute_group hybrid_group_cpus = {
6129 .attrs = intel_hybrid_cpus_attrs,
6130 };
6131
6132 static const struct attribute_group *hybrid_attr_update[] = {
6133 &hybrid_group_events_td,
6134 &hybrid_group_events_mem,
6135 &hybrid_group_events_tsx,
6136 &group_caps_gen,
6137 &group_caps_lbr,
6138 &hybrid_group_format_extra,
6139 &group_format_evtsel_ext,
6140 &group_default,
6141 &hybrid_group_cpus,
6142 NULL,
6143 };
6144
6145 static struct attribute *empty_attrs;
6146
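/*
 * Trim each static event constraint to the counters that CPUID actually
 * enumerated and recompute its weight.
 */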
6147 static void intel_pmu_check_event_constraints(struct event_constraint *event_constraints,
6148 u64 cntr_mask,
6149 u64 fixed_cntr_mask,
6150 u64 intel_ctrl)
6151 {
6152 struct event_constraint *c;
6153
6154 if (!event_constraints)
6155 return;
6156
6157 /*
6158 * The event on fixed counter 2 (REF_CYCLES) only works on this
6159 * counter, so do not extend its mask to the generic counters.
6160 */
6161 for_each_event_constraint(c, event_constraints) {
6162 /*
6163 * Don't extend the topdown slots and metrics
6164 * events to the generic counters.
6165 */
6166 if (c->idxmsk64 & INTEL_PMC_MSK_TOPDOWN) {
6167 /*
6168 * Disable topdown slots and metrics events,
6169 * if slots event is not in CPUID.
6170 */
6171 if (!(INTEL_PMC_MSK_FIXED_SLOTS & intel_ctrl))
6172 c->idxmsk64 = 0;
6173 c->weight = hweight64(c->idxmsk64);
6174 continue;
6175 }
6176
6177 if (c->cmask == FIXED_EVENT_FLAGS) {
6178 /* Disabled fixed counters which are not in CPUID */
6179 c->idxmsk64 &= intel_ctrl;
6180
6181 /*
6182 * Don't extend the pseudo-encoding to the
6183 * generic counters
6184 */
6185 if (!use_fixed_pseudo_encoding(c->code))
6186 c->idxmsk64 |= cntr_mask;
6187 }
6188 c->idxmsk64 &= cntr_mask | (fixed_cntr_mask << INTEL_PMC_IDX_FIXED);
6189 c->weight = hweight64(c->idxmsk64);
6190 }
6191 }
6192
6193 static void intel_pmu_check_extra_regs(struct extra_reg *extra_regs)
6194 {
6195 struct extra_reg *er;
6196
6197 /*
6198 * Accessing an extra MSR may cause #GP under certain circumstances,
6199 * e.g. KVM doesn't support offcore events.
6200 * Check all extra_regs here.
6201 */
6202 if (!extra_regs)
6203 return;
6204
6205 for (er = extra_regs; er->msr; er++) {
6206 er->extra_msr_access = check_msr(er->msr, 0x11UL);
6207 /* Disable LBR select mapping */
6208 if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
6209 x86_pmu.lbr_sel_map = NULL;
6210 }
6211 }
6212
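/*
 * With arch-perfmon v6 the eventsel and counter MSRs are both laid out at
 * a fixed stride (MSR_IA32_PMC_V6_STEP) from their base registers.
 */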
6213 static inline int intel_pmu_v6_addr_offset(int index, bool eventsel)
6214 {
6215 return MSR_IA32_PMC_V6_STEP * index;
6216 }
6217
6218 static const struct { enum hybrid_pmu_type id; char *name; } intel_hybrid_pmu_type_map[] __initconst = {
6219 { hybrid_small, "cpu_atom" },
6220 { hybrid_big, "cpu_core" },
6221 };
6222
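/*
 * Allocate one x86_hybrid_pmu per type bit in @pmus and seed each from the
 * boot-time x86_pmu state; the per-type differences (perf metrics,
 * PEBS-via-PT, ack mode) are applied in the loop below.
 */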
6223 static __always_inline int intel_pmu_init_hybrid(enum hybrid_pmu_type pmus)
6224 {
6225 unsigned long pmus_mask = pmus;
6226 struct x86_hybrid_pmu *pmu;
6227 int idx = 0, bit;
6228
6229 x86_pmu.num_hybrid_pmus = hweight_long(pmus_mask);
6230 x86_pmu.hybrid_pmu = kcalloc(x86_pmu.num_hybrid_pmus,
6231 sizeof(struct x86_hybrid_pmu),
6232 GFP_KERNEL);
6233 if (!x86_pmu.hybrid_pmu)
6234 return -ENOMEM;
6235
6236 static_branch_enable(&perf_is_hybrid);
6237 x86_pmu.filter = intel_pmu_filter;
6238
6239 for_each_set_bit(bit, &pmus_mask, ARRAY_SIZE(intel_hybrid_pmu_type_map)) {
6240 pmu = &x86_pmu.hybrid_pmu[idx++];
6241 pmu->pmu_type = intel_hybrid_pmu_type_map[bit].id;
6242 pmu->name = intel_hybrid_pmu_type_map[bit].name;
6243
6244 pmu->cntr_mask64 = x86_pmu.cntr_mask64;
6245 pmu->fixed_cntr_mask64 = x86_pmu.fixed_cntr_mask64;
6246 pmu->pebs_events_mask = intel_pmu_pebs_mask(pmu->cntr_mask64);
6247 pmu->config_mask = X86_RAW_EVENT_MASK;
6248 pmu->unconstrained = (struct event_constraint)
6249 __EVENT_CONSTRAINT(0, pmu->cntr_mask64,
6250 0, x86_pmu_num_counters(&pmu->pmu), 0, 0);
6251
6252 pmu->intel_cap.capabilities = x86_pmu.intel_cap.capabilities;
6253 if (pmu->pmu_type & hybrid_small) {
6254 pmu->intel_cap.perf_metrics = 0;
6255 pmu->intel_cap.pebs_output_pt_available = 1;
6256 pmu->mid_ack = true;
6257 } else if (pmu->pmu_type & hybrid_big) {
6258 pmu->intel_cap.perf_metrics = 1;
6259 pmu->intel_cap.pebs_output_pt_available = 0;
6260 pmu->late_ack = true;
6261 }
6262 }
6263
6264 return 0;
6265 }
6266
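/*
 * If CPUID does not mark the architectural reference-cycles event as
 * unavailable, remap ref-cycles from the fixed-counter pseudo-encoding
 * (0x0300) to 0x013c so it can also be scheduled on a generic counter.
 */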
6267 static __always_inline void intel_pmu_ref_cycles_ext(void)
6268 {
6269 if (!(x86_pmu.events_maskl & (INTEL_PMC_MSK_FIXED_REF_CYCLES >> INTEL_PMC_IDX_FIXED)))
6270 intel_perfmon_event_map[PERF_COUNT_HW_REF_CPU_CYCLES] = 0x013c;
6271 }
6272
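/*
 * Common setup for Golden Cove (GLC) based big cores; also reused by
 * intel_pmu_init_lnc() for later big cores.
 */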
6273 static __always_inline void intel_pmu_init_glc(struct pmu *pmu)
6274 {
6275 x86_pmu.late_ack = true;
6276 x86_pmu.limit_period = glc_limit_period;
6277 x86_pmu.pebs_aliases = NULL;
6278 x86_pmu.pebs_prec_dist = true;
6279 x86_pmu.pebs_block = true;
6280 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6281 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
6282 x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
6283 x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04);
6284 x86_pmu.lbr_pt_coexist = true;
6285 x86_pmu.num_topdown_events = 8;
6286 static_call_update(intel_pmu_update_topdown_event,
6287 &icl_update_topdown_event);
6288 static_call_update(intel_pmu_set_topdown_event_period,
6289 &icl_set_topdown_event_period);
6290
6291 memcpy(hybrid_var(pmu, hw_cache_event_ids), glc_hw_cache_event_ids, sizeof(hw_cache_event_ids));
6292 memcpy(hybrid_var(pmu, hw_cache_extra_regs), glc_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
6293 hybrid(pmu, event_constraints) = intel_glc_event_constraints;
6294 hybrid(pmu, pebs_constraints) = intel_glc_pebs_event_constraints;
6295
6296 intel_pmu_ref_cycles_ext();
6297 }
6298
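/*
 * Common setup for Gracemont (GRT) based Atom cores; also reused by the
 * Crestmont init path and by intel_pmu_init_skt().
 */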
6299 static __always_inline void intel_pmu_init_grt(struct pmu *pmu)
6300 {
6301 x86_pmu.mid_ack = true;
6302 x86_pmu.limit_period = glc_limit_period;
6303 x86_pmu.pebs_aliases = NULL;
6304 x86_pmu.pebs_prec_dist = true;
6305 x86_pmu.pebs_block = true;
6306 x86_pmu.lbr_pt_coexist = true;
6307 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6308 x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
6309
6310 memcpy(hybrid_var(pmu, hw_cache_event_ids), glp_hw_cache_event_ids, sizeof(hw_cache_event_ids));
6311 memcpy(hybrid_var(pmu, hw_cache_extra_regs), tnt_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
6312 hybrid_var(pmu, hw_cache_event_ids)[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
6313 hybrid(pmu, event_constraints) = intel_grt_event_constraints;
6314 hybrid(pmu, pebs_constraints) = intel_grt_pebs_event_constraints;
6315 hybrid(pmu, extra_regs) = intel_grt_extra_regs;
6316
6317 intel_pmu_ref_cycles_ext();
6318 }
6319
6320 static __always_inline void intel_pmu_init_lnc(struct pmu *pmu)
6321 {
6322 intel_pmu_init_glc(pmu);
6323 hybrid(pmu, event_constraints) = intel_lnc_event_constraints;
6324 hybrid(pmu, pebs_constraints) = intel_lnc_pebs_event_constraints;
6325 hybrid(pmu, extra_regs) = intel_rwc_extra_regs;
6326 }
6327
6328 static __always_inline void intel_pmu_init_skt(struct pmu *pmu)
6329 {
6330 intel_pmu_init_grt(pmu);
6331 hybrid(pmu, event_constraints) = intel_skt_event_constraints;
6332 hybrid(pmu, extra_regs) = intel_cmt_extra_regs;
6333 }
6334
6335 __init int intel_pmu_init(void)
6336 {
6337 struct attribute **extra_skl_attr = &empty_attrs;
6338 struct attribute **extra_attr = &empty_attrs;
6339 struct attribute **td_attr = &empty_attrs;
6340 struct attribute **mem_attr = &empty_attrs;
6341 struct attribute **tsx_attr = &empty_attrs;
6342 union cpuid10_edx edx;
6343 union cpuid10_eax eax;
6344 union cpuid10_ebx ebx;
6345 unsigned int fixed_mask;
6346 bool pmem = false;
6347 int version, i;
6348 char *name;
6349 struct x86_hybrid_pmu *pmu;
6350
6351 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
6352 switch (boot_cpu_data.x86) {
6353 case 0x6:
6354 return p6_pmu_init();
6355 case 0xb:
6356 return knc_pmu_init();
6357 case 0xf:
6358 return p4_pmu_init();
6359 }
6360 return -ENODEV;
6361 }
6362
6363 /*
6364 * Check whether the Architectural PerfMon supports
6365 * Branch Misses Retired hw_event or not.
6366 */
6367 cpuid(10, &eax.full, &ebx.full, &fixed_mask, &edx.full);
6368 if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
6369 return -ENODEV;
6370
6371 version = eax.split.version_id;
6372 if (version < 2)
6373 x86_pmu = core_pmu;
6374 else
6375 x86_pmu = intel_pmu;
6376
6377 x86_pmu.version = version;
6378 x86_pmu.cntr_mask64 = GENMASK_ULL(eax.split.num_counters - 1, 0);
6379 x86_pmu.cntval_bits = eax.split.bit_width;
6380 x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;
6381
6382 x86_pmu.events_maskl = ebx.full;
6383 x86_pmu.events_mask_len = eax.split.mask_length;
6384
6385 x86_pmu.pebs_events_mask = intel_pmu_pebs_mask(x86_pmu.cntr_mask64);
6386 x86_pmu.pebs_capable = PEBS_COUNTER_MASK;
6387
6388 /*
6389 * Quirk: v2 perfmon does not report fixed-purpose events, so
6390 * assume at least 3 events, when not running in a hypervisor:
6391 */
6392 if (version > 1 && version < 5) {
6393 int assume = 3 * !boot_cpu_has(X86_FEATURE_HYPERVISOR);
6394
6395 x86_pmu.fixed_cntr_mask64 =
6396 GENMASK_ULL(max((int)edx.split.num_counters_fixed, assume) - 1, 0);
6397 } else if (version >= 5)
6398 x86_pmu.fixed_cntr_mask64 = fixed_mask;
6399
6400 if (boot_cpu_has(X86_FEATURE_PDCM)) {
6401 u64 capabilities;
6402
6403 rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
6404 x86_pmu.intel_cap.capabilities = capabilities;
6405 }
6406
6407 if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32) {
6408 x86_pmu.lbr_reset = intel_pmu_lbr_reset_32;
6409 x86_pmu.lbr_read = intel_pmu_lbr_read_32;
6410 }
6411
6412 if (boot_cpu_has(X86_FEATURE_ARCH_LBR))
6413 intel_pmu_arch_lbr_init();
6414
6415 intel_ds_init();
6416
6417 x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */
6418
6419 if (version >= 5) {
6420 x86_pmu.intel_cap.anythread_deprecated = edx.split.anythread_deprecated;
6421 if (x86_pmu.intel_cap.anythread_deprecated)
6422 pr_cont(" AnyThread deprecated, ");
6423 }
6424
6425 /*
6426 * Install the hw-cache-events table:
6427 */
6428 switch (boot_cpu_data.x86_vfm) {
6429 case INTEL_CORE_YONAH:
6430 pr_cont("Core events, ");
6431 name = "core";
6432 break;
6433
6434 case INTEL_CORE2_MEROM:
6435 x86_add_quirk(intel_clovertown_quirk);
6436 fallthrough;
6437
6438 case INTEL_CORE2_MEROM_L:
6439 case INTEL_CORE2_PENRYN:
6440 case INTEL_CORE2_DUNNINGTON:
6441 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
6442 sizeof(hw_cache_event_ids));
6443
6444 intel_pmu_lbr_init_core();
6445
6446 x86_pmu.event_constraints = intel_core2_event_constraints;
6447 x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
6448 pr_cont("Core2 events, ");
6449 name = "core2";
6450 break;
6451
6452 case INTEL_NEHALEM:
6453 case INTEL_NEHALEM_EP:
6454 case INTEL_NEHALEM_EX:
6455 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
6456 sizeof(hw_cache_event_ids));
6457 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
6458 sizeof(hw_cache_extra_regs));
6459
6460 intel_pmu_lbr_init_nhm();
6461
6462 x86_pmu.event_constraints = intel_nehalem_event_constraints;
6463 x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
6464 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
6465 x86_pmu.extra_regs = intel_nehalem_extra_regs;
6466 x86_pmu.limit_period = nhm_limit_period;
6467
6468 mem_attr = nhm_mem_events_attrs;
6469
6470 /* UOPS_ISSUED.STALLED_CYCLES */
6471 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
6472 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
6473 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
6474 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
6475 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
6476
6477 intel_pmu_pebs_data_source_nhm();
6478 x86_add_quirk(intel_nehalem_quirk);
6479 x86_pmu.pebs_no_tlb = 1;
6480 extra_attr = nhm_format_attr;
6481
6482 pr_cont("Nehalem events, ");
6483 name = "nehalem";
6484 break;
6485
6486 case INTEL_ATOM_BONNELL:
6487 case INTEL_ATOM_BONNELL_MID:
6488 case INTEL_ATOM_SALTWELL:
6489 case INTEL_ATOM_SALTWELL_MID:
6490 case INTEL_ATOM_SALTWELL_TABLET:
6491 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
6492 sizeof(hw_cache_event_ids));
6493
6494 intel_pmu_lbr_init_atom();
6495
6496 x86_pmu.event_constraints = intel_gen_event_constraints;
6497 x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
6498 x86_pmu.pebs_aliases = intel_pebs_aliases_core2;
6499 pr_cont("Atom events, ");
6500 name = "bonnell";
6501 break;
6502
6503 case INTEL_ATOM_SILVERMONT:
6504 case INTEL_ATOM_SILVERMONT_D:
6505 case INTEL_ATOM_SILVERMONT_MID:
6506 case INTEL_ATOM_AIRMONT:
6507 case INTEL_ATOM_AIRMONT_MID:
6508 memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
6509 sizeof(hw_cache_event_ids));
6510 memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
6511 sizeof(hw_cache_extra_regs));
6512
6513 intel_pmu_lbr_init_slm();
6514
6515 x86_pmu.event_constraints = intel_slm_event_constraints;
6516 x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
6517 x86_pmu.extra_regs = intel_slm_extra_regs;
6518 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6519 td_attr = slm_events_attrs;
6520 extra_attr = slm_format_attr;
6521 pr_cont("Silvermont events, ");
6522 name = "silvermont";
6523 break;
6524
6525 case INTEL_ATOM_GOLDMONT:
6526 case INTEL_ATOM_GOLDMONT_D:
6527 memcpy(hw_cache_event_ids, glm_hw_cache_event_ids,
6528 sizeof(hw_cache_event_ids));
6529 memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs,
6530 sizeof(hw_cache_extra_regs));
6531
6532 intel_pmu_lbr_init_skl();
6533
6534 x86_pmu.event_constraints = intel_slm_event_constraints;
6535 x86_pmu.pebs_constraints = intel_glm_pebs_event_constraints;
6536 x86_pmu.extra_regs = intel_glm_extra_regs;
6537 /*
6538 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
6539 * for precise cycles.
6540 * :pp is identical to :ppp
6541 */
6542 x86_pmu.pebs_aliases = NULL;
6543 x86_pmu.pebs_prec_dist = true;
6544 x86_pmu.lbr_pt_coexist = true;
6545 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6546 td_attr = glm_events_attrs;
6547 extra_attr = slm_format_attr;
6548 pr_cont("Goldmont events, ");
6549 name = "goldmont";
6550 break;
6551
6552 case INTEL_ATOM_GOLDMONT_PLUS:
6553 memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
6554 sizeof(hw_cache_event_ids));
6555 memcpy(hw_cache_extra_regs, glp_hw_cache_extra_regs,
6556 sizeof(hw_cache_extra_regs));
6557
6558 intel_pmu_lbr_init_skl();
6559
6560 x86_pmu.event_constraints = intel_slm_event_constraints;
6561 x86_pmu.extra_regs = intel_glm_extra_regs;
6562 /*
6563 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
6564 * for precise cycles.
6565 */
6566 x86_pmu.pebs_aliases = NULL;
6567 x86_pmu.pebs_prec_dist = true;
6568 x86_pmu.lbr_pt_coexist = true;
6569 x86_pmu.pebs_capable = ~0ULL;
6570 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6571 x86_pmu.flags |= PMU_FL_PEBS_ALL;
6572 x86_pmu.get_event_constraints = glp_get_event_constraints;
6573 td_attr = glm_events_attrs;
6574 /* Goldmont Plus has 4-wide pipeline */
6575 event_attr_td_total_slots_scale_glm.event_str = "4";
6576 extra_attr = slm_format_attr;
6577 pr_cont("Goldmont plus events, ");
6578 name = "goldmont_plus";
6579 break;
6580
6581 case INTEL_ATOM_TREMONT_D:
6582 case INTEL_ATOM_TREMONT:
6583 case INTEL_ATOM_TREMONT_L:
6584 x86_pmu.late_ack = true;
6585 memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
6586 sizeof(hw_cache_event_ids));
6587 memcpy(hw_cache_extra_regs, tnt_hw_cache_extra_regs,
6588 sizeof(hw_cache_extra_regs));
6589 hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
6590
6591 intel_pmu_lbr_init_skl();
6592
6593 x86_pmu.event_constraints = intel_slm_event_constraints;
6594 x86_pmu.extra_regs = intel_tnt_extra_regs;
6595 /*
6596 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
6597 * for precise cycles.
6598 */
6599 x86_pmu.pebs_aliases = NULL;
6600 x86_pmu.pebs_prec_dist = true;
6601 x86_pmu.lbr_pt_coexist = true;
6602 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6603 x86_pmu.get_event_constraints = tnt_get_event_constraints;
6604 td_attr = tnt_events_attrs;
6605 extra_attr = slm_format_attr;
6606 pr_cont("Tremont events, ");
6607 name = "Tremont";
6608 break;
6609
6610 case INTEL_ATOM_GRACEMONT:
6611 intel_pmu_init_grt(NULL);
6612 intel_pmu_pebs_data_source_grt();
6613 x86_pmu.pebs_latency_data = grt_latency_data;
6614 x86_pmu.get_event_constraints = tnt_get_event_constraints;
6615 td_attr = tnt_events_attrs;
6616 mem_attr = grt_mem_attrs;
6617 extra_attr = nhm_format_attr;
6618 pr_cont("Gracemont events, ");
6619 name = "gracemont";
6620 break;
6621
6622 case INTEL_ATOM_CRESTMONT:
6623 case INTEL_ATOM_CRESTMONT_X:
6624 intel_pmu_init_grt(NULL);
6625 x86_pmu.extra_regs = intel_cmt_extra_regs;
6626 intel_pmu_pebs_data_source_cmt();
6627 x86_pmu.pebs_latency_data = cmt_latency_data;
6628 x86_pmu.get_event_constraints = cmt_get_event_constraints;
6629 td_attr = cmt_events_attrs;
6630 mem_attr = grt_mem_attrs;
6631 extra_attr = cmt_format_attr;
6632 pr_cont("Crestmont events, ");
6633 name = "crestmont";
6634 break;
6635
6636 case INTEL_WESTMERE:
6637 case INTEL_WESTMERE_EP:
6638 case INTEL_WESTMERE_EX:
6639 memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
6640 sizeof(hw_cache_event_ids));
6641 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
6642 sizeof(hw_cache_extra_regs));
6643
6644 intel_pmu_lbr_init_nhm();
6645
6646 x86_pmu.event_constraints = intel_westmere_event_constraints;
6647 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
6648 x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
6649 x86_pmu.extra_regs = intel_westmere_extra_regs;
6650 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6651
6652 mem_attr = nhm_mem_events_attrs;
6653
6654 /* UOPS_ISSUED.STALLED_CYCLES */
6655 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
6656 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
6657 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
6658 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
6659 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
6660
6661 intel_pmu_pebs_data_source_nhm();
6662 extra_attr = nhm_format_attr;
6663 pr_cont("Westmere events, ");
6664 name = "westmere";
6665 break;
6666
6667 case INTEL_SANDYBRIDGE:
6668 case INTEL_SANDYBRIDGE_X:
6669 x86_add_quirk(intel_sandybridge_quirk);
6670 x86_add_quirk(intel_ht_bug);
6671 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
6672 sizeof(hw_cache_event_ids));
6673 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
6674 sizeof(hw_cache_extra_regs));
6675
6676 intel_pmu_lbr_init_snb();
6677
6678 x86_pmu.event_constraints = intel_snb_event_constraints;
6679 x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
6680 x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
6681 if (boot_cpu_data.x86_vfm == INTEL_SANDYBRIDGE_X)
6682 x86_pmu.extra_regs = intel_snbep_extra_regs;
6683 else
6684 x86_pmu.extra_regs = intel_snb_extra_regs;
6685
6686
6687 /* all extra regs are per-cpu when HT is on */
6688 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6689 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
6690
6691 td_attr = snb_events_attrs;
6692 mem_attr = snb_mem_events_attrs;
6693
6694 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
6695 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
6696 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
6697 /* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles*/
6698 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
6699 X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);
6700
6701 extra_attr = nhm_format_attr;
6702
6703 pr_cont("SandyBridge events, ");
6704 name = "sandybridge";
6705 break;
6706
6707 case INTEL_IVYBRIDGE:
6708 case INTEL_IVYBRIDGE_X:
6709 x86_add_quirk(intel_ht_bug);
6710 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
6711 sizeof(hw_cache_event_ids));
6712 /* dTLB-load-misses on IVB is different than SNB */
6713 hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */
6714
6715 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
6716 sizeof(hw_cache_extra_regs));
6717
6718 intel_pmu_lbr_init_snb();
6719
6720 x86_pmu.event_constraints = intel_ivb_event_constraints;
6721 x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
6722 x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
6723 x86_pmu.pebs_prec_dist = true;
6724 if (boot_cpu_data.x86_vfm == INTEL_IVYBRIDGE_X)
6725 x86_pmu.extra_regs = intel_snbep_extra_regs;
6726 else
6727 x86_pmu.extra_regs = intel_snb_extra_regs;
6728 /* all extra regs are per-cpu when HT is on */
6729 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6730 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
6731
6732 td_attr = snb_events_attrs;
6733 mem_attr = snb_mem_events_attrs;
6734
6735 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
6736 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
6737 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
6738
6739 extra_attr = nhm_format_attr;
6740
6741 pr_cont("IvyBridge events, ");
6742 name = "ivybridge";
6743 break;
6744
6745
6746 case INTEL_HASWELL:
6747 case INTEL_HASWELL_X:
6748 case INTEL_HASWELL_L:
6749 case INTEL_HASWELL_G:
6750 x86_add_quirk(intel_ht_bug);
6751 x86_add_quirk(intel_pebs_isolation_quirk);
6752 x86_pmu.late_ack = true;
6753 memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
6754 memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
6755
6756 intel_pmu_lbr_init_hsw();
6757
6758 x86_pmu.event_constraints = intel_hsw_event_constraints;
6759 x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
6760 x86_pmu.extra_regs = intel_snbep_extra_regs;
6761 x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
6762 x86_pmu.pebs_prec_dist = true;
6763 /* all extra regs are per-cpu when HT is on */
6764 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6765 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
6766
6767 x86_pmu.hw_config = hsw_hw_config;
6768 x86_pmu.get_event_constraints = hsw_get_event_constraints;
6769 x86_pmu.lbr_double_abort = true;
6770 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
6771 hsw_format_attr : nhm_format_attr;
6772 td_attr = hsw_events_attrs;
6773 mem_attr = hsw_mem_events_attrs;
6774 tsx_attr = hsw_tsx_events_attrs;
6775 pr_cont("Haswell events, ");
6776 name = "haswell";
6777 break;
6778
6779 case INTEL_BROADWELL:
6780 case INTEL_BROADWELL_D:
6781 case INTEL_BROADWELL_G:
6782 case INTEL_BROADWELL_X:
6783 x86_add_quirk(intel_pebs_isolation_quirk);
6784 x86_pmu.late_ack = true;
6785 memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
6786 memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
6787
6788 /* L3_MISS_LOCAL_DRAM is BIT(26) in Broadwell */
6789 hw_cache_extra_regs[C(LL)][C(OP_READ)][C(RESULT_MISS)] = HSW_DEMAND_READ |
6790 BDW_L3_MISS|HSW_SNOOP_DRAM;
6791 hw_cache_extra_regs[C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = HSW_DEMAND_WRITE|BDW_L3_MISS|
6792 HSW_SNOOP_DRAM;
6793 hw_cache_extra_regs[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = HSW_DEMAND_READ|
6794 BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
6795 hw_cache_extra_regs[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = HSW_DEMAND_WRITE|
6796 BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
6797
6798 intel_pmu_lbr_init_hsw();
6799
6800 x86_pmu.event_constraints = intel_bdw_event_constraints;
6801 x86_pmu.pebs_constraints = intel_bdw_pebs_event_constraints;
6802 x86_pmu.extra_regs = intel_snbep_extra_regs;
6803 x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
6804 x86_pmu.pebs_prec_dist = true;
6805 /* all extra regs are per-cpu when HT is on */
6806 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6807 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
6808
6809 x86_pmu.hw_config = hsw_hw_config;
6810 x86_pmu.get_event_constraints = hsw_get_event_constraints;
6811 x86_pmu.limit_period = bdw_limit_period;
6812 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
6813 hsw_format_attr : nhm_format_attr;
6814 td_attr = hsw_events_attrs;
6815 mem_attr = hsw_mem_events_attrs;
6816 tsx_attr = hsw_tsx_events_attrs;
6817 pr_cont("Broadwell events, ");
6818 name = "broadwell";
6819 break;
6820
6821 case INTEL_XEON_PHI_KNL:
6822 case INTEL_XEON_PHI_KNM:
6823 memcpy(hw_cache_event_ids,
6824 slm_hw_cache_event_ids, sizeof(hw_cache_event_ids));
6825 memcpy(hw_cache_extra_regs,
6826 knl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
6827 intel_pmu_lbr_init_knl();
6828
6829 x86_pmu.event_constraints = intel_slm_event_constraints;
6830 x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
6831 x86_pmu.extra_regs = intel_knl_extra_regs;
6832
6833 /* all extra regs are per-cpu when HT is on */
6834 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6835 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
6836 extra_attr = slm_format_attr;
6837 pr_cont("Knights Landing/Mill events, ");
6838 name = "knights-landing";
6839 break;
6840
6841 case INTEL_SKYLAKE_X:
6842 pmem = true;
6843 fallthrough;
6844 case INTEL_SKYLAKE_L:
6845 case INTEL_SKYLAKE:
6846 case INTEL_KABYLAKE_L:
6847 case INTEL_KABYLAKE:
6848 case INTEL_COMETLAKE_L:
6849 case INTEL_COMETLAKE:
6850 x86_add_quirk(intel_pebs_isolation_quirk);
6851 x86_pmu.late_ack = true;
6852 memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
6853 memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
6854 intel_pmu_lbr_init_skl();
6855
6856 /* INT_MISC.RECOVERY_CYCLES has umask 1 in Skylake */
6857 event_attr_td_recovery_bubbles.event_str_noht =
6858 "event=0xd,umask=0x1,cmask=1";
6859 event_attr_td_recovery_bubbles.event_str_ht =
6860 "event=0xd,umask=0x1,cmask=1,any=1";
6861
6862 x86_pmu.event_constraints = intel_skl_event_constraints;
6863 x86_pmu.pebs_constraints = intel_skl_pebs_event_constraints;
6864 x86_pmu.extra_regs = intel_skl_extra_regs;
6865 x86_pmu.pebs_aliases = intel_pebs_aliases_skl;
6866 x86_pmu.pebs_prec_dist = true;
6867 /* all extra regs are per-cpu when HT is on */
6868 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6869 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
6870
6871 x86_pmu.hw_config = hsw_hw_config;
6872 x86_pmu.get_event_constraints = hsw_get_event_constraints;
6873 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
6874 hsw_format_attr : nhm_format_attr;
6875 extra_skl_attr = skl_format_attr;
6876 td_attr = hsw_events_attrs;
6877 mem_attr = hsw_mem_events_attrs;
6878 tsx_attr = hsw_tsx_events_attrs;
6879 intel_pmu_pebs_data_source_skl(pmem);
6880
6881 /*
6882 * Processors with CPUID.RTM_ALWAYS_ABORT have TSX deprecated by default.
6883 * The TSX force abort hooks are not required on these systems. Only deploy
6884 * the workaround when the microcode has not set X86_FEATURE_RTM_ALWAYS_ABORT.
6885 */
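/*
 * Roughly: PMC3 and TSX cannot be used reliably at the same time on
 * these parts, so the hooks below either keep events off PMC3 or write
 * MSR_TSX_FORCE_ABORT to make RTM transactions abort before PMC3 is
 * handed out.
 */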
6886 if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT) &&
6887 !boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT)) {
6888 x86_pmu.flags |= PMU_FL_TFA;
6889 x86_pmu.get_event_constraints = tfa_get_event_constraints;
6890 x86_pmu.enable_all = intel_tfa_pmu_enable_all;
6891 x86_pmu.commit_scheduling = intel_tfa_commit_scheduling;
6892 }
6893
6894 pr_cont("Skylake events, ");
6895 name = "skylake";
6896 break;
6897
6898 case INTEL_ICELAKE_X:
6899 case INTEL_ICELAKE_D:
6900 x86_pmu.pebs_ept = 1;
6901 pmem = true;
6902 fallthrough;
6903 case INTEL_ICELAKE_L:
6904 case INTEL_ICELAKE:
6905 case INTEL_TIGERLAKE_L:
6906 case INTEL_TIGERLAKE:
6907 case INTEL_ROCKETLAKE:
6908 x86_pmu.late_ack = true;
6909 memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
6910 memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
6911 hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
6912 intel_pmu_lbr_init_skl();
6913
6914 x86_pmu.event_constraints = intel_icl_event_constraints;
6915 x86_pmu.pebs_constraints = intel_icl_pebs_event_constraints;
6916 x86_pmu.extra_regs = intel_icl_extra_regs;
6917 x86_pmu.pebs_aliases = NULL;
6918 x86_pmu.pebs_prec_dist = true;
6919 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6920 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
6921
6922 x86_pmu.hw_config = hsw_hw_config;
6923 x86_pmu.get_event_constraints = icl_get_event_constraints;
6924 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
6925 hsw_format_attr : nhm_format_attr;
6926 extra_skl_attr = skl_format_attr;
6927 mem_attr = icl_events_attrs;
6928 td_attr = icl_td_events_attrs;
6929 tsx_attr = icl_tsx_events_attrs;
6930 x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04);
6931 x86_pmu.lbr_pt_coexist = true;
6932 intel_pmu_pebs_data_source_skl(pmem);
6933 x86_pmu.num_topdown_events = 4;
6934 static_call_update(intel_pmu_update_topdown_event,
6935 &icl_update_topdown_event);
6936 static_call_update(intel_pmu_set_topdown_event_period,
6937 &icl_set_topdown_event_period);
6938 pr_cont("Icelake events, ");
6939 name = "icelake";
6940 break;
6941
6942 case INTEL_SAPPHIRERAPIDS_X:
6943 case INTEL_EMERALDRAPIDS_X:
6944 x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX;
6945 x86_pmu.extra_regs = intel_glc_extra_regs;
6946 pr_cont("Sapphire Rapids events, ");
6947 name = "sapphire_rapids";
6948 goto glc_common;
6949
6950 case INTEL_GRANITERAPIDS_X:
6951 case INTEL_GRANITERAPIDS_D:
6952 x86_pmu.extra_regs = intel_rwc_extra_regs;
6953 pr_cont("Granite Rapids events, ");
6954 name = "granite_rapids";
6955
6956 glc_common:
6957 intel_pmu_init_glc(NULL);
6958 x86_pmu.pebs_ept = 1;
6959 x86_pmu.hw_config = hsw_hw_config;
6960 x86_pmu.get_event_constraints = glc_get_event_constraints;
6961 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
6962 hsw_format_attr : nhm_format_attr;
6963 extra_skl_attr = skl_format_attr;
6964 mem_attr = glc_events_attrs;
6965 td_attr = glc_td_events_attrs;
6966 tsx_attr = glc_tsx_events_attrs;
6967 intel_pmu_pebs_data_source_skl(true);
6968 break;
6969
6970 case INTEL_ALDERLAKE:
6971 case INTEL_ALDERLAKE_L:
6972 case INTEL_RAPTORLAKE:
6973 case INTEL_RAPTORLAKE_P:
6974 case INTEL_RAPTORLAKE_S:
6975 /*
6976 * Alder Lake has two types of CPUs: core and atom.
6977 *
6978 * Initialize the common PerfMon capabilities here.
6979 */
6980 intel_pmu_init_hybrid(hybrid_big_small);
6981
6982 x86_pmu.pebs_latency_data = grt_latency_data;
6983 x86_pmu.get_event_constraints = adl_get_event_constraints;
6984 x86_pmu.hw_config = adl_hw_config;
6985 x86_pmu.get_hybrid_cpu_type = adl_get_hybrid_cpu_type;
6986
6987 td_attr = adl_hybrid_events_attrs;
6988 mem_attr = adl_hybrid_mem_attrs;
6989 tsx_attr = adl_hybrid_tsx_attrs;
6990 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
6991 adl_hybrid_extra_attr_rtm : adl_hybrid_extra_attr;
6992
6993 /* Initialize big core specific PerfMon capabilities. */
6994 pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
6995 intel_pmu_init_glc(&pmu->pmu);
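/*
 * On a hybrid part, CPUID leaf 0xA only enumerates the counters common
 * to both core types, so widen the big-core masks by the two extra GP
 * counters and the one extra fixed counter the P-cores provide on top
 * of that baseline.
 */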
6996 if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) {
6997 pmu->cntr_mask64 <<= 2;
6998 pmu->cntr_mask64 |= 0x3;
6999 pmu->fixed_cntr_mask64 <<= 1;
7000 pmu->fixed_cntr_mask64 |= 0x1;
7001 } else {
7002 pmu->cntr_mask64 = x86_pmu.cntr_mask64;
7003 pmu->fixed_cntr_mask64 = x86_pmu.fixed_cntr_mask64;
7004 }
7005
7006 /*
7007 * Quirk: on some Alder Lake machines, when all E-cores are disabled in
7008 * the BIOS, CPUID leaf 0xA already enumerates all of the P-core counters,
7009 * yet X86_FEATURE_HYBRID_CPU is still set. The code above would then
7010 * mistakenly add extra counters for the P-cores. Correct the counter
7011 * masks here.
7012 */
7013 if ((x86_pmu_num_counters(&pmu->pmu) > 8) || (x86_pmu_num_counters_fixed(&pmu->pmu) > 4)) {
7014 pmu->cntr_mask64 = x86_pmu.cntr_mask64;
7015 pmu->fixed_cntr_mask64 = x86_pmu.fixed_cntr_mask64;
7016 }
7017
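/*
 * Rederive the PEBS-capable counter mask and the default "unconstrained"
 * scheduling constraint from whatever big-core counter mask survived the
 * fixups above.
 */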
7018 pmu->pebs_events_mask = intel_pmu_pebs_mask(pmu->cntr_mask64);
7019 pmu->unconstrained = (struct event_constraint)
7020 __EVENT_CONSTRAINT(0, pmu->cntr_mask64,
7021 0, x86_pmu_num_counters(&pmu->pmu), 0, 0);
7022
7023 pmu->extra_regs = intel_glc_extra_regs;
7024
7025 /* Initialize Atom core specific PerfMon capabilities. */
7026 pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
7027 intel_pmu_init_grt(&pmu->pmu);
7028
7029 x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX;
7030 intel_pmu_pebs_data_source_adl();
7031 pr_cont("Alderlake Hybrid events, ");
7032 name = "alderlake_hybrid";
7033 break;
7034
7035 case INTEL_METEORLAKE:
7036 case INTEL_METEORLAKE_L:
7037 intel_pmu_init_hybrid(hybrid_big_small);
7038
7039 x86_pmu.pebs_latency_data = cmt_latency_data;
7040 x86_pmu.get_event_constraints = mtl_get_event_constraints;
7041 x86_pmu.hw_config = adl_hw_config;
7042
7043 td_attr = adl_hybrid_events_attrs;
7044 mem_attr = mtl_hybrid_mem_attrs;
7045 tsx_attr = adl_hybrid_tsx_attrs;
7046 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
7047 mtl_hybrid_extra_attr_rtm : mtl_hybrid_extra_attr;
7048
7049 /* Initialize big core specific PerfMon capabilities. */
7050 pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
7051 intel_pmu_init_glc(&pmu->pmu);
7052 pmu->extra_regs = intel_rwc_extra_regs;
7053
7054 /* Initialize Atom core specific PerfMon capabilities. */
7055 pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
7056 intel_pmu_init_grt(&pmu->pmu);
7057 pmu->extra_regs = intel_cmt_extra_regs;
7058
7059 intel_pmu_pebs_data_source_mtl();
7060 pr_cont("Meteorlake Hybrid events, ");
7061 name = "meteorlake_hybrid";
7062 break;
7063
7064 case INTEL_LUNARLAKE_M:
7065 case INTEL_ARROWLAKE:
7066 intel_pmu_init_hybrid(hybrid_big_small);
7067
7068 x86_pmu.pebs_latency_data = lnl_latency_data;
7069 x86_pmu.get_event_constraints = mtl_get_event_constraints;
7070 x86_pmu.hw_config = adl_hw_config;
7071
7072 td_attr = lnl_hybrid_events_attrs;
7073 mem_attr = mtl_hybrid_mem_attrs;
7074 tsx_attr = adl_hybrid_tsx_attrs;
7075 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
7076 mtl_hybrid_extra_attr_rtm : mtl_hybrid_extra_attr;
7077
7078 /* Initialize big core specific PerfMon capabilities. */
7079 pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
7080 intel_pmu_init_lnc(&pmu->pmu);
7081
7082 /* Initialize Atom core specific PerfMon capabilities. */
7083 pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
7084 intel_pmu_init_skt(&pmu->pmu);
7085
7086 intel_pmu_pebs_data_source_lnl();
7087 pr_cont("Lunarlake Hybrid events, ");
7088 name = "lunarlake_hybrid";
7089 break;
7090
7091 default:
7092 switch (x86_pmu.version) {
7093 case 1:
7094 x86_pmu.event_constraints = intel_v1_event_constraints;
7095 pr_cont("generic architected perfmon v1, ");
7096 name = "generic_arch_v1";
7097 break;
7098 case 2:
7099 case 3:
7100 case 4:
7101 /*
7102 * default constraints for v2 and up
7103 */
7104 x86_pmu.event_constraints = intel_gen_event_constraints;
7105 pr_cont("generic architected perfmon, ");
7106 name = "generic_arch_v2+";
7107 break;
7108 default:
7109 /*
7110 * The default constraints for v5 and up can support up to
7111 * 16 fixed counters. For fixed counter 4 and later, the
7112 * pseudo-encoding is used.
7113 * The constraint table is truncated to the CPUID-enumerated
7114 * number of fixed counters by inserting EVENT_CONSTRAINT_END.
7115 */
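/*
 * EVENT_CONSTRAINT_END is simply an entry with .weight == -1, which is
 * what the assignment below plants at the first index past the last
 * enumerated fixed counter.
 */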
7116 if (fls64(x86_pmu.fixed_cntr_mask64) > INTEL_PMC_MAX_FIXED)
7117 x86_pmu.fixed_cntr_mask64 &= GENMASK_ULL(INTEL_PMC_MAX_FIXED - 1, 0);
7118 intel_v5_gen_event_constraints[fls64(x86_pmu.fixed_cntr_mask64)].weight = -1;
7119 x86_pmu.event_constraints = intel_v5_gen_event_constraints;
7120 pr_cont("generic architected perfmon, ");
7121 name = "generic_arch_v5+";
7122 break;
7123 }
7124 }
7125
7126 snprintf(pmu_name_str, sizeof(pmu_name_str), "%s", name);
7127
7128 if (!is_hybrid()) {
7129 group_events_td.attrs = td_attr;
7130 group_events_mem.attrs = mem_attr;
7131 group_events_tsx.attrs = tsx_attr;
7132 group_format_extra.attrs = extra_attr;
7133 group_format_extra_skl.attrs = extra_skl_attr;
7134
7135 x86_pmu.attr_update = attr_update;
7136 } else {
7137 hybrid_group_events_td.attrs = td_attr;
7138 hybrid_group_events_mem.attrs = mem_attr;
7139 hybrid_group_events_tsx.attrs = tsx_attr;
7140 hybrid_group_format_extra.attrs = extra_attr;
7141
7142 x86_pmu.attr_update = hybrid_attr_update;
7143 }
7144
7145 intel_pmu_check_counters_mask(&x86_pmu.cntr_mask64,
7146 &x86_pmu.fixed_cntr_mask64,
7147 &x86_pmu.intel_ctrl);
7148
7149 /* AnyThread may be deprecated on arch perfmon v5 or later */
7150 if (x86_pmu.intel_cap.anythread_deprecated)
7151 x86_pmu.format_attrs = intel_arch_formats_attr;
7152
7153 intel_pmu_check_event_constraints(x86_pmu.event_constraints,
7154 x86_pmu.cntr_mask64,
7155 x86_pmu.fixed_cntr_mask64,
7156 x86_pmu.intel_ctrl);
7157 /*
7158 * Accessing the LBR MSRs may cause a #GP under certain circumstances.
7159 * Check all LBR MSRs here and disable LBR use
7160 * if any of them cannot be accessed.
7161 */
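/*
 * check_msr() probes an MSR with a write/read round-trip (restoring the
 * original value) and fails when the access faults or the written bits
 * do not stick; partially emulated MSRs under a hypervisor are the
 * typical reason.
 */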
7162 if (x86_pmu.lbr_tos && !check_msr(x86_pmu.lbr_tos, 0x3UL))
7163 x86_pmu.lbr_nr = 0;
7164 for (i = 0; i < x86_pmu.lbr_nr; i++) {
7165 if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
7166 check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
7167 x86_pmu.lbr_nr = 0;
7168 }
7169
7170 if (x86_pmu.lbr_nr) {
7171 intel_pmu_lbr_init();
7172
7173 pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr);
7174
7175 /* only support branch_stack snapshot for perfmon >= v2 */
7176 if (x86_pmu.disable_all == intel_pmu_disable_all) {
7177 if (boot_cpu_has(X86_FEATURE_ARCH_LBR)) {
7178 static_call_update(perf_snapshot_branch_stack,
7179 intel_pmu_snapshot_arch_branch_stack);
7180 } else {
7181 static_call_update(perf_snapshot_branch_stack,
7182 intel_pmu_snapshot_branch_stack);
7183 }
7184 }
7185 }
7186
7187 intel_pmu_check_extra_regs(x86_pmu.extra_regs);
7188
7189 /* Support full width counters using alternative MSR range */
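/*
 * The legacy counter MSRs only accept a sign-extended 32-bit write; the
 * MSR_IA32_PMC0 alias range accepts full-width writes, which is why
 * max_period can be raised to half of the full counter range here.
 */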
7190 if (x86_pmu.intel_cap.full_width_write) {
7191 x86_pmu.max_period = x86_pmu.cntval_mask >> 1;
7192 x86_pmu.perfctr = MSR_IA32_PMC0;
7193 pr_cont("full-width counters, ");
7194 }
7195
7196 /* Support V6+ MSR Aliasing */
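/*
 * In the V6 alias layout each counter's config and counter MSRs sit at
 * a fixed stride from the GP0/FX0 base addresses, so the generic code
 * needs intel_pmu_v6_addr_offset() instead of the legacy contiguous
 * MSR numbering.
 */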
7197 if (x86_pmu.version >= 6) {
7198 x86_pmu.perfctr = MSR_IA32_PMC_V6_GP0_CTR;
7199 x86_pmu.eventsel = MSR_IA32_PMC_V6_GP0_CFG_A;
7200 x86_pmu.fixedctr = MSR_IA32_PMC_V6_FX0_CTR;
7201 x86_pmu.addr_offset = intel_pmu_v6_addr_offset;
7202 }
7203
7204 if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics)
7205 x86_pmu.intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS;
7206
7207 if (x86_pmu.intel_cap.pebs_timing_info)
7208 x86_pmu.flags |= PMU_FL_RETIRE_LATENCY;
7209
7210 intel_aux_output_init();
7211
7212 return 0;
7213 }
7214
7215 /*
7216 * HT bug: phase 2 init
7217 * Called once we have valid topology information to check
7218 * whether or not HT is enabled
7219 * If HT is off, then we disable the workaround
7220 */
7221 static __init int fixup_ht_bug(void)
7222 {
7223 int c;
7224 /*
7225 * problem not present on this CPU model, nothing to do
7226 */
7227 if (!(x86_pmu.flags & PMU_FL_EXCL_ENABLED))
7228 return 0;
7229
7230 if (topology_max_smt_threads() > 1) {
7231 pr_info("PMU erratum BJ122, BV98, HSD29 worked around, HT is on\n");
7232 return 0;
7233 }
7234
7235 cpus_read_lock();
7236
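/*
 * The perf-based NMI watchdog keeps an event live on every CPU. Park it
 * while the workaround state is torn down so those events let go of the
 * exclusive-counter bookkeeping, and bring it back once the workaround's
 * scheduling callbacks have been removed.
 */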
7237 hardlockup_detector_perf_stop();
7238
7239 x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED);
7240
7241 x86_pmu.start_scheduling = NULL;
7242 x86_pmu.commit_scheduling = NULL;
7243 x86_pmu.stop_scheduling = NULL;
7244
7245 hardlockup_detector_perf_restart();
7246
7247 for_each_online_cpu(c)
7248 free_excl_cntrs(&per_cpu(cpu_hw_events, c));
7249
7250 cpus_read_unlock();
7251 pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
7252 return 0;
7253 }
7254 subsys_initcall(fixup_ht_bug);
7255