// SPDX-License-Identifier: GPL-2.0
#include <linux/perf_event.h>
#include <linux/types.h>

#include <asm/perf_event.h>
#include <asm/msr.h>
#include <asm/insn.h>

#include "../perf_event.h"

static const enum {
	LBR_EIP_FLAGS		= 1,
	LBR_TSX			= 2,
} lbr_desc[LBR_FORMAT_MAX_KNOWN + 1] = {
	[LBR_FORMAT_EIP_FLAGS]  = LBR_EIP_FLAGS,
	[LBR_FORMAT_EIP_FLAGS2] = LBR_EIP_FLAGS | LBR_TSX,
};
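
/*
 * For these older EIP_FLAGS formats the mispredict/TSX flags are encoded in
 * the top bits of MSR_LAST_BRANCH_FROM_x itself; intel_pmu_lbr_read_64() uses
 * this table to know which flag bits to extract and strip from the address.
 */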

/*
 * Intel LBR_SELECT bits
 * Intel Vol3a, April 2011, Section 16.7 Table 16-10
 *
 * Hardware branch filter (not available on all CPUs)
 */
#define LBR_KERNEL_BIT		0 /* do not capture at ring0 */
#define LBR_USER_BIT		1 /* do not capture at ring > 0 */
#define LBR_JCC_BIT		2 /* do not capture conditional branches */
#define LBR_REL_CALL_BIT	3 /* do not capture relative calls */
#define LBR_IND_CALL_BIT	4 /* do not capture indirect calls */
#define LBR_RETURN_BIT		5 /* do not capture near returns */
#define LBR_IND_JMP_BIT		6 /* do not capture indirect jumps */
#define LBR_REL_JMP_BIT		7 /* do not capture relative jumps */
#define LBR_FAR_BIT		8 /* do not capture far branches */
#define LBR_CALL_STACK_BIT	9 /* enable call stack */

/*
 * Following bit only exists in Linux; we mask it out before writing it to
 * the actual MSR. But it helps the constraint perf code to understand
 * that this is a separate configuration.
 */
#define LBR_NO_INFO_BIT	       63 /* don't read LBR_INFO. */

#define LBR_KERNEL	(1 << LBR_KERNEL_BIT)
#define LBR_USER	(1 << LBR_USER_BIT)
#define LBR_JCC		(1 << LBR_JCC_BIT)
#define LBR_REL_CALL	(1 << LBR_REL_CALL_BIT)
#define LBR_IND_CALL	(1 << LBR_IND_CALL_BIT)
#define LBR_RETURN	(1 << LBR_RETURN_BIT)
#define LBR_REL_JMP	(1 << LBR_REL_JMP_BIT)
#define LBR_IND_JMP	(1 << LBR_IND_JMP_BIT)
#define LBR_FAR		(1 << LBR_FAR_BIT)
#define LBR_CALL_STACK	(1 << LBR_CALL_STACK_BIT)
#define LBR_NO_INFO	(1ULL << LBR_NO_INFO_BIT)

#define LBR_PLM (LBR_KERNEL | LBR_USER)

#define LBR_SEL_MASK	0x3ff	/* valid bits in LBR_SELECT */
#define LBR_NOT_SUPP	-1	/* LBR filter not supported */
#define LBR_IGN		0	/* ignored */

#define LBR_ANY		 \
	(LBR_JCC	|\
	 LBR_REL_CALL	|\
	 LBR_IND_CALL	|\
	 LBR_RETURN	|\
	 LBR_REL_JMP	|\
	 LBR_IND_JMP	|\
	 LBR_FAR)

#define LBR_FROM_FLAG_MISPRED	BIT_ULL(63)
#define LBR_FROM_FLAG_IN_TX	BIT_ULL(62)
#define LBR_FROM_FLAG_ABORT	BIT_ULL(61)

#define LBR_FROM_SIGNEXT_2MSB	(BIT_ULL(60) | BIT_ULL(59))
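
/*
 * With bits 62:61 repurposed for the TSX flags above, bits 60:59 are the two
 * most significant bits of the sign-extended FROM address; the signext quirk
 * helpers below use them to rebuild or truncate the sign extension.
 */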

/*
 * x86 control flow change classification
 * x86 control flow changes include branches, interrupts, traps, faults
 */
enum {
	X86_BR_NONE		= 0,      /* unknown */

	X86_BR_USER		= 1 << 0, /* branch target is user */
	X86_BR_KERNEL		= 1 << 1, /* branch target is kernel */

	X86_BR_CALL		= 1 << 2, /* call */
	X86_BR_RET		= 1 << 3, /* return */
	X86_BR_SYSCALL		= 1 << 4, /* syscall */
	X86_BR_SYSRET		= 1 << 5, /* syscall return */
	X86_BR_INT		= 1 << 6, /* sw interrupt */
	X86_BR_IRET		= 1 << 7, /* return from interrupt */
	X86_BR_JCC		= 1 << 8, /* conditional */
	X86_BR_JMP		= 1 << 9, /* jump */
	X86_BR_IRQ		= 1 << 10,/* hw interrupt or trap or fault */
	X86_BR_IND_CALL		= 1 << 11,/* indirect calls */
	X86_BR_ABORT		= 1 << 12,/* transaction abort */
	X86_BR_IN_TX		= 1 << 13,/* in transaction */
	X86_BR_NO_TX		= 1 << 14,/* not in transaction */
	X86_BR_ZERO_CALL	= 1 << 15,/* zero length call */
	X86_BR_CALL_STACK	= 1 << 16,/* call stack */
	X86_BR_IND_JMP		= 1 << 17,/* indirect jump */

	X86_BR_TYPE_SAVE	= 1 << 18,/* indicate to save branch type */

};

#define X86_BR_PLM (X86_BR_USER | X86_BR_KERNEL)
#define X86_BR_ANYTX (X86_BR_NO_TX | X86_BR_IN_TX)

#define X86_BR_ANY       \
	(X86_BR_CALL    |\
	 X86_BR_RET     |\
	 X86_BR_SYSCALL |\
	 X86_BR_SYSRET  |\
	 X86_BR_INT     |\
	 X86_BR_IRET    |\
	 X86_BR_JCC     |\
	 X86_BR_JMP     |\
	 X86_BR_IRQ     |\
	 X86_BR_ABORT   |\
	 X86_BR_IND_CALL |\
	 X86_BR_IND_JMP  |\
	 X86_BR_ZERO_CALL)

#define X86_BR_ALL (X86_BR_PLM | X86_BR_ANY)

#define X86_BR_ANY_CALL		 \
	(X86_BR_CALL		|\
	 X86_BR_IND_CALL	|\
	 X86_BR_ZERO_CALL	|\
	 X86_BR_SYSCALL		|\
	 X86_BR_IRQ		|\
	 X86_BR_INT)

/*
 * Intel LBR_CTL bits
 *
 * Hardware branch filter for Arch LBR
 */
#define ARCH_LBR_KERNEL_BIT		1  /* capture at ring0 */
#define ARCH_LBR_USER_BIT		2  /* capture at ring > 0 */
#define ARCH_LBR_CALL_STACK_BIT		3  /* enable call stack */
#define ARCH_LBR_JCC_BIT		16 /* capture conditional branches */
#define ARCH_LBR_REL_JMP_BIT		17 /* capture relative jumps */
#define ARCH_LBR_IND_JMP_BIT		18 /* capture indirect jumps */
#define ARCH_LBR_REL_CALL_BIT		19 /* capture relative calls */
#define ARCH_LBR_IND_CALL_BIT		20 /* capture indirect calls */
#define ARCH_LBR_RETURN_BIT		21 /* capture near returns */
#define ARCH_LBR_OTHER_BRANCH_BIT	22 /* capture other branches */

#define ARCH_LBR_KERNEL			(1ULL << ARCH_LBR_KERNEL_BIT)
#define ARCH_LBR_USER			(1ULL << ARCH_LBR_USER_BIT)
#define ARCH_LBR_CALL_STACK		(1ULL << ARCH_LBR_CALL_STACK_BIT)
#define ARCH_LBR_JCC			(1ULL << ARCH_LBR_JCC_BIT)
#define ARCH_LBR_REL_JMP		(1ULL << ARCH_LBR_REL_JMP_BIT)
#define ARCH_LBR_IND_JMP		(1ULL << ARCH_LBR_IND_JMP_BIT)
#define ARCH_LBR_REL_CALL		(1ULL << ARCH_LBR_REL_CALL_BIT)
#define ARCH_LBR_IND_CALL		(1ULL << ARCH_LBR_IND_CALL_BIT)
#define ARCH_LBR_RETURN			(1ULL << ARCH_LBR_RETURN_BIT)
#define ARCH_LBR_OTHER_BRANCH		(1ULL << ARCH_LBR_OTHER_BRANCH_BIT)

#define ARCH_LBR_ANY			 \
	(ARCH_LBR_JCC			|\
	 ARCH_LBR_REL_JMP		|\
	 ARCH_LBR_IND_JMP		|\
	 ARCH_LBR_REL_CALL		|\
	 ARCH_LBR_IND_CALL		|\
	 ARCH_LBR_RETURN		|\
	 ARCH_LBR_OTHER_BRANCH)

#define ARCH_LBR_CTL_MASK			0x7f000e
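
/*
 * The mask above covers the CPL filter bits (3:1) and the branch-type filter
 * bits (22:16) of MSR_ARCH_LBR_CTL.
 */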
172
173 static void intel_pmu_lbr_filter(struct cpu_hw_events *cpuc);
174
is_lbr_call_stack_bit_set(u64 config)175 static __always_inline bool is_lbr_call_stack_bit_set(u64 config)
176 {
177 if (static_cpu_has(X86_FEATURE_ARCH_LBR))
178 return !!(config & ARCH_LBR_CALL_STACK);
179
180 return !!(config & LBR_CALL_STACK);
181 }
182
183 /*
184 * We only support LBR implementations that have FREEZE_LBRS_ON_PMI
185 * otherwise it becomes near impossible to get a reliable stack.
186 */
187
__intel_pmu_lbr_enable(bool pmi)188 static void __intel_pmu_lbr_enable(bool pmi)
189 {
190 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
191 u64 debugctl, lbr_select = 0, orig_debugctl;
192
193 /*
194 * No need to unfreeze manually, as v4 can do that as part
195 * of the GLOBAL_STATUS ack.
196 */
197 if (pmi && x86_pmu.version >= 4)
198 return;
199
200 /*
201 * No need to reprogram LBR_SELECT in a PMI, as it
202 * did not change.
203 */
204 if (cpuc->lbr_sel)
205 lbr_select = cpuc->lbr_sel->config & x86_pmu.lbr_sel_mask;
206 if (!static_cpu_has(X86_FEATURE_ARCH_LBR) && !pmi && cpuc->lbr_sel)
207 wrmsrl(MSR_LBR_SELECT, lbr_select);
208
209 rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
210 orig_debugctl = debugctl;
211
212 if (!static_cpu_has(X86_FEATURE_ARCH_LBR))
213 debugctl |= DEBUGCTLMSR_LBR;
214 /*
215 * LBR callstack does not work well with FREEZE_LBRS_ON_PMI.
216 * If FREEZE_LBRS_ON_PMI is set, PMI near call/return instructions
217 * may cause superfluous increase/decrease of LBR_TOS.
218 */
219 if (is_lbr_call_stack_bit_set(lbr_select))
220 debugctl &= ~DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
221 else
222 debugctl |= DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
223
224 if (orig_debugctl != debugctl)
225 wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
226
227 if (static_cpu_has(X86_FEATURE_ARCH_LBR))
228 wrmsrl(MSR_ARCH_LBR_CTL, lbr_select | ARCH_LBR_CTL_LBREN);
229 }
230
__intel_pmu_lbr_disable(void)231 static void __intel_pmu_lbr_disable(void)
232 {
233 u64 debugctl;
234
235 if (static_cpu_has(X86_FEATURE_ARCH_LBR)) {
236 wrmsrl(MSR_ARCH_LBR_CTL, 0);
237 return;
238 }
239
240 rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
241 debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
242 wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
243 }
244
intel_pmu_lbr_reset_32(void)245 void intel_pmu_lbr_reset_32(void)
246 {
247 int i;
248
249 for (i = 0; i < x86_pmu.lbr_nr; i++)
250 wrmsrl(x86_pmu.lbr_from + i, 0);
251 }
252
intel_pmu_lbr_reset_64(void)253 void intel_pmu_lbr_reset_64(void)
254 {
255 int i;
256
257 for (i = 0; i < x86_pmu.lbr_nr; i++) {
258 wrmsrl(x86_pmu.lbr_from + i, 0);
259 wrmsrl(x86_pmu.lbr_to + i, 0);
260 if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
261 wrmsrl(x86_pmu.lbr_info + i, 0);
262 }
263 }
264
intel_pmu_arch_lbr_reset(void)265 static void intel_pmu_arch_lbr_reset(void)
266 {
267 /* Write to ARCH_LBR_DEPTH MSR, all LBR entries are reset to 0 */
268 wrmsrl(MSR_ARCH_LBR_DEPTH, x86_pmu.lbr_nr);
269 }
270
intel_pmu_lbr_reset(void)271 void intel_pmu_lbr_reset(void)
272 {
273 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
274
275 if (!x86_pmu.lbr_nr)
276 return;
277
278 x86_pmu.lbr_reset();
279
280 cpuc->last_task_ctx = NULL;
281 cpuc->last_log_id = 0;
282 }
283
284 /*
285 * TOS = most recently recorded branch
286 */
intel_pmu_lbr_tos(void)287 static inline u64 intel_pmu_lbr_tos(void)
288 {
289 u64 tos;
290
291 rdmsrl(x86_pmu.lbr_tos, tos);
292 return tos;
293 }
294
295 enum {
296 LBR_NONE,
297 LBR_VALID,
298 };
299
300 /*
301 * For formats with LBR_TSX flags (e.g. LBR_FORMAT_EIP_FLAGS2), bits 61:62 in
302 * MSR_LAST_BRANCH_FROM_x are the TSX flags when TSX is supported, but when
303 * TSX is not supported they have no consistent behavior:
304 *
305 * - For wrmsr(), bits 61:62 are considered part of the sign extension.
306 * - For HW updates (branch captures) bits 61:62 are always OFF and are not
307 * part of the sign extension.
308 *
309 * Therefore, if:
310 *
311 * 1) LBR has TSX format
312 * 2) CPU has no TSX support enabled
313 *
314 * ... then any value passed to wrmsr() must be sign extended to 63 bits and any
315 * value from rdmsr() must be converted to have a 61 bits sign extension,
316 * ignoring the TSX flags.
317 */
lbr_from_signext_quirk_needed(void)318 static inline bool lbr_from_signext_quirk_needed(void)
319 {
320 int lbr_format = x86_pmu.intel_cap.lbr_format;
321 bool tsx_support = boot_cpu_has(X86_FEATURE_HLE) ||
322 boot_cpu_has(X86_FEATURE_RTM);
323
324 return !tsx_support && (lbr_desc[lbr_format] & LBR_TSX);
325 }
326
327 static DEFINE_STATIC_KEY_FALSE(lbr_from_quirk_key);
328
329 /* If quirk is enabled, ensure sign extension is 63 bits: */
lbr_from_signext_quirk_wr(u64 val)330 inline u64 lbr_from_signext_quirk_wr(u64 val)
331 {
332 if (static_branch_unlikely(&lbr_from_quirk_key)) {
333 /*
334 * Sign extend into bits 61:62 while preserving bit 63.
335 *
336 * Quirk is enabled when TSX is disabled. Therefore TSX bits
337 * in val are always OFF and must be changed to be sign
338 * extension bits. Since bits 59:60 are guaranteed to be
339 * part of the sign extension bits, we can just copy them
340 * to 61:62.
341 */
342 val |= (LBR_FROM_SIGNEXT_2MSB & val) << 2;
343 }
344 return val;
345 }
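
/*
 * Worked example: a hardware-captured kernel FROM value of
 * 0x9fff888012345678 (bit 63 = mispredict flag, bits 62:61 = 0 because TSX
 * is off, bits 60:48 = sign extension) has bits 60:59 copied up into 62:61
 * by the helper above, yielding the fully sign-extended 0xffff888012345678.
 */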

/*
 * If quirk is needed, ensure sign extension is 61 bits:
 */
static u64 lbr_from_signext_quirk_rd(u64 val)
{
	if (static_branch_unlikely(&lbr_from_quirk_key)) {
		/*
		 * Quirk is on when TSX is not enabled. Therefore TSX
		 * flags must be read as OFF.
		 */
		val &= ~(LBR_FROM_FLAG_IN_TX | LBR_FROM_FLAG_ABORT);
	}
	return val;
}

static __always_inline void wrlbr_from(unsigned int idx, u64 val)
{
	val = lbr_from_signext_quirk_wr(val);
	wrmsrl(x86_pmu.lbr_from + idx, val);
}

static __always_inline void wrlbr_to(unsigned int idx, u64 val)
{
	wrmsrl(x86_pmu.lbr_to + idx, val);
}

static __always_inline void wrlbr_info(unsigned int idx, u64 val)
{
	wrmsrl(x86_pmu.lbr_info + idx, val);
}

static __always_inline u64 rdlbr_from(unsigned int idx, struct lbr_entry *lbr)
{
	u64 val;

	if (lbr)
		return lbr->from;

	rdmsrl(x86_pmu.lbr_from + idx, val);

	return lbr_from_signext_quirk_rd(val);
}

static __always_inline u64 rdlbr_to(unsigned int idx, struct lbr_entry *lbr)
{
	u64 val;

	if (lbr)
		return lbr->to;

	rdmsrl(x86_pmu.lbr_to + idx, val);

	return val;
}

static __always_inline u64 rdlbr_info(unsigned int idx, struct lbr_entry *lbr)
{
	u64 val;

	if (lbr)
		return lbr->info;

	rdmsrl(x86_pmu.lbr_info + idx, val);

	return val;
}

static inline void
wrlbr_all(struct lbr_entry *lbr, unsigned int idx, bool need_info)
{
	wrlbr_from(idx, lbr->from);
	wrlbr_to(idx, lbr->to);
	if (need_info)
		wrlbr_info(idx, lbr->info);
}

static inline bool
rdlbr_all(struct lbr_entry *lbr, unsigned int idx, bool need_info)
{
	u64 from = rdlbr_from(idx, NULL);

	/* Don't read invalid entry */
	if (!from)
		return false;

	lbr->from = from;
	lbr->to = rdlbr_to(idx, NULL);
	if (need_info)
		lbr->info = rdlbr_info(idx, NULL);

	return true;
}

void intel_pmu_lbr_restore(void *ctx)
{
	bool need_info = x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO;
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct x86_perf_task_context *task_ctx = ctx;
	int i;
	unsigned lbr_idx, mask;
	u64 tos = task_ctx->tos;

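	/*
	 * lbr_nr is a power of two for the model-specific LBR
	 * implementations initialized below (4, 8, 16 or 32 entries), so
	 * the mask turns (tos - i) into a wrapping ring-buffer index.
	 */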
	mask = x86_pmu.lbr_nr - 1;
	for (i = 0; i < task_ctx->valid_lbrs; i++) {
		lbr_idx = (tos - i) & mask;
		wrlbr_all(&task_ctx->lbr[i], lbr_idx, need_info);
	}

	for (; i < x86_pmu.lbr_nr; i++) {
		lbr_idx = (tos - i) & mask;
		wrlbr_from(lbr_idx, 0);
		wrlbr_to(lbr_idx, 0);
		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
			wrlbr_info(lbr_idx, 0);
	}

	wrmsrl(x86_pmu.lbr_tos, tos);

	if (cpuc->lbr_select)
		wrmsrl(MSR_LBR_SELECT, task_ctx->lbr_sel);
}

static void intel_pmu_arch_lbr_restore(void *ctx)
{
	struct x86_perf_task_context_arch_lbr *task_ctx = ctx;
	struct lbr_entry *entries = task_ctx->entries;
	int i;

	/* Fast reset the LBRs before restore if the call stack is not full. */
	if (!entries[x86_pmu.lbr_nr - 1].from)
		intel_pmu_arch_lbr_reset();

	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		if (!entries[i].from)
			break;
		wrlbr_all(&entries[i], i, true);
	}
}

/*
 * Restore the Architecture LBR state from the xsave area in the perf
 * context data for the task via the XRSTORS instruction.
 */
static void intel_pmu_arch_lbr_xrstors(void *ctx)
{
	struct x86_perf_task_context_arch_lbr_xsave *task_ctx = ctx;

	copy_kernel_to_dynamic_supervisor(&task_ctx->xsave, XFEATURE_MASK_LBR);
}

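/*
 * If the most recently saved LBR entry reads back as zero, the registers
 * were wiped (e.g. by a deep C-state) after we saved them, so a full restore
 * is needed. For Arch LBR the check only applies when the CPU enumerates
 * lbr_deep_c_reset; entry 0 holds the newest record there.
 */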
static __always_inline bool lbr_is_reset_in_cstate(void *ctx)
{
	if (static_cpu_has(X86_FEATURE_ARCH_LBR))
		return x86_pmu.lbr_deep_c_reset && !rdlbr_from(0, NULL);

	return !rdlbr_from(((struct x86_perf_task_context *)ctx)->tos, NULL);
}

static void __intel_pmu_lbr_restore(void *ctx)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (task_context_opt(ctx)->lbr_callstack_users == 0 ||
	    task_context_opt(ctx)->lbr_stack_state == LBR_NONE) {
		intel_pmu_lbr_reset();
		return;
	}

	/*
	 * Do not restore the LBR registers if:
	 * - no one else touched them, and
	 * - they were not cleared in a C-state
	 */
	if ((ctx == cpuc->last_task_ctx) &&
	    (task_context_opt(ctx)->log_id == cpuc->last_log_id) &&
	    !lbr_is_reset_in_cstate(ctx)) {
		task_context_opt(ctx)->lbr_stack_state = LBR_NONE;
		return;
	}

	x86_pmu.lbr_restore(ctx);

	task_context_opt(ctx)->lbr_stack_state = LBR_NONE;
}

void intel_pmu_lbr_save(void *ctx)
{
	bool need_info = x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO;
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct x86_perf_task_context *task_ctx = ctx;
	unsigned lbr_idx, mask;
	u64 tos;
	int i;

	mask = x86_pmu.lbr_nr - 1;
	tos = intel_pmu_lbr_tos();
	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		lbr_idx = (tos - i) & mask;
		if (!rdlbr_all(&task_ctx->lbr[i], lbr_idx, need_info))
			break;
	}
	task_ctx->valid_lbrs = i;
	task_ctx->tos = tos;

	if (cpuc->lbr_select)
		rdmsrl(MSR_LBR_SELECT, task_ctx->lbr_sel);
}

static void intel_pmu_arch_lbr_save(void *ctx)
{
	struct x86_perf_task_context_arch_lbr *task_ctx = ctx;
	struct lbr_entry *entries = task_ctx->entries;
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		if (!rdlbr_all(&entries[i], i, true))
			break;
	}

	/* LBR call stack is not full. Reset is required in restore. */
	if (i < x86_pmu.lbr_nr)
		entries[x86_pmu.lbr_nr - 1].from = 0;
}

/*
 * Save the Architecture LBR state to the xsave area in the perf
 * context data for the task via the XSAVES instruction.
 */
static void intel_pmu_arch_lbr_xsaves(void *ctx)
{
	struct x86_perf_task_context_arch_lbr_xsave *task_ctx = ctx;

	copy_dynamic_supervisor_to_kernel(&task_ctx->xsave, XFEATURE_MASK_LBR);
}

static void __intel_pmu_lbr_save(void *ctx)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (task_context_opt(ctx)->lbr_callstack_users == 0) {
		task_context_opt(ctx)->lbr_stack_state = LBR_NONE;
		return;
	}

	x86_pmu.lbr_save(ctx);

	task_context_opt(ctx)->lbr_stack_state = LBR_VALID;

	cpuc->last_task_ctx = ctx;
	cpuc->last_log_id = ++task_context_opt(ctx)->log_id;
}

void intel_pmu_lbr_swap_task_ctx(struct perf_event_context *prev,
				 struct perf_event_context *next)
{
	void *prev_ctx_data, *next_ctx_data;

	swap(prev->task_ctx_data, next->task_ctx_data);

	/*
	 * Architecture specific synchronization makes sense in
	 * case both prev->task_ctx_data and next->task_ctx_data
	 * pointers are allocated.
	 */

	prev_ctx_data = next->task_ctx_data;
	next_ctx_data = prev->task_ctx_data;

	if (!prev_ctx_data || !next_ctx_data)
		return;

	swap(task_context_opt(prev_ctx_data)->lbr_callstack_users,
	     task_context_opt(next_ctx_data)->lbr_callstack_users);
}

void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	void *task_ctx;

	if (!cpuc->lbr_users)
		return;

	/*
	 * If LBR callstack feature is enabled and the stack was saved when
	 * the task was scheduled out, restore the stack. Otherwise flush
	 * the LBR stack.
	 */
	task_ctx = ctx ? ctx->task_ctx_data : NULL;
	if (task_ctx) {
		if (sched_in)
			__intel_pmu_lbr_restore(task_ctx);
		else
			__intel_pmu_lbr_save(task_ctx);
		return;
	}

	/*
	 * Since a context switch can flip the address space and LBR entries
	 * are not tagged with an identifier, we need to wipe the LBR, even for
	 * per-cpu events. You simply cannot resolve the branches from the old
	 * address space.
	 */
	if (sched_in)
		intel_pmu_lbr_reset();
}

static inline bool branch_user_callstack(unsigned br_sel)
{
	return (br_sel & X86_BR_USER) && (br_sel & X86_BR_CALL_STACK);
}

void intel_pmu_lbr_add(struct perf_event *event)
{
	struct kmem_cache *kmem_cache = event->pmu->task_ctx_cache;
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (!x86_pmu.lbr_nr)
		return;

	if (event->hw.flags & PERF_X86_EVENT_LBR_SELECT)
		cpuc->lbr_select = 1;

	cpuc->br_sel = event->hw.branch_reg.reg;

	if (branch_user_callstack(cpuc->br_sel) && event->ctx->task_ctx_data)
		task_context_opt(event->ctx->task_ctx_data)->lbr_callstack_users++;

	/*
	 * Request pmu::sched_task() callback, which will fire inside the
	 * regular perf event scheduling, so that call will:
	 *
	 *  - restore or wipe; when LBR-callstack,
	 *  - wipe; otherwise,
	 *
	 * when this is from __perf_event_task_sched_in().
	 *
	 * However, if this is from perf_install_in_context(), no such callback
	 * will follow and we'll need to reset the LBR here if this is the
	 * first LBR event.
	 *
	 * The problem is, we cannot tell these cases apart... but we can
	 * exclude the biggest chunk of cases by looking at
	 * event->total_time_running. An event that has accrued runtime cannot
	 * be 'new'. Conversely, a new event can get installed through the
	 * context switch path for the first time.
	 */
	if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip > 0)
		cpuc->lbr_pebs_users++;
	perf_sched_cb_inc(event->ctx->pmu);
	if (!cpuc->lbr_users++ && !event->total_time_running)
		intel_pmu_lbr_reset();

	if (static_cpu_has(X86_FEATURE_ARCH_LBR) &&
	    kmem_cache && !cpuc->lbr_xsave &&
	    (cpuc->lbr_users != cpuc->lbr_pebs_users))
		cpuc->lbr_xsave = kmem_cache_alloc(kmem_cache, GFP_KERNEL);
}

void release_lbr_buffers(void)
{
	struct kmem_cache *kmem_cache;
	struct cpu_hw_events *cpuc;
	int cpu;

	if (!static_cpu_has(X86_FEATURE_ARCH_LBR))
		return;

	for_each_possible_cpu(cpu) {
		cpuc = per_cpu_ptr(&cpu_hw_events, cpu);
		kmem_cache = x86_get_pmu(cpu)->task_ctx_cache;
		if (kmem_cache && cpuc->lbr_xsave) {
			kmem_cache_free(kmem_cache, cpuc->lbr_xsave);
			cpuc->lbr_xsave = NULL;
		}
	}
}

void intel_pmu_lbr_del(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (!x86_pmu.lbr_nr)
		return;

	if (branch_user_callstack(cpuc->br_sel) &&
	    event->ctx->task_ctx_data)
		task_context_opt(event->ctx->task_ctx_data)->lbr_callstack_users--;

	if (event->hw.flags & PERF_X86_EVENT_LBR_SELECT)
		cpuc->lbr_select = 0;

	if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip > 0)
		cpuc->lbr_pebs_users--;
	cpuc->lbr_users--;
	WARN_ON_ONCE(cpuc->lbr_users < 0);
	WARN_ON_ONCE(cpuc->lbr_pebs_users < 0);
	perf_sched_cb_dec(event->ctx->pmu);
}

static inline bool vlbr_exclude_host(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	return test_bit(INTEL_PMC_IDX_FIXED_VLBR,
			(unsigned long *)&cpuc->intel_ctrl_guest_mask);
}

void intel_pmu_lbr_enable_all(bool pmi)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (cpuc->lbr_users && !vlbr_exclude_host())
		__intel_pmu_lbr_enable(pmi);
}

void intel_pmu_lbr_disable_all(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (cpuc->lbr_users && !vlbr_exclude_host())
		__intel_pmu_lbr_disable();
}

void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
{
	unsigned long mask = x86_pmu.lbr_nr - 1;
	u64 tos = intel_pmu_lbr_tos();
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		unsigned long lbr_idx = (tos - i) & mask;
		union {
			struct {
				u32 from;
				u32 to;
			};
			u64 lbr;
		} msr_lastbranch;

		rdmsrl(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr);

		cpuc->lbr_entries[i].from	= msr_lastbranch.from;
		cpuc->lbr_entries[i].to		= msr_lastbranch.to;
		cpuc->lbr_entries[i].mispred	= 0;
		cpuc->lbr_entries[i].predicted	= 0;
		cpuc->lbr_entries[i].in_tx	= 0;
		cpuc->lbr_entries[i].abort	= 0;
		cpuc->lbr_entries[i].cycles	= 0;
		cpuc->lbr_entries[i].type	= 0;
		cpuc->lbr_entries[i].reserved	= 0;
	}
	cpuc->lbr_stack.nr = i;
	cpuc->lbr_stack.hw_idx = tos;
}

/*
 * Due to lack of segmentation in Linux the effective address (offset)
 * is the same as the linear address, allowing us to merge the LIP and EIP
 * LBR formats.
 */
void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
{
	bool need_info = false, call_stack = false;
	unsigned long mask = x86_pmu.lbr_nr - 1;
	int lbr_format = x86_pmu.intel_cap.lbr_format;
	u64 tos = intel_pmu_lbr_tos();
	int i;
	int out = 0;
	int num = x86_pmu.lbr_nr;

	if (cpuc->lbr_sel) {
		need_info = !(cpuc->lbr_sel->config & LBR_NO_INFO);
		if (cpuc->lbr_sel->config & LBR_CALL_STACK)
			call_stack = true;
	}

	for (i = 0; i < num; i++) {
		unsigned long lbr_idx = (tos - i) & mask;
		u64 from, to, mis = 0, pred = 0, in_tx = 0, abort = 0;
		int skip = 0;
		u16 cycles = 0;
		int lbr_flags = lbr_desc[lbr_format];

		from = rdlbr_from(lbr_idx, NULL);
		to   = rdlbr_to(lbr_idx, NULL);

		/*
		 * Read LBR call stack entries
		 * until invalid entry (0s) is detected.
		 */
		if (call_stack && !from)
			break;

		if (lbr_format == LBR_FORMAT_INFO && need_info) {
			u64 info;

			info = rdlbr_info(lbr_idx, NULL);
			mis = !!(info & LBR_INFO_MISPRED);
			pred = !mis;
			in_tx = !!(info & LBR_INFO_IN_TX);
			abort = !!(info & LBR_INFO_ABORT);
			cycles = (info & LBR_INFO_CYCLES);
		}

		if (lbr_format == LBR_FORMAT_TIME) {
			mis = !!(from & LBR_FROM_FLAG_MISPRED);
			pred = !mis;
			skip = 1;
			cycles = ((to >> 48) & LBR_INFO_CYCLES);

			to = (u64)((((s64)to) << 16) >> 16);
		}

		if (lbr_flags & LBR_EIP_FLAGS) {
			mis = !!(from & LBR_FROM_FLAG_MISPRED);
			pred = !mis;
			skip = 1;
		}
		if (lbr_flags & LBR_TSX) {
			in_tx = !!(from & LBR_FROM_FLAG_IN_TX);
			abort = !!(from & LBR_FROM_FLAG_ABORT);
			skip = 3;
		}
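		/*
		 * Arithmetic shift left then right sign-extends the address
		 * again after the flag bits consumed above (1 or 3 MSBs)
		 * have been stripped.
		 */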
		from = (u64)((((s64)from) << skip) >> skip);

		/*
		 * Some CPUs report duplicated abort records,
		 * with the second entry not having an abort bit set.
		 * Skip them here. This loop runs backwards,
		 * so we need to undo the previous record.
		 * If the abort just happened outside the window
		 * the extra entry cannot be removed.
		 */
		if (abort && x86_pmu.lbr_double_abort && out > 0)
			out--;

		cpuc->lbr_entries[out].from	 = from;
		cpuc->lbr_entries[out].to	 = to;
		cpuc->lbr_entries[out].mispred	 = mis;
		cpuc->lbr_entries[out].predicted = pred;
		cpuc->lbr_entries[out].in_tx	 = in_tx;
		cpuc->lbr_entries[out].abort	 = abort;
		cpuc->lbr_entries[out].cycles	 = cycles;
		cpuc->lbr_entries[out].type	 = 0;
		cpuc->lbr_entries[out].reserved	 = 0;
		out++;
	}
	cpuc->lbr_stack.nr = out;
	cpuc->lbr_stack.hw_idx = tos;
}

static __always_inline int get_lbr_br_type(u64 info)
{
	if (!static_cpu_has(X86_FEATURE_ARCH_LBR) || !x86_pmu.lbr_br_type)
		return 0;

	return (info & LBR_INFO_BR_TYPE) >> LBR_INFO_BR_TYPE_OFFSET;
}

static __always_inline bool get_lbr_mispred(u64 info)
{
	if (static_cpu_has(X86_FEATURE_ARCH_LBR) && !x86_pmu.lbr_mispred)
		return 0;

	return !!(info & LBR_INFO_MISPRED);
}

static __always_inline bool get_lbr_predicted(u64 info)
{
	if (static_cpu_has(X86_FEATURE_ARCH_LBR) && !x86_pmu.lbr_mispred)
		return 0;

	return !(info & LBR_INFO_MISPRED);
}

static __always_inline u16 get_lbr_cycles(u64 info)
{
	if (static_cpu_has(X86_FEATURE_ARCH_LBR) &&
	    !(x86_pmu.lbr_timed_lbr && info & LBR_INFO_CYC_CNT_VALID))
		return 0;

	return info & LBR_INFO_CYCLES;
}

static void intel_pmu_store_lbr(struct cpu_hw_events *cpuc,
				struct lbr_entry *entries)
{
	struct perf_branch_entry *e;
	struct lbr_entry *lbr;
	u64 from, to, info;
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		lbr = entries ? &entries[i] : NULL;
		e = &cpuc->lbr_entries[i];

		from = rdlbr_from(i, lbr);
		/*
		 * Read LBR entries until invalid entry (0s) is detected.
		 */
		if (!from)
			break;

		to = rdlbr_to(i, lbr);
		info = rdlbr_info(i, lbr);

		e->from		= from;
		e->to		= to;
		e->mispred	= get_lbr_mispred(info);
		e->predicted	= get_lbr_predicted(info);
		e->in_tx	= !!(info & LBR_INFO_IN_TX);
		e->abort	= !!(info & LBR_INFO_ABORT);
		e->cycles	= get_lbr_cycles(info);
		e->type		= get_lbr_br_type(info);
		e->reserved	= 0;
	}

	cpuc->lbr_stack.nr = i;
}

static void intel_pmu_arch_lbr_read(struct cpu_hw_events *cpuc)
{
	intel_pmu_store_lbr(cpuc, NULL);
}

static void intel_pmu_arch_lbr_read_xsave(struct cpu_hw_events *cpuc)
{
	struct x86_perf_task_context_arch_lbr_xsave *xsave = cpuc->lbr_xsave;

	if (!xsave) {
		intel_pmu_store_lbr(cpuc, NULL);
		return;
	}
	copy_dynamic_supervisor_to_kernel(&xsave->xsave, XFEATURE_MASK_LBR);

	intel_pmu_store_lbr(cpuc, xsave->lbr.entries);
}

void intel_pmu_lbr_read(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	/*
	 * Don't read when all LBR users are using adaptive PEBS.
	 *
	 * This could be smarter and actually check the event,
	 * but this simple approach seems to work for now.
	 */
	if (!cpuc->lbr_users || vlbr_exclude_host() ||
	    cpuc->lbr_users == cpuc->lbr_pebs_users)
		return;

	x86_pmu.lbr_read(cpuc);

	intel_pmu_lbr_filter(cpuc);
}

/*
 * SW filter is used:
 * - in case there is no HW filter
 * - in case the HW filter has errata or limitations
 */
static int intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
{
	u64 br_type = event->attr.branch_sample_type;
	int mask = 0;

	if (br_type & PERF_SAMPLE_BRANCH_USER)
		mask |= X86_BR_USER;

	if (br_type & PERF_SAMPLE_BRANCH_KERNEL)
		mask |= X86_BR_KERNEL;

	/* we ignore BRANCH_HV here */

	if (br_type & PERF_SAMPLE_BRANCH_ANY)
		mask |= X86_BR_ANY;

	if (br_type & PERF_SAMPLE_BRANCH_ANY_CALL)
		mask |= X86_BR_ANY_CALL;

	if (br_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
		mask |= X86_BR_RET | X86_BR_IRET | X86_BR_SYSRET;

	if (br_type & PERF_SAMPLE_BRANCH_IND_CALL)
		mask |= X86_BR_IND_CALL;

	if (br_type & PERF_SAMPLE_BRANCH_ABORT_TX)
		mask |= X86_BR_ABORT;

	if (br_type & PERF_SAMPLE_BRANCH_IN_TX)
		mask |= X86_BR_IN_TX;

	if (br_type & PERF_SAMPLE_BRANCH_NO_TX)
		mask |= X86_BR_NO_TX;

	if (br_type & PERF_SAMPLE_BRANCH_COND)
		mask |= X86_BR_JCC;

	if (br_type & PERF_SAMPLE_BRANCH_CALL_STACK) {
		if (!x86_pmu_has_lbr_callstack())
			return -EOPNOTSUPP;
		if (mask & ~(X86_BR_USER | X86_BR_KERNEL))
			return -EINVAL;
		mask |= X86_BR_CALL | X86_BR_IND_CALL | X86_BR_RET |
			X86_BR_CALL_STACK;
	}

	if (br_type & PERF_SAMPLE_BRANCH_IND_JUMP)
		mask |= X86_BR_IND_JMP;

	if (br_type & PERF_SAMPLE_BRANCH_CALL)
		mask |= X86_BR_CALL | X86_BR_ZERO_CALL;

	if (br_type & PERF_SAMPLE_BRANCH_TYPE_SAVE)
		mask |= X86_BR_TYPE_SAVE;

	/*
	 * Stash the actual user request into reg; it may
	 * be used by fixup code for some CPUs.
	 */
	event->hw.branch_reg.reg = mask;
	return 0;
}

/*
 * setup the HW LBR filter
 * Used only when available, may not be enough to disambiguate
 * all branches, may need the help of the SW filter
 */
static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event)
{
	struct hw_perf_event_extra *reg;
	u64 br_type = event->attr.branch_sample_type;
	u64 mask = 0, v;
	int i;

	for (i = 0; i < PERF_SAMPLE_BRANCH_MAX_SHIFT; i++) {
		if (!(br_type & (1ULL << i)))
			continue;

		v = x86_pmu.lbr_sel_map[i];
		if (v == LBR_NOT_SUPP)
			return -EOPNOTSUPP;

		if (v != LBR_IGN)
			mask |= v;
	}

	reg = &event->hw.branch_reg;
	reg->idx = EXTRA_REG_LBR;

	if (static_cpu_has(X86_FEATURE_ARCH_LBR)) {
		reg->config = mask;
		return 0;
	}

	/*
	 * The first 9 bits (LBR_SEL_MASK) in LBR_SELECT operate
	 * in suppress mode. So LBR_SELECT should be set to
	 * (~mask & LBR_SEL_MASK) | (mask & ~LBR_SEL_MASK)
	 * But the 10th bit LBR_CALL_STACK does not operate
	 * in suppress mode.
	 */
	reg->config = mask ^ (x86_pmu.lbr_sel_mask & ~LBR_CALL_STACK);
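	/*
	 * For example, a request for user-space conditional branches only
	 * (mask = LBR_USER | LBR_JCC) becomes LBR_SELECT = LBR_KERNEL |
	 * LBR_REL_CALL | LBR_IND_CALL | LBR_RETURN | LBR_REL_JMP |
	 * LBR_IND_JMP | LBR_FAR, i.e. every other class is suppressed.
	 */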

	if ((br_type & PERF_SAMPLE_BRANCH_NO_CYCLES) &&
	    (br_type & PERF_SAMPLE_BRANCH_NO_FLAGS) &&
	    (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO))
		reg->config |= LBR_NO_INFO;

	return 0;
}

int intel_pmu_setup_lbr_filter(struct perf_event *event)
{
	int ret = 0;

	/*
	 * no LBR on this PMU
	 */
	if (!x86_pmu.lbr_nr)
		return -EOPNOTSUPP;

	/*
	 * setup SW LBR filter
	 */
	ret = intel_pmu_setup_sw_lbr_filter(event);
	if (ret)
		return ret;

	/*
	 * setup HW LBR filter, if any
	 */
	if (x86_pmu.lbr_sel_map)
		ret = intel_pmu_setup_hw_lbr_filter(event);

	return ret;
}

/*
 * return the type of control flow change at address "from"
 * instruction is not necessarily a branch (in case of interrupt).
 *
 * The branch type returned also includes the priv level of the
 * target of the control flow change (X86_BR_USER, X86_BR_KERNEL).
 *
 * If a branch type is unknown OR the instruction cannot be
 * decoded (e.g., text page not present), then X86_BR_NONE is
 * returned.
 */
static int branch_type(unsigned long from, unsigned long to, int abort)
{
	struct insn insn;
	void *addr;
	int bytes_read, bytes_left;
	int ret = X86_BR_NONE;
	int ext, to_plm, from_plm;
	u8 buf[MAX_INSN_SIZE];
	int is64 = 0;

	to_plm = kernel_ip(to) ? X86_BR_KERNEL : X86_BR_USER;
	from_plm = kernel_ip(from) ? X86_BR_KERNEL : X86_BR_USER;

	/*
	 * maybe zero if lbr did not fill up after a reset by the time
	 * we get a PMU interrupt
	 */
	if (from == 0 || to == 0)
		return X86_BR_NONE;

	if (abort)
		return X86_BR_ABORT | to_plm;

	if (from_plm == X86_BR_USER) {
		/*
		 * can happen if measuring at the user level only
		 * and we interrupt in a kernel thread, e.g., idle.
		 */
		if (!current->mm)
			return X86_BR_NONE;

		/* may fail if text not present */
		bytes_left = copy_from_user_nmi(buf, (void __user *)from,
						MAX_INSN_SIZE);
		bytes_read = MAX_INSN_SIZE - bytes_left;
		if (!bytes_read)
			return X86_BR_NONE;

		addr = buf;
	} else {
		/*
		 * The LBR logs any address in the IP, even if the IP just
		 * faulted. This means userspace can control the from address.
		 * Ensure we don't blindly read any address by validating it is
		 * a known text address.
		 */
		if (kernel_text_address(from)) {
			addr = (void *)from;
			/*
			 * Assume we can get the maximum possible size
			 * when grabbing kernel data. This is not
			 * _strictly_ true since we could possibly be
			 * executing up next to a memory hole, but
			 * it is very unlikely to be a problem.
			 */
			bytes_read = MAX_INSN_SIZE;
		} else {
			return X86_BR_NONE;
		}
	}

	/*
	 * decoder needs to know the ABI especially
	 * on 64-bit systems running 32-bit apps
	 */
#ifdef CONFIG_X86_64
	is64 = kernel_ip((unsigned long)addr) || any_64bit_mode(current_pt_regs());
#endif
	insn_init(&insn, addr, bytes_read, is64);
	if (insn_get_opcode(&insn))
		return X86_BR_ABORT;

	switch (insn.opcode.bytes[0]) {
	case 0xf:
		switch (insn.opcode.bytes[1]) {
		case 0x05: /* syscall */
		case 0x34: /* sysenter */
			ret = X86_BR_SYSCALL;
			break;
		case 0x07: /* sysret */
		case 0x35: /* sysexit */
			ret = X86_BR_SYSRET;
			break;
		case 0x80 ... 0x8f: /* conditional */
			ret = X86_BR_JCC;
			break;
		default:
			ret = X86_BR_NONE;
		}
		break;
	case 0x70 ... 0x7f: /* conditional */
		ret = X86_BR_JCC;
		break;
	case 0xc2: /* near ret */
	case 0xc3: /* near ret */
	case 0xca: /* far ret */
	case 0xcb: /* far ret */
		ret = X86_BR_RET;
		break;
	case 0xcf: /* iret */
		ret = X86_BR_IRET;
		break;
	case 0xcc ... 0xce: /* int */
		ret = X86_BR_INT;
		break;
	case 0xe8: /* call near rel */
		if (insn_get_immediate(&insn) || insn.immediate1.value == 0) {
			/* zero length call */
			ret = X86_BR_ZERO_CALL;
			break;
		}
		fallthrough;
	case 0x9a: /* call far absolute */
		ret = X86_BR_CALL;
		break;
	case 0xe0 ... 0xe3: /* loop jmp */
		ret = X86_BR_JCC;
		break;
	case 0xe9 ... 0xeb: /* jmp */
		ret = X86_BR_JMP;
		break;
	case 0xff: /* call near absolute, call far absolute ind */
		if (insn_get_modrm(&insn))
			return X86_BR_ABORT;

		ext = (insn.modrm.bytes[0] >> 3) & 0x7;
		switch (ext) {
		case 2: /* near ind call */
		case 3: /* far ind call */
			ret = X86_BR_IND_CALL;
			break;
		case 4:
		case 5:
			ret = X86_BR_IND_JMP;
			break;
		}
		break;
	default:
		ret = X86_BR_NONE;
	}
	/*
	 * interrupts, traps, faults (and thus ring transition) may
	 * occur on any instructions. Thus, to classify them correctly,
	 * we need to first look at the from and to priv levels. If they
	 * are different and to is in the kernel, then it indicates
	 * a ring transition. If the from instruction is not a ring
	 * transition instr (syscall, sysenter, int), then it means
	 * it was an irq, trap or fault.
	 *
	 * we have no way of detecting kernel to kernel faults.
	 */
	if (from_plm == X86_BR_USER && to_plm == X86_BR_KERNEL
	    && ret != X86_BR_SYSCALL && ret != X86_BR_INT)
		ret = X86_BR_IRQ;

	/*
	 * branch priv level determined by target as
	 * is done by HW when LBR_SELECT is implemented
	 */
	if (ret != X86_BR_NONE)
		ret |= to_plm;

	return ret;
}

#define X86_BR_TYPE_MAP_MAX	16

static int branch_map[X86_BR_TYPE_MAP_MAX] = {
	PERF_BR_CALL,		/* X86_BR_CALL */
	PERF_BR_RET,		/* X86_BR_RET */
	PERF_BR_SYSCALL,	/* X86_BR_SYSCALL */
	PERF_BR_SYSRET,		/* X86_BR_SYSRET */
	PERF_BR_UNKNOWN,	/* X86_BR_INT */
	PERF_BR_UNKNOWN,	/* X86_BR_IRET */
	PERF_BR_COND,		/* X86_BR_JCC */
	PERF_BR_UNCOND,		/* X86_BR_JMP */
	PERF_BR_UNKNOWN,	/* X86_BR_IRQ */
	PERF_BR_IND_CALL,	/* X86_BR_IND_CALL */
	PERF_BR_UNKNOWN,	/* X86_BR_ABORT */
	PERF_BR_UNKNOWN,	/* X86_BR_IN_TX */
	PERF_BR_UNKNOWN,	/* X86_BR_NO_TX */
	PERF_BR_CALL,		/* X86_BR_ZERO_CALL */
	PERF_BR_UNKNOWN,	/* X86_BR_CALL_STACK */
	PERF_BR_IND,		/* X86_BR_IND_JMP */
};

static int
common_branch_type(int type)
{
	int i;

	type >>= 2; /* skip X86_BR_USER and X86_BR_KERNEL */

	if (type) {
		i = __ffs(type);
		if (i < X86_BR_TYPE_MAP_MAX)
			return branch_map[i];
	}

	return PERF_BR_UNKNOWN;
}

enum {
	ARCH_LBR_BR_TYPE_JCC			= 0,
	ARCH_LBR_BR_TYPE_NEAR_IND_JMP		= 1,
	ARCH_LBR_BR_TYPE_NEAR_REL_JMP		= 2,
	ARCH_LBR_BR_TYPE_NEAR_IND_CALL		= 3,
	ARCH_LBR_BR_TYPE_NEAR_REL_CALL		= 4,
	ARCH_LBR_BR_TYPE_NEAR_RET		= 5,
	ARCH_LBR_BR_TYPE_KNOWN_MAX		= ARCH_LBR_BR_TYPE_NEAR_RET,

	ARCH_LBR_BR_TYPE_MAP_MAX		= 16,
};

static const int arch_lbr_br_type_map[ARCH_LBR_BR_TYPE_MAP_MAX] = {
	[ARCH_LBR_BR_TYPE_JCC]			= X86_BR_JCC,
	[ARCH_LBR_BR_TYPE_NEAR_IND_JMP]		= X86_BR_IND_JMP,
	[ARCH_LBR_BR_TYPE_NEAR_REL_JMP]		= X86_BR_JMP,
	[ARCH_LBR_BR_TYPE_NEAR_IND_CALL]	= X86_BR_IND_CALL,
	[ARCH_LBR_BR_TYPE_NEAR_REL_CALL]	= X86_BR_CALL,
	[ARCH_LBR_BR_TYPE_NEAR_RET]		= X86_BR_RET,
};

/*
 * implement actual branch filter based on user demand.
 * Hardware may not exactly satisfy that request, thus
 * we need to inspect opcodes. Mismatched branches are
 * discarded. Therefore, the number of branches returned
 * in PERF_SAMPLE_BRANCH_STACK sample may vary.
 */
static void
intel_pmu_lbr_filter(struct cpu_hw_events *cpuc)
{
	u64 from, to;
	int br_sel = cpuc->br_sel;
	int i, j, type, to_plm;
	bool compress = false;

	/* if sampling all branches, then nothing to filter */
	if (((br_sel & X86_BR_ALL) == X86_BR_ALL) &&
	    ((br_sel & X86_BR_TYPE_SAVE) != X86_BR_TYPE_SAVE))
		return;

	for (i = 0; i < cpuc->lbr_stack.nr; i++) {

		from = cpuc->lbr_entries[i].from;
		to = cpuc->lbr_entries[i].to;
		type = cpuc->lbr_entries[i].type;

		/*
		 * Parse the branch type recorded in LBR_x_INFO MSR.
		 * Doesn't support OTHER_BRANCH decoding for now.
		 * The OTHER_BRANCH branch type still relies on software decoding.
		 */
		if (static_cpu_has(X86_FEATURE_ARCH_LBR) &&
		    type <= ARCH_LBR_BR_TYPE_KNOWN_MAX) {
			to_plm = kernel_ip(to) ? X86_BR_KERNEL : X86_BR_USER;
			type = arch_lbr_br_type_map[type] | to_plm;
		} else
			type = branch_type(from, to, cpuc->lbr_entries[i].abort);
		if (type != X86_BR_NONE && (br_sel & X86_BR_ANYTX)) {
			if (cpuc->lbr_entries[i].in_tx)
				type |= X86_BR_IN_TX;
			else
				type |= X86_BR_NO_TX;
		}

		/* if type does not correspond, then discard */
		if (type == X86_BR_NONE || (br_sel & type) != type) {
			cpuc->lbr_entries[i].from = 0;
			compress = true;
		}

		if ((br_sel & X86_BR_TYPE_SAVE) == X86_BR_TYPE_SAVE)
			cpuc->lbr_entries[i].type = common_branch_type(type);
	}

	if (!compress)
		return;

	/* remove all entries with from=0 */
	for (i = 0; i < cpuc->lbr_stack.nr; ) {
		if (!cpuc->lbr_entries[i].from) {
			j = i;
			while (++j < cpuc->lbr_stack.nr)
				cpuc->lbr_entries[j-1] = cpuc->lbr_entries[j];
			cpuc->lbr_stack.nr--;
			if (!cpuc->lbr_entries[i].from)
				continue;
		}
		i++;
	}
}

void intel_pmu_store_pebs_lbrs(struct lbr_entry *lbr)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	/* Cannot get TOS for large PEBS and Arch LBR */
	if (static_cpu_has(X86_FEATURE_ARCH_LBR) ||
	    (cpuc->n_pebs == cpuc->n_large_pebs))
		cpuc->lbr_stack.hw_idx = -1ULL;
	else
		cpuc->lbr_stack.hw_idx = intel_pmu_lbr_tos();

	intel_pmu_store_lbr(cpuc, lbr);
	intel_pmu_lbr_filter(cpuc);
}

/*
 * Map interface branch filters onto LBR filters
 */
static const int nhm_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
	[PERF_SAMPLE_BRANCH_ANY_SHIFT]		= LBR_ANY,
	[PERF_SAMPLE_BRANCH_USER_SHIFT]		= LBR_USER,
	[PERF_SAMPLE_BRANCH_KERNEL_SHIFT]	= LBR_KERNEL,
	[PERF_SAMPLE_BRANCH_HV_SHIFT]		= LBR_IGN,
	[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]	= LBR_RETURN | LBR_REL_JMP
						| LBR_IND_JMP | LBR_FAR,
	/*
	 * NHM/WSM erratum: must include REL_JMP+IND_JMP to get CALL branches
	 */
	[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] =
	 LBR_REL_CALL | LBR_IND_CALL | LBR_REL_JMP | LBR_IND_JMP | LBR_FAR,
	/*
	 * NHM/WSM erratum: must include IND_JMP to capture IND_CALL
	 */
	[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT] = LBR_IND_CALL | LBR_IND_JMP,
	[PERF_SAMPLE_BRANCH_COND_SHIFT]     = LBR_JCC,
	[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT] = LBR_IND_JMP,
};

static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
	[PERF_SAMPLE_BRANCH_ANY_SHIFT]		= LBR_ANY,
	[PERF_SAMPLE_BRANCH_USER_SHIFT]		= LBR_USER,
	[PERF_SAMPLE_BRANCH_KERNEL_SHIFT]	= LBR_KERNEL,
	[PERF_SAMPLE_BRANCH_HV_SHIFT]		= LBR_IGN,
	[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]	= LBR_RETURN | LBR_FAR,
	[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT]	= LBR_REL_CALL | LBR_IND_CALL
						| LBR_FAR,
	[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]	= LBR_IND_CALL,
	[PERF_SAMPLE_BRANCH_COND_SHIFT]		= LBR_JCC,
	[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]	= LBR_IND_JMP,
	[PERF_SAMPLE_BRANCH_CALL_SHIFT]		= LBR_REL_CALL,
};

static const int hsw_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
	[PERF_SAMPLE_BRANCH_ANY_SHIFT]		= LBR_ANY,
	[PERF_SAMPLE_BRANCH_USER_SHIFT]		= LBR_USER,
	[PERF_SAMPLE_BRANCH_KERNEL_SHIFT]	= LBR_KERNEL,
	[PERF_SAMPLE_BRANCH_HV_SHIFT]		= LBR_IGN,
	[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]	= LBR_RETURN | LBR_FAR,
	[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT]	= LBR_REL_CALL | LBR_IND_CALL
						| LBR_FAR,
	[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]	= LBR_IND_CALL,
	[PERF_SAMPLE_BRANCH_COND_SHIFT]		= LBR_JCC,
	[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT]	= LBR_REL_CALL | LBR_IND_CALL
						| LBR_RETURN | LBR_CALL_STACK,
	[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]	= LBR_IND_JMP,
	[PERF_SAMPLE_BRANCH_CALL_SHIFT]		= LBR_REL_CALL,
};

static int arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
	[PERF_SAMPLE_BRANCH_ANY_SHIFT]		= ARCH_LBR_ANY,
	[PERF_SAMPLE_BRANCH_USER_SHIFT]		= ARCH_LBR_USER,
	[PERF_SAMPLE_BRANCH_KERNEL_SHIFT]	= ARCH_LBR_KERNEL,
	[PERF_SAMPLE_BRANCH_HV_SHIFT]		= LBR_IGN,
	[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]	= ARCH_LBR_RETURN |
						  ARCH_LBR_OTHER_BRANCH,
	[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT]	= ARCH_LBR_REL_CALL |
						  ARCH_LBR_IND_CALL |
						  ARCH_LBR_OTHER_BRANCH,
	[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]	= ARCH_LBR_IND_CALL,
	[PERF_SAMPLE_BRANCH_COND_SHIFT]		= ARCH_LBR_JCC,
	[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT]	= ARCH_LBR_REL_CALL |
						  ARCH_LBR_IND_CALL |
						  ARCH_LBR_RETURN |
						  ARCH_LBR_CALL_STACK,
	[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]	= ARCH_LBR_IND_JMP,
	[PERF_SAMPLE_BRANCH_CALL_SHIFT]		= ARCH_LBR_REL_CALL,
};

/* core */
void __init intel_pmu_lbr_init_core(void)
{
	x86_pmu.lbr_nr     = 4;
	x86_pmu.lbr_tos    = MSR_LBR_TOS;
	x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
	x86_pmu.lbr_to     = MSR_LBR_CORE_TO;

	/*
	 * SW branch filter usage:
	 * - compensate for lack of HW filter
	 */
}

/* nehalem/westmere */
void __init intel_pmu_lbr_init_nhm(void)
{
	x86_pmu.lbr_nr     = 16;
	x86_pmu.lbr_tos    = MSR_LBR_TOS;
	x86_pmu.lbr_from   = MSR_LBR_NHM_FROM;
	x86_pmu.lbr_to     = MSR_LBR_NHM_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = nhm_lbr_sel_map;

	/*
	 * SW branch filter usage:
	 * - workaround LBR_SEL errata (see above)
	 * - support syscall, sysret capture.
	 *   That requires LBR_FAR but that means far
	 *   jmp need to be filtered out
	 */
}

/* sandy bridge */
void __init intel_pmu_lbr_init_snb(void)
{
	x86_pmu.lbr_nr   = 16;
	x86_pmu.lbr_tos  = MSR_LBR_TOS;
	x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
	x86_pmu.lbr_to   = MSR_LBR_NHM_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = snb_lbr_sel_map;

	/*
	 * SW branch filter usage:
	 * - support syscall, sysret capture.
	 *   That requires LBR_FAR but that means far
	 *   jmp need to be filtered out
	 */
}

static inline struct kmem_cache *
create_lbr_kmem_cache(size_t size, size_t align)
{
	return kmem_cache_create("x86_lbr", size, align, 0, NULL);
}

/* haswell */
void intel_pmu_lbr_init_hsw(void)
{
	size_t size = sizeof(struct x86_perf_task_context);

	x86_pmu.lbr_nr   = 16;
	x86_pmu.lbr_tos  = MSR_LBR_TOS;
	x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
	x86_pmu.lbr_to   = MSR_LBR_NHM_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = hsw_lbr_sel_map;

	x86_get_pmu(smp_processor_id())->task_ctx_cache = create_lbr_kmem_cache(size, 0);

	if (lbr_from_signext_quirk_needed())
		static_branch_enable(&lbr_from_quirk_key);
}

/* skylake */
__init void intel_pmu_lbr_init_skl(void)
{
	size_t size = sizeof(struct x86_perf_task_context);

	x86_pmu.lbr_nr   = 32;
	x86_pmu.lbr_tos  = MSR_LBR_TOS;
	x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
	x86_pmu.lbr_to   = MSR_LBR_NHM_TO;
	x86_pmu.lbr_info = MSR_LBR_INFO_0;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = hsw_lbr_sel_map;

	x86_get_pmu(smp_processor_id())->task_ctx_cache = create_lbr_kmem_cache(size, 0);

	/*
	 * SW branch filter usage:
	 * - support syscall, sysret capture.
	 *   That requires LBR_FAR but that means far
	 *   jmp need to be filtered out
	 */
}

/* atom */
void __init intel_pmu_lbr_init_atom(void)
{
	/*
	 * only models starting at stepping 10 seem
	 * to have an operational LBR which can freeze
	 * on PMU interrupt
	 */
	if (boot_cpu_data.x86_model == 28
	    && boot_cpu_data.x86_stepping < 10) {
		pr_cont("LBR disabled due to erratum");
		return;
	}

	x86_pmu.lbr_nr     = 8;
	x86_pmu.lbr_tos    = MSR_LBR_TOS;
	x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
	x86_pmu.lbr_to     = MSR_LBR_CORE_TO;

	/*
	 * SW branch filter usage:
	 * - compensate for lack of HW filter
	 */
}

/* slm */
void __init intel_pmu_lbr_init_slm(void)
{
	x86_pmu.lbr_nr     = 8;
	x86_pmu.lbr_tos    = MSR_LBR_TOS;
	x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
	x86_pmu.lbr_to     = MSR_LBR_CORE_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = nhm_lbr_sel_map;

	/*
	 * SW branch filter usage:
	 * - compensate for lack of HW filter
	 */
	pr_cont("8-deep LBR, ");
}

/* Knights Landing */
void intel_pmu_lbr_init_knl(void)
{
	x86_pmu.lbr_nr     = 8;
	x86_pmu.lbr_tos    = MSR_LBR_TOS;
	x86_pmu.lbr_from   = MSR_LBR_NHM_FROM;
	x86_pmu.lbr_to     = MSR_LBR_NHM_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = snb_lbr_sel_map;

	/* Knights Landing does have the MISPREDICT bit */
	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_LIP)
		x86_pmu.intel_cap.lbr_format = LBR_FORMAT_EIP_FLAGS;
}

/*
 * LBR state size is variable based on the max number of registers.
 * This calculates the expected state size, which should match
 * what the hardware enumerates for the size of XFEATURE_LBR.
 */
static inline unsigned int get_lbr_state_size(void)
{
	return sizeof(struct arch_lbr_state) +
	       x86_pmu.lbr_nr * sizeof(struct lbr_entry);
}

static bool is_arch_lbr_xsave_available(void)
{
	if (!boot_cpu_has(X86_FEATURE_XSAVES))
		return false;

	/*
	 * Check the LBR state with the corresponding software structure.
	 * Disable LBR XSAVES support if the size doesn't match.
	 */
	if (WARN_ON(xfeature_size(XFEATURE_LBR) != get_lbr_state_size()))
		return false;

	return true;
}

void __init intel_pmu_arch_lbr_init(void)
{
	struct pmu *pmu = x86_get_pmu(smp_processor_id());
	union cpuid28_eax eax;
	union cpuid28_ebx ebx;
	union cpuid28_ecx ecx;
	unsigned int unused_edx;
	bool arch_lbr_xsave;
	size_t size;
	u64 lbr_nr;

	/* Arch LBR Capabilities */
	cpuid(28, &eax.full, &ebx.full, &ecx.full, &unused_edx);

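	/*
	 * Each set bit n in the CPUID depth mask advertises support for a
	 * depth of 8 * (n + 1) entries, so fls() * 8 selects the deepest
	 * supported LBR configuration.
	 */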
	lbr_nr = fls(eax.split.lbr_depth_mask) * 8;
	if (!lbr_nr)
		goto clear_arch_lbr;

	/* Apply the max depth of Arch LBR */
	if (wrmsrl_safe(MSR_ARCH_LBR_DEPTH, lbr_nr))
		goto clear_arch_lbr;

	x86_pmu.lbr_depth_mask = eax.split.lbr_depth_mask;
	x86_pmu.lbr_deep_c_reset = eax.split.lbr_deep_c_reset;
	x86_pmu.lbr_lip = eax.split.lbr_lip;
	x86_pmu.lbr_cpl = ebx.split.lbr_cpl;
	x86_pmu.lbr_filter = ebx.split.lbr_filter;
	x86_pmu.lbr_call_stack = ebx.split.lbr_call_stack;
	x86_pmu.lbr_mispred = ecx.split.lbr_mispred;
	x86_pmu.lbr_timed_lbr = ecx.split.lbr_timed_lbr;
	x86_pmu.lbr_br_type = ecx.split.lbr_br_type;
	x86_pmu.lbr_nr = lbr_nr;

	arch_lbr_xsave = is_arch_lbr_xsave_available();
	if (arch_lbr_xsave) {
		size = sizeof(struct x86_perf_task_context_arch_lbr_xsave) +
		       get_lbr_state_size();
		pmu->task_ctx_cache = create_lbr_kmem_cache(size,
							    XSAVE_ALIGNMENT);
	}

	if (!pmu->task_ctx_cache) {
		arch_lbr_xsave = false;

		size = sizeof(struct x86_perf_task_context_arch_lbr) +
		       lbr_nr * sizeof(struct lbr_entry);
		pmu->task_ctx_cache = create_lbr_kmem_cache(size, 0);
	}

	x86_pmu.lbr_from = MSR_ARCH_LBR_FROM_0;
	x86_pmu.lbr_to = MSR_ARCH_LBR_TO_0;
	x86_pmu.lbr_info = MSR_ARCH_LBR_INFO_0;

	/* LBR callstack requires both CPL and Branch Filtering support */
	if (!x86_pmu.lbr_cpl ||
	    !x86_pmu.lbr_filter ||
	    !x86_pmu.lbr_call_stack)
		arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] = LBR_NOT_SUPP;

	if (!x86_pmu.lbr_cpl) {
		arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_USER_SHIFT] = LBR_NOT_SUPP;
		arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_KERNEL_SHIFT] = LBR_NOT_SUPP;
	} else if (!x86_pmu.lbr_filter) {
		arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_ANY_SHIFT] = LBR_NOT_SUPP;
		arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT] = LBR_NOT_SUPP;
		arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] = LBR_NOT_SUPP;
		arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT] = LBR_NOT_SUPP;
		arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_COND_SHIFT] = LBR_NOT_SUPP;
		arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT] = LBR_NOT_SUPP;
		arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_CALL_SHIFT] = LBR_NOT_SUPP;
	}

	x86_pmu.lbr_ctl_mask = ARCH_LBR_CTL_MASK;
	x86_pmu.lbr_ctl_map  = arch_lbr_ctl_map;

	if (!x86_pmu.lbr_cpl && !x86_pmu.lbr_filter)
		x86_pmu.lbr_ctl_map = NULL;

	x86_pmu.lbr_reset = intel_pmu_arch_lbr_reset;
	if (arch_lbr_xsave) {
		x86_pmu.lbr_save = intel_pmu_arch_lbr_xsaves;
		x86_pmu.lbr_restore = intel_pmu_arch_lbr_xrstors;
		x86_pmu.lbr_read = intel_pmu_arch_lbr_read_xsave;
		pr_cont("XSAVE ");
	} else {
		x86_pmu.lbr_save = intel_pmu_arch_lbr_save;
		x86_pmu.lbr_restore = intel_pmu_arch_lbr_restore;
		x86_pmu.lbr_read = intel_pmu_arch_lbr_read;
	}

	pr_cont("Architectural LBR, ");

	return;

clear_arch_lbr:
	clear_cpu_cap(&boot_cpu_data, X86_FEATURE_ARCH_LBR);
}

/**
 * x86_perf_get_lbr - get the LBR records information
 *
 * @lbr: the caller's memory to store the LBR records information
 *
 * Returns: 0 indicates the LBR info has been successfully obtained
 */
int x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
{
	int lbr_fmt = x86_pmu.intel_cap.lbr_format;

	lbr->nr = x86_pmu.lbr_nr;
	lbr->from = x86_pmu.lbr_from;
	lbr->to = x86_pmu.lbr_to;
	lbr->info = (lbr_fmt == LBR_FORMAT_INFO) ? x86_pmu.lbr_info : 0;

	return 0;
}
EXPORT_SYMBOL_GPL(x86_perf_get_lbr);

struct event_constraint vlbr_constraint =
	__EVENT_CONSTRAINT(INTEL_FIXED_VLBR_EVENT, (1ULL << INTEL_PMC_IDX_FIXED_VLBR),
			  FIXED_EVENT_FLAGS, 1, 0, PERF_X86_EVENT_LBR_SELECT);