// SPDX-License-Identifier: GPL-2.0-only
/*
 * Per core/cpu state
 *
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/nmi.h>
#include <linux/kvm_host.h>

#include <asm/cpufeature.h>
#include <asm/hardirq.h>
#include <asm/intel-family.h>
#include <asm/intel_pt.h>
#include <asm/apic.h>
#include <asm/cpu_device_id.h>

#include "../perf_event.h"

/*
 * Intel PerfMon, used on Core and later.
 */
static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
	[PERF_COUNT_HW_REF_CPU_CYCLES]		= 0x0300, /* pseudo-encoding */
};

static struct event_constraint intel_core_event_constraints[] __read_mostly =
{
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_core2_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
	INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
	INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
{
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
	EVENT_EXTRA_END
};
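/*
 * A note on reading the tables in this file: the second argument to
 * INTEL_EVENT_CONSTRAINT()/INTEL_UEVENT_CONSTRAINT() is a bitmask of the
 * general-purpose counters the event may be scheduled on (0x1 = counter 0
 * only, 0x3 = counters 0-1, 0xf = counters 0-3, 0xff = counters 0-7), while
 * FIXED_EVENT_CONSTRAINT(code, n) pins the pseudo-encoded event to fixed
 * counter n. In the extra_reg tables, the third argument of
 * INTEL_UEVENT_EXTRA_REG() is the mask of bits that are valid to program
 * into the named MSR on that model. See ../perf_event.h for the macros.
 */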
static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_snb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */

	/*
	 * When HT is off these events can only run on the bottom 4 counters.
	 * When HT is on, they are impacted by the HT bug and require EXCL access.
	 */
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */

	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
	INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_LDM_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */

	/*
	 * When HT is off these events can only run on the bottom 4 counters.
	 * When HT is on, they are impacted by the HT bug and require EXCL access.
	 */
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */

	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
{
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
	EVENT_EXTRA_END
};

static struct event_constraint intel_v1_event_constraints[] __read_mostly =
{
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_gen_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_v5_gen_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */
	FIXED_EVENT_CONSTRAINT(0x0500, 4),
	FIXED_EVENT_CONSTRAINT(0x0600, 5),
	FIXED_EVENT_CONSTRAINT(0x0700, 6),
	FIXED_EVENT_CONSTRAINT(0x0800, 7),
	FIXED_EVENT_CONSTRAINT(0x0900, 8),
	FIXED_EVENT_CONSTRAINT(0x0a00, 9),
	FIXED_EVENT_CONSTRAINT(0x0b00, 10),
	FIXED_EVENT_CONSTRAINT(0x0c00, 11),
	FIXED_EVENT_CONSTRAINT(0x0d00, 12),
	FIXED_EVENT_CONSTRAINT(0x0e00, 13),
	FIXED_EVENT_CONSTRAINT(0x0f00, 14),
	FIXED_EVENT_CONSTRAINT(0x1000, 15),
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_slm_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_grt_event_constraints[] __read_mostly = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
	FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF_TSC_P */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_skl_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x1c0, 0x2), /* INST_RETIRED.PREC_DIST */

	/*
	 * when HT is off, these can only run on the bottom 4 counters
	 */
	INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc6, 0xf), /* FRONTEND_RETIRED.* */

	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_knl_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x799ffbb6e7ull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x399ffbffe7ull, RSP_1),
	EVENT_EXTRA_END
};

static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	EVENT_EXTRA_END
};

static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	EVENT_EXTRA_END
};

static struct extra_reg intel_skl_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	/*
	 * Note: the low 8 bits of the eventsel code are not a contiguous
	 * field; some of the bits #GP when written. Those bits are masked out.
	 */
	INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
	EVENT_EXTRA_END
};

static struct event_constraint intel_icl_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x01c0, 0), /* old INST_RETIRED.PREC_DIST */
	FIXED_EVENT_CONSTRAINT(0x0100, 0), /* INST_RETIRED.PREC_DIST */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3),
	INTEL_EVENT_CONSTRAINT_RANGE(0x03, 0x0a, 0xf),
	INTEL_EVENT_CONSTRAINT_RANGE(0x1f, 0x28, 0xf),
	INTEL_EVENT_CONSTRAINT(0x32, 0xf), /* SW_PREFETCH_ACCESS.* */
	INTEL_EVENT_CONSTRAINT_RANGE(0x48, 0x56, 0xf),
	INTEL_EVENT_CONSTRAINT_RANGE(0x60, 0x8b, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xff), /* CYCLE_ACTIVITY.STALLS_TOTAL */
	INTEL_UEVENT_CONSTRAINT(0x10a3, 0xff), /* CYCLE_ACTIVITY.CYCLES_MEM_ANY */
	INTEL_UEVENT_CONSTRAINT(0x14a3, 0xff), /* CYCLE_ACTIVITY.STALLS_MEM_ANY */
	INTEL_EVENT_CONSTRAINT(0xa3, 0xf), /* CYCLE_ACTIVITY.* */
	INTEL_EVENT_CONSTRAINT_RANGE(0xa8, 0xb0, 0xf),
	INTEL_EVENT_CONSTRAINT_RANGE(0xb7, 0xbd, 0xf),
	INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xe6, 0xf),
	INTEL_EVENT_CONSTRAINT(0xef, 0xf),
	INTEL_EVENT_CONSTRAINT_RANGE(0xf0, 0xf4, 0xf),
	EVENT_CONSTRAINT_END
};
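/*
 * The INTEL_EVENT_CONSTRAINT_RANGE(first, last, mask) entries used above and
 * below apply the same counter mask to every event code from first to last
 * inclusive; they are shorthand for listing each event code individually.
 */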
static struct extra_reg intel_icl_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffbfffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffffbfffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
	EVENT_EXTRA_END
};

static struct extra_reg intel_glc_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE),
	INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0x7, FE),
	INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
	EVENT_EXTRA_END
};

static struct event_constraint intel_glc_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x0100, 0), /* INST_RETIRED.PREC_DIST */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF_TSC_P */
	FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_HEAVY_OPS, 4),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BR_MISPREDICT, 5),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FETCH_LAT, 6),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_MEM_BOUND, 7),

	INTEL_EVENT_CONSTRAINT(0x2e, 0xff),
	INTEL_EVENT_CONSTRAINT(0x3c, 0xff),
	/*
	 * Generally event codes < 0x90 are restricted to counters 0-3.
	 * Events 0x2e and 0x3c are the exceptions; they have no restriction.
	 */
	INTEL_EVENT_CONSTRAINT_RANGE(0x01, 0x8f, 0xf),

	INTEL_UEVENT_CONSTRAINT(0x01a3, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x08a3, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x04a4, 0x1),
	INTEL_UEVENT_CONSTRAINT(0x08a4, 0x1),
	INTEL_UEVENT_CONSTRAINT(0x02cd, 0x1),
	INTEL_EVENT_CONSTRAINT(0xce, 0x1),
	INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xdf, 0xf),
	/*
	 * Generally event codes >= 0x90 have no restrictions.
	 * The exceptions are defined above.
	 */
	INTEL_EVENT_CONSTRAINT_RANGE(0x90, 0xfe, 0xff),

	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_rwc_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	INTEL_UEVENT_EXTRA_REG(0x02c6, MSR_PEBS_FRONTEND, 0x9, FE),
	INTEL_UEVENT_EXTRA_REG(0x03c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE),
	INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0x7, FE),
	INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
	EVENT_EXTRA_END
};

EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3");
EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2");

static struct attribute *nhm_mem_events_attrs[] = {
	EVENT_PTR(mem_ld_nhm),
	NULL,
};

/*
 * topdown events for Intel Core CPUs.
 *
 * The events are all counted in slots, where a slot is an issue opportunity
 * in the 4-wide pipeline. Some events are already reported in slots; for
 * cycle events we multiply by the pipeline width (4).
 *
 * With Hyper Threading on, topdown metrics are either summed or averaged
 * between the threads of a core: (count_t0 + count_t1).
 *
 * For the average case the metric is always scaled to pipeline width,
 * so we use factor 2 ((count_t0 + count_t1) / 2 * 4)
 */

EVENT_ATTR_STR_HT(topdown-total-slots, td_total_slots,
	"event=0x3c,umask=0x0",			/* cpu_clk_unhalted.thread */
	"event=0x3c,umask=0x0,any=1");		/* cpu_clk_unhalted.thread_any */
EVENT_ATTR_STR_HT(topdown-total-slots.scale, td_total_slots_scale, "4", "2");
EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued,
	"event=0xe,umask=0x1");			/* uops_issued.any */
EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired,
	"event=0xc2,umask=0x2");		/* uops_retired.retire_slots */
EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles,
	"event=0x9c,umask=0x1");		/* idq_uops_not_delivered_core */
EVENT_ATTR_STR_HT(topdown-recovery-bubbles, td_recovery_bubbles,
	"event=0xd,umask=0x3,cmask=1",		/* int_misc.recovery_cycles */
	"event=0xd,umask=0x3,cmask=1,any=1");	/* int_misc.recovery_cycles_any */
EVENT_ATTR_STR_HT(topdown-recovery-bubbles.scale, td_recovery_bubbles_scale,
	"4", "2");

EVENT_ATTR_STR(slots, slots, "event=0x00,umask=0x4");
EVENT_ATTR_STR(topdown-retiring, td_retiring, "event=0x00,umask=0x80");
EVENT_ATTR_STR(topdown-bad-spec, td_bad_spec, "event=0x00,umask=0x81");
EVENT_ATTR_STR(topdown-fe-bound, td_fe_bound, "event=0x00,umask=0x82");
EVENT_ATTR_STR(topdown-be-bound, td_be_bound, "event=0x00,umask=0x83");
EVENT_ATTR_STR(topdown-heavy-ops, td_heavy_ops, "event=0x00,umask=0x84");
EVENT_ATTR_STR(topdown-br-mispredict, td_br_mispredict, "event=0x00,umask=0x85");
EVENT_ATTR_STR(topdown-fetch-lat, td_fetch_lat, "event=0x00,umask=0x86");
EVENT_ATTR_STR(topdown-mem-bound, td_mem_bound, "event=0x00,umask=0x87");

static struct attribute *snb_events_attrs[] = {
	EVENT_PTR(td_slots_issued),
	EVENT_PTR(td_slots_retired),
	EVENT_PTR(td_fetch_bubbles),
	EVENT_PTR(td_total_slots),
	EVENT_PTR(td_total_slots_scale),
	EVENT_PTR(td_recovery_bubbles),
	EVENT_PTR(td_recovery_bubbles_scale),
	NULL,
};

static struct attribute *snb_mem_events_attrs[] = {
	EVENT_PTR(mem_ld_snb),
	EVENT_PTR(mem_st_snb),
	NULL,
};

static struct event_constraint intel_hsw_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	/* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
	/* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
	/* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),

	/*
	 * When HT is off these events can only run on the bottom 4 counters.
	 * When HT is on, they are impacted by the HT bug and require EXCL access.
	 */
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */

	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_bdw_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UBIT_EVENT_CONSTRAINT(0x8a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_MISS */
	/*
	 * when HT is off, these can only run on the bottom 4 counters
	 */
	INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */
	EVENT_CONSTRAINT_END
};

static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
}
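/*
 * In the *_hw_cache_event_ids tables below, a value of 0 means no suitable
 * event is available for that cache/op/result combination, while -1 marks a
 * combination that is not meaningful for the hardware (e.g. L1I writes);
 * see set_ext_hw_attr() in ../core.c for how these values are interpreted.
 */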
static __initconst const u64 glc_hw_cache_event_ids
			[PERF_COUNT_HW_CACHE_MAX]
			[PERF_COUNT_HW_CACHE_OP_MAX]
			[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0,
		[ C(RESULT_MISS) ] = 0xe124,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_MISS) ] = 0xe424,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x12a,
		[ C(RESULT_MISS) ] = 0x12a,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x12a,
		[ C(RESULT_MISS) ] = 0x12a,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0,
		[ C(RESULT_MISS) ] = 0xe12,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0,
		[ C(RESULT_MISS) ] = 0xe13,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = 0xe11,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4c4,
		[ C(RESULT_MISS) ] = 0x4c5,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x12a,
		[ C(RESULT_MISS) ] = 0x12a,
	},
 },
};

static __initconst const u64 glc_hw_cache_extra_regs
			[PERF_COUNT_HW_CACHE_MAX]
			[PERF_COUNT_HW_CACHE_OP_MAX]
			[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x10001,
		[ C(RESULT_MISS) ] = 0x3fbfc00001,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x3f3ffc0002,
		[ C(RESULT_MISS) ] = 0x3f3fc00002,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x10c000001,
		[ C(RESULT_MISS) ] = 0x3fb3000001,
	},
 },
};

/*
 * Notes on the events:
 * - data reads do not include code reads (comparable to earlier tables)
 * - data counts include speculative execution (except L1 write, dtlb, bpu)
 * - remote node access includes remote memory, remote cache, remote mmio.
 * - prefetches are not included in the counts.
 * - icache miss does not include decoded icache
 */

#define SKL_DEMAND_DATA_RD		BIT_ULL(0)
#define SKL_DEMAND_RFO			BIT_ULL(1)
#define SKL_ANY_RESPONSE		BIT_ULL(16)
#define SKL_SUPPLIER_NONE		BIT_ULL(17)
#define SKL_L3_MISS_LOCAL_DRAM		BIT_ULL(26)
#define SKL_L3_MISS_REMOTE_HOP0_DRAM	BIT_ULL(27)
#define SKL_L3_MISS_REMOTE_HOP1_DRAM	BIT_ULL(28)
#define SKL_L3_MISS_REMOTE_HOP2P_DRAM	BIT_ULL(29)
#define SKL_L3_MISS			(SKL_L3_MISS_LOCAL_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP0_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP1_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP2P_DRAM)
#define SKL_SPL_HIT			BIT_ULL(30)
#define SKL_SNOOP_NONE			BIT_ULL(31)
#define SKL_SNOOP_NOT_NEEDED		BIT_ULL(32)
#define SKL_SNOOP_MISS			BIT_ULL(33)
#define SKL_SNOOP_HIT_NO_FWD		BIT_ULL(34)
#define SKL_SNOOP_HIT_WITH_FWD		BIT_ULL(35)
#define SKL_SNOOP_HITM			BIT_ULL(36)
#define SKL_SNOOP_NON_DRAM		BIT_ULL(37)
#define SKL_ANY_SNOOP			(SKL_SPL_HIT|SKL_SNOOP_NONE| \
					 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
					 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
					 SKL_SNOOP_HITM|SKL_SNOOP_NON_DRAM)
#define SKL_DEMAND_READ			SKL_DEMAND_DATA_RD
#define SKL_SNOOP_DRAM			(SKL_SNOOP_NONE| \
					 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
					 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
					 SKL_SNOOP_HITM|SKL_SPL_HIT)
#define SKL_DEMAND_WRITE		SKL_DEMAND_RFO
#define SKL_LLC_ACCESS			SKL_ANY_RESPONSE
#define SKL_L3_MISS_REMOTE		(SKL_L3_MISS_REMOTE_HOP0_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP1_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP2P_DRAM)
static __initconst const u64 skl_hw_cache_event_ids
			[PERF_COUNT_HW_CACHE_MAX]
			[PERF_COUNT_HW_CACHE_OP_MAX]
			[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS) ] = 0x151, /* L1D.REPLACEMENT */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */
		[ C(RESULT_MISS) ] = 0x0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x283, /* ICACHE_64B.MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
		[ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
		[ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS) ] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */
		[ C(RESULT_MISS) ] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2085, /* ITLB_MISSES.STLB_HIT */
		[ C(RESULT_MISS) ] = 0xe85, /* ITLB_MISSES.WALK_COMPLETED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
		[ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
		[ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
};

static __initconst const u64 skl_hw_cache_extra_regs
			[PERF_COUNT_HW_CACHE_MAX]
			[PERF_COUNT_HW_CACHE_OP_MAX]
			[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|SKL_LLC_ACCESS|SKL_ANY_SNOOP,
		[ C(RESULT_MISS) ] = SKL_DEMAND_READ|SKL_L3_MISS|SKL_ANY_SNOOP|
				     SKL_SUPPLIER_NONE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|SKL_LLC_ACCESS|SKL_ANY_SNOOP,
		[ C(RESULT_MISS) ] = SKL_DEMAND_WRITE|SKL_L3_MISS|SKL_ANY_SNOOP|
				     SKL_SUPPLIER_NONE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
		[ C(RESULT_MISS) ] = SKL_DEMAND_READ|SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
		[ C(RESULT_MISS) ] = SKL_DEMAND_WRITE|SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
};

#define SNB_DMND_DATA_RD	(1ULL << 0)
#define SNB_DMND_RFO		(1ULL << 1)
#define SNB_DMND_IFETCH		(1ULL << 2)
#define SNB_DMND_WB		(1ULL << 3)
#define SNB_PF_DATA_RD		(1ULL << 4)
#define SNB_PF_RFO		(1ULL << 5)
#define SNB_PF_IFETCH		(1ULL << 6)
#define SNB_LLC_DATA_RD		(1ULL << 7)
#define SNB_LLC_RFO		(1ULL << 8)
#define SNB_LLC_IFETCH		(1ULL << 9)
#define SNB_BUS_LOCKS		(1ULL << 10)
#define SNB_STRM_ST		(1ULL << 11)
#define SNB_OTHER		(1ULL << 15)
#define SNB_RESP_ANY		(1ULL << 16)
#define SNB_NO_SUPP		(1ULL << 17)
#define SNB_LLC_HITM		(1ULL << 18)
#define SNB_LLC_HITE		(1ULL << 19)
#define SNB_LLC_HITS		(1ULL << 20)
#define SNB_LLC_HITF		(1ULL << 21)
#define SNB_LOCAL		(1ULL << 22)
#define SNB_REMOTE		(0xffULL << 23)
#define SNB_SNP_NONE		(1ULL << 31)
#define SNB_SNP_NOT_NEEDED	(1ULL << 32)
#define SNB_SNP_MISS		(1ULL << 33)
#define SNB_NO_FWD		(1ULL << 34)
#define SNB_SNP_FWD		(1ULL << 35)
#define SNB_HITM		(1ULL << 36)
#define SNB_NON_DRAM		(1ULL << 37)

#define SNB_DMND_READ		(SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
#define SNB_DMND_WRITE		(SNB_DMND_RFO|SNB_LLC_RFO)
#define SNB_DMND_PREFETCH	(SNB_PF_DATA_RD|SNB_PF_RFO)

#define SNB_SNP_ANY		(SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
				 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \
				 SNB_HITM)

#define SNB_DRAM_ANY		(SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
#define SNB_DRAM_REMOTE		(SNB_REMOTE|SNB_SNP_ANY)

#define SNB_L3_ACCESS		SNB_RESP_ANY
#define SNB_L3_MISS		(SNB_DRAM_ANY|SNB_NON_DRAM)

static __initconst const u64 snb_hw_cache_extra_regs
			[PERF_COUNT_HW_CACHE_MAX]
			[PERF_COUNT_HW_CACHE_OP_MAX]
			[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS,
		[ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_L3_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS,
		[ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_L3_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS,
		[ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_L3_MISS,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY,
		[ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_DRAM_REMOTE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY,
		[ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY,
		[ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE,
	},
 },
};
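/*
 * Entries of 0x01b7 in the *_hw_cache_event_ids tables select the
 * OFFCORE_RESPONSE event; the request/response bits actually programmed
 * into MSR_OFFCORE_RSP_x for such an entry come from the matching slot of
 * the corresponding *_hw_cache_extra_regs table (e.g. snb_hw_cache_extra_regs
 * above for the SNB table below).
 */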
static __initconst const u64 snb_hw_cache_event_ids
			[PERF_COUNT_HW_CACHE_MAX]
			[PERF_COUNT_HW_CACHE_OP_MAX]
			[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
		[ C(RESULT_MISS) ] = 0x0151, /* L1D.REPLACEMENT */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
		[ C(RESULT_MISS) ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
		[ C(RESULT_MISS) ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
		[ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS) ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
 },

};

/*
 * Notes on the events:
 * - data reads do not include code reads (comparable to earlier tables)
 * - data counts include speculative execution (except L1 write, dtlb, bpu)
 * - remote node access includes remote memory, remote cache, remote mmio.
 * - prefetches are not included in the counts because they are not
 *   reliably counted.
 */

#define HSW_DEMAND_DATA_RD		BIT_ULL(0)
#define HSW_DEMAND_RFO			BIT_ULL(1)
#define HSW_ANY_RESPONSE		BIT_ULL(16)
#define HSW_SUPPLIER_NONE		BIT_ULL(17)
#define HSW_L3_MISS_LOCAL_DRAM		BIT_ULL(22)
#define HSW_L3_MISS_REMOTE_HOP0		BIT_ULL(27)
#define HSW_L3_MISS_REMOTE_HOP1		BIT_ULL(28)
#define HSW_L3_MISS_REMOTE_HOP2P	BIT_ULL(29)
#define HSW_L3_MISS			(HSW_L3_MISS_LOCAL_DRAM| \
					 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
					 HSW_L3_MISS_REMOTE_HOP2P)
#define HSW_SNOOP_NONE			BIT_ULL(31)
#define HSW_SNOOP_NOT_NEEDED		BIT_ULL(32)
#define HSW_SNOOP_MISS			BIT_ULL(33)
#define HSW_SNOOP_HIT_NO_FWD		BIT_ULL(34)
#define HSW_SNOOP_HIT_WITH_FWD		BIT_ULL(35)
#define HSW_SNOOP_HITM			BIT_ULL(36)
#define HSW_SNOOP_NON_DRAM		BIT_ULL(37)
#define HSW_ANY_SNOOP			(HSW_SNOOP_NONE| \
					 HSW_SNOOP_NOT_NEEDED|HSW_SNOOP_MISS| \
					 HSW_SNOOP_HIT_NO_FWD|HSW_SNOOP_HIT_WITH_FWD| \
					 HSW_SNOOP_HITM|HSW_SNOOP_NON_DRAM)
#define HSW_SNOOP_DRAM			(HSW_ANY_SNOOP & ~HSW_SNOOP_NON_DRAM)
#define HSW_DEMAND_READ			HSW_DEMAND_DATA_RD
#define HSW_DEMAND_WRITE		HSW_DEMAND_RFO
#define HSW_L3_MISS_REMOTE		(HSW_L3_MISS_REMOTE_HOP0| \
					 HSW_L3_MISS_REMOTE_HOP1|HSW_L3_MISS_REMOTE_HOP2P)
#define HSW_LLC_ACCESS			HSW_ANY_RESPONSE

#define BDW_L3_MISS_LOCAL		BIT(26)
#define BDW_L3_MISS			(BDW_L3_MISS_LOCAL| \
					 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
					 HSW_L3_MISS_REMOTE_HOP2P)


static __initconst const u64 hsw_hw_cache_event_ids
			[PERF_COUNT_HW_CACHE_MAX]
			[PERF_COUNT_HW_CACHE_OP_MAX]
			[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS) ] = 0x151, /* L1D.REPLACEMENT */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
		[ C(RESULT_MISS) ] = 0x0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x280, /* ICACHE.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
		[ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
		[ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS) ] = 0x108, /* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
		[ C(RESULT_MISS) ] = 0x149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x6085, /* ITLB_MISSES.STLB_HIT */
		[ C(RESULT_MISS) ] = 0x185, /* ITLB_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
		[ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
		[ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
};
static __initconst const u64 hsw_hw_cache_extra_regs
			[PERF_COUNT_HW_CACHE_MAX]
			[PERF_COUNT_HW_CACHE_OP_MAX]
			[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|HSW_LLC_ACCESS,
		[ C(RESULT_MISS) ] = HSW_DEMAND_READ|HSW_L3_MISS|HSW_ANY_SNOOP,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|HSW_LLC_ACCESS,
		[ C(RESULT_MISS) ] = HSW_DEMAND_WRITE|HSW_L3_MISS|HSW_ANY_SNOOP,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|HSW_L3_MISS_LOCAL_DRAM|
				       HSW_SNOOP_DRAM,
		[ C(RESULT_MISS) ] = HSW_DEMAND_READ|HSW_L3_MISS_REMOTE|
				     HSW_SNOOP_DRAM,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|HSW_L3_MISS_LOCAL_DRAM|
				       HSW_SNOOP_DRAM,
		[ C(RESULT_MISS) ] = HSW_DEMAND_WRITE|HSW_L3_MISS_REMOTE|
				     HSW_SNOOP_DRAM,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
};

static __initconst const u64 westmere_hw_cache_event_ids
			[PERF_COUNT_HW_CACHE_MAX]
			[PERF_COUNT_HW_CACHE_OP_MAX]
			[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
 },
};

/*
 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
 * See IA32 SDM Vol 3B 30.6.1.3
 */
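/*
 * In this layout the low byte (bits 0-7) encodes the request type and
 * bits 8-15 describe the response/supplier, as reflected by the NHM_*
 * definitions below: NHM_DMND_*/NHM_PF_* select requests, while the
 * *_HIT/*_DRAM/*_FWD bits describe where the line was supplied from.
 */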
#define NHM_DMND_DATA_RD	(1 << 0)
#define NHM_DMND_RFO		(1 << 1)
#define NHM_DMND_IFETCH		(1 << 2)
#define NHM_DMND_WB		(1 << 3)
#define NHM_PF_DATA_RD		(1 << 4)
#define NHM_PF_DATA_RFO		(1 << 5)
#define NHM_PF_IFETCH		(1 << 6)
#define NHM_OFFCORE_OTHER	(1 << 7)
#define NHM_UNCORE_HIT		(1 << 8)
#define NHM_OTHER_CORE_HIT_SNP	(1 << 9)
#define NHM_OTHER_CORE_HITM	(1 << 10)
				/* reserved */
#define NHM_REMOTE_CACHE_FWD	(1 << 12)
#define NHM_REMOTE_DRAM		(1 << 13)
#define NHM_LOCAL_DRAM		(1 << 14)
#define NHM_NON_DRAM		(1 << 15)

#define NHM_LOCAL		(NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_REMOTE		(NHM_REMOTE_DRAM)

#define NHM_DMND_READ		(NHM_DMND_DATA_RD)
#define NHM_DMND_WRITE		(NHM_DMND_RFO|NHM_DMND_WB)
#define NHM_DMND_PREFETCH	(NHM_PF_DATA_RD|NHM_PF_DATA_RFO)

#define NHM_L3_HIT	(NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
#define NHM_L3_MISS	(NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_L3_ACCESS	(NHM_L3_HIT|NHM_L3_MISS)

static __initconst const u64 nehalem_hw_cache_extra_regs
			[PERF_COUNT_HW_CACHE_MAX]
			[PERF_COUNT_HW_CACHE_OP_MAX]
			[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
		[ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_L3_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
		[ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_L3_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
		[ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_REMOTE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_REMOTE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_REMOTE,
	},
 },
};

static __initconst const u64 nehalem_hw_cache_event_ids
			[PERF_COUNT_HW_CACHE_MAX]
			[PERF_COUNT_HW_CACHE_OP_MAX]
			[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
 },
};
static __initconst const u64 core2_hw_cache_event_ids
			[PERF_COUNT_HW_CACHE_MAX]
			[PERF_COUNT_HW_CACHE_OP_MAX]
			[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};

static __initconst const u64 atom_hw_cache_event_ids
			[PERF_COUNT_HW_CACHE_MAX]
			[PERF_COUNT_HW_CACHE_OP_MAX]
			[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};
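/*
 * The Silvermont/Goldmont topdown attributes below mirror the big-core
 * scheme above; the smaller .scale values (2 and 3 respectively) reflect
 * the narrower issue width of those cores.
 */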
EVENT_ATTR_STR(topdown-total-slots, td_total_slots_slm, "event=0x3c");
EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_slm, "2");
/* no_alloc_cycles.not_delivered */
EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_slm,
	"event=0xca,umask=0x50");
EVENT_ATTR_STR(topdown-fetch-bubbles.scale, td_fetch_bubbles_scale_slm, "2");
/* uops_retired.all */
EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_slm,
	"event=0xc2,umask=0x10");
/* uops_retired.all */
EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_slm,
	"event=0xc2,umask=0x10");

static struct attribute *slm_events_attrs[] = {
	EVENT_PTR(td_total_slots_slm),
	EVENT_PTR(td_total_slots_scale_slm),
	EVENT_PTR(td_fetch_bubbles_slm),
	EVENT_PTR(td_fetch_bubbles_scale_slm),
	EVENT_PTR(td_slots_issued_slm),
	EVENT_PTR(td_slots_retired_slm),
	NULL
};

static struct extra_reg intel_slm_extra_regs[] __read_mostly =
{
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x368005ffffull, RSP_1),
	EVENT_EXTRA_END
};

#define SLM_DMND_READ		SNB_DMND_DATA_RD
#define SLM_DMND_WRITE		SNB_DMND_RFO
#define SLM_DMND_PREFETCH	(SNB_PF_DATA_RD|SNB_PF_RFO)

#define SLM_SNP_ANY		(SNB_SNP_NONE|SNB_SNP_MISS|SNB_NO_FWD|SNB_HITM)
#define SLM_LLC_ACCESS		SNB_RESP_ANY
#define SLM_LLC_MISS		(SLM_SNP_ANY|SNB_NON_DRAM)

static __initconst const u64 slm_hw_cache_extra_regs
			[PERF_COUNT_HW_CACHE_MAX]
			[PERF_COUNT_HW_CACHE_OP_MAX]
			[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS,
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS,
		[ C(RESULT_MISS) ] = SLM_DMND_WRITE|SLM_LLC_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SLM_DMND_PREFETCH|SLM_LLC_ACCESS,
		[ C(RESULT_MISS) ] = SLM_DMND_PREFETCH|SLM_LLC_MISS,
	},
 },
};
= 0x01b7, 1755 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */ 1756 [ C(RESULT_MISS) ] = 0x01b7, 1757 }, 1758 }, 1759 [ C(DTLB) ] = { 1760 [ C(OP_READ) ] = { 1761 [ C(RESULT_ACCESS) ] = 0, 1762 [ C(RESULT_MISS) ] = 0x0804, /* LD_DTLB_MISS */ 1763 }, 1764 [ C(OP_WRITE) ] = { 1765 [ C(RESULT_ACCESS) ] = 0, 1766 [ C(RESULT_MISS) ] = 0, 1767 }, 1768 [ C(OP_PREFETCH) ] = { 1769 [ C(RESULT_ACCESS) ] = 0, 1770 [ C(RESULT_MISS) ] = 0, 1771 }, 1772 }, 1773 [ C(ITLB) ] = { 1774 [ C(OP_READ) ] = { 1775 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */ 1776 [ C(RESULT_MISS) ] = 0x40205, /* PAGE_WALKS.I_SIDE_WALKS */ 1777 }, 1778 [ C(OP_WRITE) ] = { 1779 [ C(RESULT_ACCESS) ] = -1, 1780 [ C(RESULT_MISS) ] = -1, 1781 }, 1782 [ C(OP_PREFETCH) ] = { 1783 [ C(RESULT_ACCESS) ] = -1, 1784 [ C(RESULT_MISS) ] = -1, 1785 }, 1786 }, 1787 [ C(BPU ) ] = { 1788 [ C(OP_READ) ] = { 1789 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */ 1790 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */ 1791 }, 1792 [ C(OP_WRITE) ] = { 1793 [ C(RESULT_ACCESS) ] = -1, 1794 [ C(RESULT_MISS) ] = -1, 1795 }, 1796 [ C(OP_PREFETCH) ] = { 1797 [ C(RESULT_ACCESS) ] = -1, 1798 [ C(RESULT_MISS) ] = -1, 1799 }, 1800 }, 1801 }; 1802 1803 EVENT_ATTR_STR(topdown-total-slots, td_total_slots_glm, "event=0x3c"); 1804 EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_glm, "3"); 1805 /* UOPS_NOT_DELIVERED.ANY */ 1806 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_glm, "event=0x9c"); 1807 /* ISSUE_SLOTS_NOT_CONSUMED.RECOVERY */ 1808 EVENT_ATTR_STR(topdown-recovery-bubbles, td_recovery_bubbles_glm, "event=0xca,umask=0x02"); 1809 /* UOPS_RETIRED.ANY */ 1810 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_glm, "event=0xc2"); 1811 /* UOPS_ISSUED.ANY */ 1812 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_glm, "event=0x0e"); 1813 1814 static struct attribute *glm_events_attrs[] = { 1815 EVENT_PTR(td_total_slots_glm), 1816 EVENT_PTR(td_total_slots_scale_glm), 1817 EVENT_PTR(td_fetch_bubbles_glm), 1818 EVENT_PTR(td_recovery_bubbles_glm), 1819 EVENT_PTR(td_slots_issued_glm), 1820 EVENT_PTR(td_slots_retired_glm), 1821 NULL 1822 }; 1823 1824 static struct extra_reg intel_glm_extra_regs[] __read_mostly = { 1825 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ 1826 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x760005ffbfull, RSP_0), 1827 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x360005ffbfull, RSP_1), 1828 EVENT_EXTRA_END 1829 }; 1830 1831 #define GLM_DEMAND_DATA_RD BIT_ULL(0) 1832 #define GLM_DEMAND_RFO BIT_ULL(1) 1833 #define GLM_ANY_RESPONSE BIT_ULL(16) 1834 #define GLM_SNP_NONE_OR_MISS BIT_ULL(33) 1835 #define GLM_DEMAND_READ GLM_DEMAND_DATA_RD 1836 #define GLM_DEMAND_WRITE GLM_DEMAND_RFO 1837 #define GLM_DEMAND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO) 1838 #define GLM_LLC_ACCESS GLM_ANY_RESPONSE 1839 #define GLM_SNP_ANY (GLM_SNP_NONE_OR_MISS|SNB_NO_FWD|SNB_HITM) 1840 #define GLM_LLC_MISS (GLM_SNP_ANY|SNB_NON_DRAM) 1841 1842 static __initconst const u64 glm_hw_cache_event_ids 1843 [PERF_COUNT_HW_CACHE_MAX] 1844 [PERF_COUNT_HW_CACHE_OP_MAX] 1845 [PERF_COUNT_HW_CACHE_RESULT_MAX] = { 1846 [C(L1D)] = { 1847 [C(OP_READ)] = { 1848 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */ 1849 [C(RESULT_MISS)] = 0x0, 1850 }, 1851 [C(OP_WRITE)] = { 1852 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */ 1853 [C(RESULT_MISS)] = 0x0, 1854 }, 1855 [C(OP_PREFETCH)] = { 1856 [C(RESULT_ACCESS)] = 0x0, 1857 [C(RESULT_MISS)] = 0x0, 1858 }, 1859 }, 1860 [C(L1I)] = { 1861 [C(OP_READ)] = 
{ 1862 [C(RESULT_ACCESS)] = 0x0380, /* ICACHE.ACCESSES */ 1863 [C(RESULT_MISS)] = 0x0280, /* ICACHE.MISSES */ 1864 }, 1865 [C(OP_WRITE)] = { 1866 [C(RESULT_ACCESS)] = -1, 1867 [C(RESULT_MISS)] = -1, 1868 }, 1869 [C(OP_PREFETCH)] = { 1870 [C(RESULT_ACCESS)] = 0x0, 1871 [C(RESULT_MISS)] = 0x0, 1872 }, 1873 }, 1874 [C(LL)] = { 1875 [C(OP_READ)] = { 1876 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */ 1877 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */ 1878 }, 1879 [C(OP_WRITE)] = { 1880 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */ 1881 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */ 1882 }, 1883 [C(OP_PREFETCH)] = { 1884 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */ 1885 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */ 1886 }, 1887 }, 1888 [C(DTLB)] = { 1889 [C(OP_READ)] = { 1890 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */ 1891 [C(RESULT_MISS)] = 0x0, 1892 }, 1893 [C(OP_WRITE)] = { 1894 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */ 1895 [C(RESULT_MISS)] = 0x0, 1896 }, 1897 [C(OP_PREFETCH)] = { 1898 [C(RESULT_ACCESS)] = 0x0, 1899 [C(RESULT_MISS)] = 0x0, 1900 }, 1901 }, 1902 [C(ITLB)] = { 1903 [C(OP_READ)] = { 1904 [C(RESULT_ACCESS)] = 0x00c0, /* INST_RETIRED.ANY_P */ 1905 [C(RESULT_MISS)] = 0x0481, /* ITLB.MISS */ 1906 }, 1907 [C(OP_WRITE)] = { 1908 [C(RESULT_ACCESS)] = -1, 1909 [C(RESULT_MISS)] = -1, 1910 }, 1911 [C(OP_PREFETCH)] = { 1912 [C(RESULT_ACCESS)] = -1, 1913 [C(RESULT_MISS)] = -1, 1914 }, 1915 }, 1916 [C(BPU)] = { 1917 [C(OP_READ)] = { 1918 [C(RESULT_ACCESS)] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */ 1919 [C(RESULT_MISS)] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */ 1920 }, 1921 [C(OP_WRITE)] = { 1922 [C(RESULT_ACCESS)] = -1, 1923 [C(RESULT_MISS)] = -1, 1924 }, 1925 [C(OP_PREFETCH)] = { 1926 [C(RESULT_ACCESS)] = -1, 1927 [C(RESULT_MISS)] = -1, 1928 }, 1929 }, 1930 }; 1931 1932 static __initconst const u64 glm_hw_cache_extra_regs 1933 [PERF_COUNT_HW_CACHE_MAX] 1934 [PERF_COUNT_HW_CACHE_OP_MAX] 1935 [PERF_COUNT_HW_CACHE_RESULT_MAX] = { 1936 [C(LL)] = { 1937 [C(OP_READ)] = { 1938 [C(RESULT_ACCESS)] = GLM_DEMAND_READ| 1939 GLM_LLC_ACCESS, 1940 [C(RESULT_MISS)] = GLM_DEMAND_READ| 1941 GLM_LLC_MISS, 1942 }, 1943 [C(OP_WRITE)] = { 1944 [C(RESULT_ACCESS)] = GLM_DEMAND_WRITE| 1945 GLM_LLC_ACCESS, 1946 [C(RESULT_MISS)] = GLM_DEMAND_WRITE| 1947 GLM_LLC_MISS, 1948 }, 1949 [C(OP_PREFETCH)] = { 1950 [C(RESULT_ACCESS)] = GLM_DEMAND_PREFETCH| 1951 GLM_LLC_ACCESS, 1952 [C(RESULT_MISS)] = GLM_DEMAND_PREFETCH| 1953 GLM_LLC_MISS, 1954 }, 1955 }, 1956 }; 1957 1958 static __initconst const u64 glp_hw_cache_event_ids 1959 [PERF_COUNT_HW_CACHE_MAX] 1960 [PERF_COUNT_HW_CACHE_OP_MAX] 1961 [PERF_COUNT_HW_CACHE_RESULT_MAX] = { 1962 [C(L1D)] = { 1963 [C(OP_READ)] = { 1964 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */ 1965 [C(RESULT_MISS)] = 0x0, 1966 }, 1967 [C(OP_WRITE)] = { 1968 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */ 1969 [C(RESULT_MISS)] = 0x0, 1970 }, 1971 [C(OP_PREFETCH)] = { 1972 [C(RESULT_ACCESS)] = 0x0, 1973 [C(RESULT_MISS)] = 0x0, 1974 }, 1975 }, 1976 [C(L1I)] = { 1977 [C(OP_READ)] = { 1978 [C(RESULT_ACCESS)] = 0x0380, /* ICACHE.ACCESSES */ 1979 [C(RESULT_MISS)] = 0x0280, /* ICACHE.MISSES */ 1980 }, 1981 [C(OP_WRITE)] = { 1982 [C(RESULT_ACCESS)] = -1, 1983 [C(RESULT_MISS)] = -1, 1984 }, 1985 [C(OP_PREFETCH)] = { 1986 [C(RESULT_ACCESS)] = 0x0, 1987 [C(RESULT_MISS)] = 0x0, 1988 }, 1989 }, 1990 [C(LL)] = { 1991 [C(OP_READ)] = { 1992 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */ 1993 [C(RESULT_MISS)] = 
0x1b7, /* OFFCORE_RESPONSE */ 1994 }, 1995 [C(OP_WRITE)] = { 1996 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */ 1997 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */ 1998 }, 1999 [C(OP_PREFETCH)] = { 2000 [C(RESULT_ACCESS)] = 0x0, 2001 [C(RESULT_MISS)] = 0x0, 2002 }, 2003 }, 2004 [C(DTLB)] = { 2005 [C(OP_READ)] = { 2006 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */ 2007 [C(RESULT_MISS)] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */ 2008 }, 2009 [C(OP_WRITE)] = { 2010 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */ 2011 [C(RESULT_MISS)] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */ 2012 }, 2013 [C(OP_PREFETCH)] = { 2014 [C(RESULT_ACCESS)] = 0x0, 2015 [C(RESULT_MISS)] = 0x0, 2016 }, 2017 }, 2018 [C(ITLB)] = { 2019 [C(OP_READ)] = { 2020 [C(RESULT_ACCESS)] = 0x00c0, /* INST_RETIRED.ANY_P */ 2021 [C(RESULT_MISS)] = 0x0481, /* ITLB.MISS */ 2022 }, 2023 [C(OP_WRITE)] = { 2024 [C(RESULT_ACCESS)] = -1, 2025 [C(RESULT_MISS)] = -1, 2026 }, 2027 [C(OP_PREFETCH)] = { 2028 [C(RESULT_ACCESS)] = -1, 2029 [C(RESULT_MISS)] = -1, 2030 }, 2031 }, 2032 [C(BPU)] = { 2033 [C(OP_READ)] = { 2034 [C(RESULT_ACCESS)] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */ 2035 [C(RESULT_MISS)] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */ 2036 }, 2037 [C(OP_WRITE)] = { 2038 [C(RESULT_ACCESS)] = -1, 2039 [C(RESULT_MISS)] = -1, 2040 }, 2041 [C(OP_PREFETCH)] = { 2042 [C(RESULT_ACCESS)] = -1, 2043 [C(RESULT_MISS)] = -1, 2044 }, 2045 }, 2046 }; 2047 2048 static __initconst const u64 glp_hw_cache_extra_regs 2049 [PERF_COUNT_HW_CACHE_MAX] 2050 [PERF_COUNT_HW_CACHE_OP_MAX] 2051 [PERF_COUNT_HW_CACHE_RESULT_MAX] = { 2052 [C(LL)] = { 2053 [C(OP_READ)] = { 2054 [C(RESULT_ACCESS)] = GLM_DEMAND_READ| 2055 GLM_LLC_ACCESS, 2056 [C(RESULT_MISS)] = GLM_DEMAND_READ| 2057 GLM_LLC_MISS, 2058 }, 2059 [C(OP_WRITE)] = { 2060 [C(RESULT_ACCESS)] = GLM_DEMAND_WRITE| 2061 GLM_LLC_ACCESS, 2062 [C(RESULT_MISS)] = GLM_DEMAND_WRITE| 2063 GLM_LLC_MISS, 2064 }, 2065 [C(OP_PREFETCH)] = { 2066 [C(RESULT_ACCESS)] = 0x0, 2067 [C(RESULT_MISS)] = 0x0, 2068 }, 2069 }, 2070 }; 2071 2072 #define TNT_LOCAL_DRAM BIT_ULL(26) 2073 #define TNT_DEMAND_READ GLM_DEMAND_DATA_RD 2074 #define TNT_DEMAND_WRITE GLM_DEMAND_RFO 2075 #define TNT_LLC_ACCESS GLM_ANY_RESPONSE 2076 #define TNT_SNP_ANY (SNB_SNP_NOT_NEEDED|SNB_SNP_MISS| \ 2077 SNB_NO_FWD|SNB_SNP_FWD|SNB_HITM) 2078 #define TNT_LLC_MISS (TNT_SNP_ANY|SNB_NON_DRAM|TNT_LOCAL_DRAM) 2079 2080 static __initconst const u64 tnt_hw_cache_extra_regs 2081 [PERF_COUNT_HW_CACHE_MAX] 2082 [PERF_COUNT_HW_CACHE_OP_MAX] 2083 [PERF_COUNT_HW_CACHE_RESULT_MAX] = { 2084 [C(LL)] = { 2085 [C(OP_READ)] = { 2086 [C(RESULT_ACCESS)] = TNT_DEMAND_READ| 2087 TNT_LLC_ACCESS, 2088 [C(RESULT_MISS)] = TNT_DEMAND_READ| 2089 TNT_LLC_MISS, 2090 }, 2091 [C(OP_WRITE)] = { 2092 [C(RESULT_ACCESS)] = TNT_DEMAND_WRITE| 2093 TNT_LLC_ACCESS, 2094 [C(RESULT_MISS)] = TNT_DEMAND_WRITE| 2095 TNT_LLC_MISS, 2096 }, 2097 [C(OP_PREFETCH)] = { 2098 [C(RESULT_ACCESS)] = 0x0, 2099 [C(RESULT_MISS)] = 0x0, 2100 }, 2101 }, 2102 }; 2103 2104 EVENT_ATTR_STR(topdown-fe-bound, td_fe_bound_tnt, "event=0x71,umask=0x0"); 2105 EVENT_ATTR_STR(topdown-retiring, td_retiring_tnt, "event=0xc2,umask=0x0"); 2106 EVENT_ATTR_STR(topdown-bad-spec, td_bad_spec_tnt, "event=0x73,umask=0x6"); 2107 EVENT_ATTR_STR(topdown-be-bound, td_be_bound_tnt, "event=0x74,umask=0x0"); 2108 2109 static struct attribute *tnt_events_attrs[] = { 2110 EVENT_PTR(td_fe_bound_tnt), 2111 EVENT_PTR(td_retiring_tnt), 2112 EVENT_PTR(td_bad_spec_tnt), 2113 EVENT_PTR(td_be_bound_tnt), 2114 NULL, 
2115 }; 2116 2117 static struct extra_reg intel_tnt_extra_regs[] __read_mostly = { 2118 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ 2119 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x800ff0ffffff9fffull, RSP_0), 2120 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xff0ffffff9fffull, RSP_1), 2121 EVENT_EXTRA_END 2122 }; 2123 2124 EVENT_ATTR_STR(mem-loads, mem_ld_grt, "event=0xd0,umask=0x5,ldlat=3"); 2125 EVENT_ATTR_STR(mem-stores, mem_st_grt, "event=0xd0,umask=0x6"); 2126 2127 static struct attribute *grt_mem_attrs[] = { 2128 EVENT_PTR(mem_ld_grt), 2129 EVENT_PTR(mem_st_grt), 2130 NULL 2131 }; 2132 2133 static struct extra_reg intel_grt_extra_regs[] __read_mostly = { 2134 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ 2135 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0), 2136 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1), 2137 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x5d0), 2138 EVENT_EXTRA_END 2139 }; 2140 2141 EVENT_ATTR_STR(topdown-retiring, td_retiring_cmt, "event=0x72,umask=0x0"); 2142 EVENT_ATTR_STR(topdown-bad-spec, td_bad_spec_cmt, "event=0x73,umask=0x0"); 2143 2144 static struct attribute *cmt_events_attrs[] = { 2145 EVENT_PTR(td_fe_bound_tnt), 2146 EVENT_PTR(td_retiring_cmt), 2147 EVENT_PTR(td_bad_spec_cmt), 2148 EVENT_PTR(td_be_bound_tnt), 2149 NULL 2150 }; 2151 2152 static struct extra_reg intel_cmt_extra_regs[] __read_mostly = { 2153 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ 2154 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x800ff3ffffffffffull, RSP_0), 2155 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xff3ffffffffffull, RSP_1), 2156 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x5d0), 2157 INTEL_UEVENT_EXTRA_REG(0x0127, MSR_SNOOP_RSP_0, 0xffffffffffffffffull, SNOOP_0), 2158 INTEL_UEVENT_EXTRA_REG(0x0227, MSR_SNOOP_RSP_1, 0xffffffffffffffffull, SNOOP_1), 2159 EVENT_EXTRA_END 2160 }; 2161 2162 #define KNL_OT_L2_HITE BIT_ULL(19) /* Other Tile L2 Hit */ 2163 #define KNL_OT_L2_HITF BIT_ULL(20) /* Other Tile L2 Hit */ 2164 #define KNL_MCDRAM_LOCAL BIT_ULL(21) 2165 #define KNL_MCDRAM_FAR BIT_ULL(22) 2166 #define KNL_DDR_LOCAL BIT_ULL(23) 2167 #define KNL_DDR_FAR BIT_ULL(24) 2168 #define KNL_DRAM_ANY (KNL_MCDRAM_LOCAL | KNL_MCDRAM_FAR | \ 2169 KNL_DDR_LOCAL | KNL_DDR_FAR) 2170 #define KNL_L2_READ SLM_DMND_READ 2171 #define KNL_L2_WRITE SLM_DMND_WRITE 2172 #define KNL_L2_PREFETCH SLM_DMND_PREFETCH 2173 #define KNL_L2_ACCESS SLM_LLC_ACCESS 2174 #define KNL_L2_MISS (KNL_OT_L2_HITE | KNL_OT_L2_HITF | \ 2175 KNL_DRAM_ANY | SNB_SNP_ANY | \ 2176 SNB_NON_DRAM) 2177 2178 static __initconst const u64 knl_hw_cache_extra_regs 2179 [PERF_COUNT_HW_CACHE_MAX] 2180 [PERF_COUNT_HW_CACHE_OP_MAX] 2181 [PERF_COUNT_HW_CACHE_RESULT_MAX] = { 2182 [C(LL)] = { 2183 [C(OP_READ)] = { 2184 [C(RESULT_ACCESS)] = KNL_L2_READ | KNL_L2_ACCESS, 2185 [C(RESULT_MISS)] = 0, 2186 }, 2187 [C(OP_WRITE)] = { 2188 [C(RESULT_ACCESS)] = KNL_L2_WRITE | KNL_L2_ACCESS, 2189 [C(RESULT_MISS)] = KNL_L2_WRITE | KNL_L2_MISS, 2190 }, 2191 [C(OP_PREFETCH)] = { 2192 [C(RESULT_ACCESS)] = KNL_L2_PREFETCH | KNL_L2_ACCESS, 2193 [C(RESULT_MISS)] = KNL_L2_PREFETCH | KNL_L2_MISS, 2194 }, 2195 }, 2196 }; 2197 2198 /* 2199 * Used from PMIs where the LBRs are already disabled. 2200 * 2201 * This function could be called consecutively. It is required to remain in 2202 * disabled state if called consecutively. 
2203 * 2204 * During consecutive calls, the same disable value will be written to related 2205 * registers, so the PMU state remains unchanged. 2206 * 2207 * intel_bts events don't coexist with intel PMU's BTS events because of 2208 * x86_add_exclusive(x86_lbr_exclusive_lbr); there's no need to keep them 2209 * disabled around intel PMU's event batching etc, only inside the PMI handler. 2210 * 2211 * Avoid PEBS_ENABLE MSR access in PMIs. 2212 * The GLOBAL_CTRL has been disabled. All the counters do not count anymore. 2213 * It doesn't matter if the PEBS is enabled or not. 2214 * Usually, the PEBS status are not changed in PMIs. It's unnecessary to 2215 * access PEBS_ENABLE MSR in disable_all()/enable_all(). 2216 * However, there are some cases which may change PEBS status, e.g. PMI 2217 * throttle. The PEBS_ENABLE should be updated where the status changes. 2218 */ 2219 static __always_inline void __intel_pmu_disable_all(bool bts) 2220 { 2221 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2222 2223 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); 2224 2225 if (bts && test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) 2226 intel_pmu_disable_bts(); 2227 } 2228 2229 static __always_inline void intel_pmu_disable_all(void) 2230 { 2231 __intel_pmu_disable_all(true); 2232 intel_pmu_pebs_disable_all(); 2233 intel_pmu_lbr_disable_all(); 2234 } 2235 2236 static void __intel_pmu_enable_all(int added, bool pmi) 2237 { 2238 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2239 u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl); 2240 2241 intel_pmu_lbr_enable_all(pmi); 2242 2243 if (cpuc->fixed_ctrl_val != cpuc->active_fixed_ctrl_val) { 2244 wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, cpuc->fixed_ctrl_val); 2245 cpuc->active_fixed_ctrl_val = cpuc->fixed_ctrl_val; 2246 } 2247 2248 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 2249 intel_ctrl & ~cpuc->intel_ctrl_guest_mask); 2250 2251 if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) { 2252 struct perf_event *event = 2253 cpuc->events[INTEL_PMC_IDX_FIXED_BTS]; 2254 2255 if (WARN_ON_ONCE(!event)) 2256 return; 2257 2258 intel_pmu_enable_bts(event->hw.config); 2259 } 2260 } 2261 2262 static void intel_pmu_enable_all(int added) 2263 { 2264 intel_pmu_pebs_enable_all(); 2265 __intel_pmu_enable_all(added, false); 2266 } 2267 2268 static noinline int 2269 __intel_pmu_snapshot_branch_stack(struct perf_branch_entry *entries, 2270 unsigned int cnt, unsigned long flags) 2271 { 2272 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2273 2274 intel_pmu_lbr_read(); 2275 cnt = min_t(unsigned int, cnt, x86_pmu.lbr_nr); 2276 2277 memcpy(entries, cpuc->lbr_entries, sizeof(struct perf_branch_entry) * cnt); 2278 intel_pmu_enable_all(0); 2279 local_irq_restore(flags); 2280 return cnt; 2281 } 2282 2283 static int 2284 intel_pmu_snapshot_branch_stack(struct perf_branch_entry *entries, unsigned int cnt) 2285 { 2286 unsigned long flags; 2287 2288 /* must not have branches... */ 2289 local_irq_save(flags); 2290 __intel_pmu_disable_all(false); /* we don't care about BTS */ 2291 __intel_pmu_lbr_disable(); 2292 /* ... until here */ 2293 return __intel_pmu_snapshot_branch_stack(entries, cnt, flags); 2294 } 2295 2296 static int 2297 intel_pmu_snapshot_arch_branch_stack(struct perf_branch_entry *entries, unsigned int cnt) 2298 { 2299 unsigned long flags; 2300 2301 /* must not have branches... */ 2302 local_irq_save(flags); 2303 __intel_pmu_disable_all(false); /* we don't care about BTS */ 2304 __intel_pmu_arch_lbr_disable(); 2305 /* ... 
until here */ 2306 return __intel_pmu_snapshot_branch_stack(entries, cnt, flags); 2307 } 2308 2309 /* 2310 * Workaround for: 2311 * Intel Errata AAK100 (model 26) 2312 * Intel Errata AAP53 (model 30) 2313 * Intel Errata BD53 (model 44) 2314 * 2315 * The official story: 2316 * These chips need to be 'reset' when adding counters by programming the 2317 * magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either 2318 * in sequence on the same PMC or on different PMCs. 2319 * 2320 * In practice it appears some of these events do in fact count, and 2321 * we need to program all 4 events. 2322 */ 2323 static void intel_pmu_nhm_workaround(void) 2324 { 2325 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2326 static const unsigned long nhm_magic[4] = { 2327 0x4300B5, 2328 0x4300D2, 2329 0x4300B1, 2330 0x4300B1 2331 }; 2332 struct perf_event *event; 2333 int i; 2334 2335 /* 2336 * The Errata requires the steps below: 2337 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL; 2338 * 2) Configure 4 PERFEVTSELx with the magic events and clear 2339 * the corresponding PMCx; 2340 * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL; 2341 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL; 2342 * 5) Clear 4 pairs of PERFEVTSELx and PMCx; 2343 */ 2344 2345 /* 2346 * The real steps we choose are a little different from above. 2347 * A) To reduce MSR operations, we don't run step 1) as they 2348 * are already cleared before this function is called; 2349 * B) Call x86_perf_event_update to save PMCx before configuring 2350 * PERFEVTSELx with magic number; 2351 * C) With step 5), we do clear only when the PERFEVTSELx is 2352 * not used currently. 2353 * D) Call x86_perf_event_set_period to restore PMCx; 2354 */ 2355 2356 /* We always operate 4 pairs of PERF Counters */ 2357 for (i = 0; i < 4; i++) { 2358 event = cpuc->events[i]; 2359 if (event) 2360 static_call(x86_pmu_update)(event); 2361 } 2362 2363 for (i = 0; i < 4; i++) { 2364 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]); 2365 wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0); 2366 } 2367 2368 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf); 2369 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0); 2370 2371 for (i = 0; i < 4; i++) { 2372 event = cpuc->events[i]; 2373 2374 if (event) { 2375 static_call(x86_pmu_set_period)(event); 2376 __x86_pmu_enable_event(&event->hw, 2377 ARCH_PERFMON_EVENTSEL_ENABLE); 2378 } else 2379 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0); 2380 } 2381 } 2382 2383 static void intel_pmu_nhm_enable_all(int added) 2384 { 2385 if (added) 2386 intel_pmu_nhm_workaround(); 2387 intel_pmu_enable_all(added); 2388 } 2389 2390 static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on) 2391 { 2392 u64 val = on ? MSR_TFA_RTM_FORCE_ABORT : 0; 2393 2394 if (cpuc->tfa_shadow != val) { 2395 cpuc->tfa_shadow = val; 2396 wrmsrl(MSR_TSX_FORCE_ABORT, val); 2397 } 2398 } 2399 2400 static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr) 2401 { 2402 /* 2403 * We're going to use PMC3, make sure TFA is set before we touch it. 2404 */ 2405 if (cntr == 3) 2406 intel_set_tfa(cpuc, true); 2407 } 2408 2409 static void intel_tfa_pmu_enable_all(int added) 2410 { 2411 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2412 2413 /* 2414 * If we find PMC3 is no longer used when we enable the PMU, we can 2415 * clear TFA.
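 * Clearing it writes 0 to MSR_TSX_FORCE_ABORT via intel_set_tfa(), so RTM
 * transactions are no longer forced to abort on behalf of the PMU.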
2416 */ 2417 if (!test_bit(3, cpuc->active_mask)) 2418 intel_set_tfa(cpuc, false); 2419 2420 intel_pmu_enable_all(added); 2421 } 2422 2423 static inline u64 intel_pmu_get_status(void) 2424 { 2425 u64 status; 2426 2427 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); 2428 2429 return status; 2430 } 2431 2432 static inline void intel_pmu_ack_status(u64 ack) 2433 { 2434 wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack); 2435 } 2436 2437 static inline bool event_is_checkpointed(struct perf_event *event) 2438 { 2439 return unlikely(event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0; 2440 } 2441 2442 static inline void intel_set_masks(struct perf_event *event, int idx) 2443 { 2444 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2445 2446 if (event->attr.exclude_host) 2447 __set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask); 2448 if (event->attr.exclude_guest) 2449 __set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask); 2450 if (event_is_checkpointed(event)) 2451 __set_bit(idx, (unsigned long *)&cpuc->intel_cp_status); 2452 } 2453 2454 static inline void intel_clear_masks(struct perf_event *event, int idx) 2455 { 2456 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2457 2458 __clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask); 2459 __clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask); 2460 __clear_bit(idx, (unsigned long *)&cpuc->intel_cp_status); 2461 } 2462 2463 static void intel_pmu_disable_fixed(struct perf_event *event) 2464 { 2465 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2466 struct hw_perf_event *hwc = &event->hw; 2467 int idx = hwc->idx; 2468 u64 mask; 2469 2470 if (is_topdown_idx(idx)) { 2471 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2472 2473 /* 2474 * When there are other active TopDown events, 2475 * don't disable the fixed counter 3. 2476 */ 2477 if (*(u64 *)cpuc->active_mask & INTEL_PMC_OTHER_TOPDOWN_BITS(idx)) 2478 return; 2479 idx = INTEL_PMC_IDX_FIXED_SLOTS; 2480 } 2481 2482 intel_clear_masks(event, idx); 2483 2484 mask = intel_fixed_bits_by_idx(idx - INTEL_PMC_IDX_FIXED, INTEL_FIXED_BITS_MASK); 2485 cpuc->fixed_ctrl_val &= ~mask; 2486 } 2487 2488 static void intel_pmu_disable_event(struct perf_event *event) 2489 { 2490 struct hw_perf_event *hwc = &event->hw; 2491 int idx = hwc->idx; 2492 2493 switch (idx) { 2494 case 0 ... INTEL_PMC_IDX_FIXED - 1: 2495 intel_clear_masks(event, idx); 2496 x86_pmu_disable_event(event); 2497 break; 2498 case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1: 2499 case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END: 2500 intel_pmu_disable_fixed(event); 2501 break; 2502 case INTEL_PMC_IDX_FIXED_BTS: 2503 intel_pmu_disable_bts(); 2504 intel_pmu_drain_bts_buffer(); 2505 return; 2506 case INTEL_PMC_IDX_FIXED_VLBR: 2507 intel_clear_masks(event, idx); 2508 break; 2509 default: 2510 intel_clear_masks(event, idx); 2511 pr_warn("Failed to disable the event with invalid index %d\n", 2512 idx); 2513 return; 2514 } 2515 2516 /* 2517 * Needs to be called after x86_pmu_disable_event, 2518 * so we don't trigger the event without PEBS bit set. 
2519 */ 2520 if (unlikely(event->attr.precise_ip)) 2521 intel_pmu_pebs_disable(event); 2522 } 2523 2524 static void intel_pmu_assign_event(struct perf_event *event, int idx) 2525 { 2526 if (is_pebs_pt(event)) 2527 perf_report_aux_output_id(event, idx); 2528 } 2529 2530 static __always_inline bool intel_pmu_needs_branch_stack(struct perf_event *event) 2531 { 2532 return event->hw.flags & PERF_X86_EVENT_NEEDS_BRANCH_STACK; 2533 } 2534 2535 static void intel_pmu_del_event(struct perf_event *event) 2536 { 2537 if (intel_pmu_needs_branch_stack(event)) 2538 intel_pmu_lbr_del(event); 2539 if (event->attr.precise_ip) 2540 intel_pmu_pebs_del(event); 2541 } 2542 2543 static int icl_set_topdown_event_period(struct perf_event *event) 2544 { 2545 struct hw_perf_event *hwc = &event->hw; 2546 s64 left = local64_read(&hwc->period_left); 2547 2548 /* 2549 * The values in PERF_METRICS MSR are derived from fixed counter 3. 2550 * Software should start both registers, PERF_METRICS and fixed 2551 * counter 3, from zero. 2552 * Clear PERF_METRICS and Fixed counter 3 in initialization. 2553 * After that, both MSRs will be cleared for each read. 2554 * Don't need to clear them again. 2555 */ 2556 if (left == x86_pmu.max_period) { 2557 wrmsrl(MSR_CORE_PERF_FIXED_CTR3, 0); 2558 wrmsrl(MSR_PERF_METRICS, 0); 2559 hwc->saved_slots = 0; 2560 hwc->saved_metric = 0; 2561 } 2562 2563 if ((hwc->saved_slots) && is_slots_event(event)) { 2564 wrmsrl(MSR_CORE_PERF_FIXED_CTR3, hwc->saved_slots); 2565 wrmsrl(MSR_PERF_METRICS, hwc->saved_metric); 2566 } 2567 2568 perf_event_update_userpage(event); 2569 2570 return 0; 2571 } 2572 2573 DEFINE_STATIC_CALL(intel_pmu_set_topdown_event_period, x86_perf_event_set_period); 2574 2575 static inline u64 icl_get_metrics_event_value(u64 metric, u64 slots, int idx) 2576 { 2577 u32 val; 2578 2579 /* 2580 * The metric is reported as an 8-bit integer fraction 2581 * summing up to 0xff. 2582 * slots-in-metric = (Metric / 0xff) * slots 2583 */ 2584 val = (metric >> ((idx - INTEL_PMC_IDX_METRIC_BASE) * 8)) & 0xff; 2585 return mul_u64_u32_div(slots, val, 0xff); 2586 } 2587 2588 static u64 icl_get_topdown_value(struct perf_event *event, 2589 u64 slots, u64 metrics) 2590 { 2591 int idx = event->hw.idx; 2592 u64 delta; 2593 2594 if (is_metric_idx(idx)) 2595 delta = icl_get_metrics_event_value(metrics, slots, idx); 2596 else 2597 delta = slots; 2598 2599 return delta; 2600 } 2601 2602 static void __icl_update_topdown_event(struct perf_event *event, 2603 u64 slots, u64 metrics, 2604 u64 last_slots, u64 last_metrics) 2605 { 2606 u64 delta, last = 0; 2607 2608 delta = icl_get_topdown_value(event, slots, metrics); 2609 if (last_slots) 2610 last = icl_get_topdown_value(event, last_slots, last_metrics); 2611 2612 /* 2613 * The 8-bit integer fraction of the metric may not be accurate, 2614 * especially when the change is very small. 2615 * For example, if only a few bad_spec events happen, the fraction 2616 * may be reduced from 1 to 0. If so, the bad_spec event value 2617 * will be 0, which is definitely less than the last value. 2618 * Avoid updating event->count for this case.
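 * (e.g. with slots = 1000, a metric byte dropping from 1 to 0 gives
 * delta = 0 while last = mul_u64_u32_div(1000, 1, 0xff) = 3; the code
 * below therefore only adds the difference when delta > last)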
2619 */ 2620 if (delta > last) { 2621 delta -= last; 2622 local64_add(delta, &event->count); 2623 } 2624 } 2625 2626 static void update_saved_topdown_regs(struct perf_event *event, u64 slots, 2627 u64 metrics, int metric_end) 2628 { 2629 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2630 struct perf_event *other; 2631 int idx; 2632 2633 event->hw.saved_slots = slots; 2634 event->hw.saved_metric = metrics; 2635 2636 for_each_set_bit(idx, cpuc->active_mask, metric_end + 1) { 2637 if (!is_topdown_idx(idx)) 2638 continue; 2639 other = cpuc->events[idx]; 2640 other->hw.saved_slots = slots; 2641 other->hw.saved_metric = metrics; 2642 } 2643 } 2644 2645 /* 2646 * Update all active Topdown events. 2647 * 2648 * The PERF_METRICS and Fixed counter 3 are read separately. The values may be 2649 * modified by an NMI. The PMU has to be disabled before calling this function. 2650 */ 2651 2652 static u64 intel_update_topdown_event(struct perf_event *event, int metric_end) 2653 { 2654 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2655 struct perf_event *other; 2656 u64 slots, metrics; 2657 bool reset = true; 2658 int idx; 2659 2660 /* read Fixed counter 3 */ 2661 rdpmcl((3 | INTEL_PMC_FIXED_RDPMC_BASE), slots); 2662 if (!slots) 2663 return 0; 2664 2665 /* read PERF_METRICS */ 2666 rdpmcl(INTEL_PMC_FIXED_RDPMC_METRICS, metrics); 2667 2668 for_each_set_bit(idx, cpuc->active_mask, metric_end + 1) { 2669 if (!is_topdown_idx(idx)) 2670 continue; 2671 other = cpuc->events[idx]; 2672 __icl_update_topdown_event(other, slots, metrics, 2673 event ? event->hw.saved_slots : 0, 2674 event ? event->hw.saved_metric : 0); 2675 } 2676 2677 /* 2678 * Check and update this event, which may have been cleared 2679 * in active_mask, e.g. by x86_pmu_stop() 2680 */ 2681 if (event && !test_bit(event->hw.idx, cpuc->active_mask)) { 2682 __icl_update_topdown_event(event, slots, metrics, 2683 event->hw.saved_slots, 2684 event->hw.saved_metric); 2685 2686 /* 2687 * In x86_pmu_stop(), the event is cleared in active_mask first, 2688 * then the delta is drained, which indicates a context switch for 2689 * counting. 2690 * Save metric and slots for context switch. 2691 * Don't need to reset the PERF_METRICS and Fixed counter 3, 2692 * because the values will be restored in the next schedule in. 2693 */ 2694 update_saved_topdown_regs(event, slots, metrics, metric_end); 2695 reset = false; 2696 } 2697 2698 if (reset) { 2699 /* The fixed counter 3 has to be written before the PERF_METRICS. */ 2700 wrmsrl(MSR_CORE_PERF_FIXED_CTR3, 0); 2701 wrmsrl(MSR_PERF_METRICS, 0); 2702 if (event) 2703 update_saved_topdown_regs(event, 0, 0, metric_end); 2704 } 2705 2706 return slots; 2707 } 2708 2709 static u64 icl_update_topdown_event(struct perf_event *event) 2710 { 2711 return intel_update_topdown_event(event, INTEL_PMC_IDX_METRIC_BASE + 2712 x86_pmu.num_topdown_events - 1); 2713 } 2714 2715 DEFINE_STATIC_CALL(intel_pmu_update_topdown_event, x86_perf_event_update); 2716 2717 static void intel_pmu_read_topdown_event(struct perf_event *event) 2718 { 2719 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2720 2721 /* Only need to call update_topdown_event() once for group read.
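 * The SLOTS event performs the update; the other members of the group are
 * refreshed from the same readout in intel_update_topdown_event().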
*/ 2722 if ((cpuc->txn_flags & PERF_PMU_TXN_READ) && 2723 !is_slots_event(event)) 2724 return; 2725 2726 perf_pmu_disable(event->pmu); 2727 static_call(intel_pmu_update_topdown_event)(event); 2728 perf_pmu_enable(event->pmu); 2729 } 2730 2731 static void intel_pmu_read_event(struct perf_event *event) 2732 { 2733 if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD) 2734 intel_pmu_auto_reload_read(event); 2735 else if (is_topdown_count(event)) 2736 intel_pmu_read_topdown_event(event); 2737 else 2738 x86_perf_event_update(event); 2739 } 2740 2741 static void intel_pmu_enable_fixed(struct perf_event *event) 2742 { 2743 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2744 struct hw_perf_event *hwc = &event->hw; 2745 u64 mask, bits = 0; 2746 int idx = hwc->idx; 2747 2748 if (is_topdown_idx(idx)) { 2749 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2750 /* 2751 * When there are other active TopDown events, 2752 * don't enable the fixed counter 3 again. 2753 */ 2754 if (*(u64 *)cpuc->active_mask & INTEL_PMC_OTHER_TOPDOWN_BITS(idx)) 2755 return; 2756 2757 idx = INTEL_PMC_IDX_FIXED_SLOTS; 2758 } 2759 2760 intel_set_masks(event, idx); 2761 2762 /* 2763 * Enable IRQ generation (0x8), if not PEBS, 2764 * and enable ring-3 counting (0x2) and ring-0 counting (0x1) 2765 * if requested: 2766 */ 2767 if (!event->attr.precise_ip) 2768 bits |= INTEL_FIXED_0_ENABLE_PMI; 2769 if (hwc->config & ARCH_PERFMON_EVENTSEL_USR) 2770 bits |= INTEL_FIXED_0_USER; 2771 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS) 2772 bits |= INTEL_FIXED_0_KERNEL; 2773 2774 /* 2775 * ANY bit is supported in v3 and up 2776 */ 2777 if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY) 2778 bits |= INTEL_FIXED_0_ANYTHREAD; 2779 2780 idx -= INTEL_PMC_IDX_FIXED; 2781 bits = intel_fixed_bits_by_idx(idx, bits); 2782 mask = intel_fixed_bits_by_idx(idx, INTEL_FIXED_BITS_MASK); 2783 2784 if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip) { 2785 bits |= intel_fixed_bits_by_idx(idx, ICL_FIXED_0_ADAPTIVE); 2786 mask |= intel_fixed_bits_by_idx(idx, ICL_FIXED_0_ADAPTIVE); 2787 } 2788 2789 cpuc->fixed_ctrl_val &= ~mask; 2790 cpuc->fixed_ctrl_val |= bits; 2791 } 2792 2793 static void intel_pmu_enable_event(struct perf_event *event) 2794 { 2795 u64 enable_mask = ARCH_PERFMON_EVENTSEL_ENABLE; 2796 struct hw_perf_event *hwc = &event->hw; 2797 int idx = hwc->idx; 2798 2799 if (unlikely(event->attr.precise_ip)) 2800 intel_pmu_pebs_enable(event); 2801 2802 switch (idx) { 2803 case 0 ... INTEL_PMC_IDX_FIXED - 1: 2804 if (branch_sample_counters(event)) 2805 enable_mask |= ARCH_PERFMON_EVENTSEL_BR_CNTR; 2806 intel_set_masks(event, idx); 2807 __x86_pmu_enable_event(hwc, enable_mask); 2808 break; 2809 case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1: 2810 case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END: 2811 intel_pmu_enable_fixed(event); 2812 break; 2813 case INTEL_PMC_IDX_FIXED_BTS: 2814 if (!__this_cpu_read(cpu_hw_events.enabled)) 2815 return; 2816 intel_pmu_enable_bts(hwc->config); 2817 break; 2818 case INTEL_PMC_IDX_FIXED_VLBR: 2819 intel_set_masks(event, idx); 2820 break; 2821 default: 2822 pr_warn("Failed to enable the event with invalid index %d\n", 2823 idx); 2824 } 2825 } 2826 2827 static void intel_pmu_add_event(struct perf_event *event) 2828 { 2829 if (event->attr.precise_ip) 2830 intel_pmu_pebs_add(event); 2831 if (intel_pmu_needs_branch_stack(event)) 2832 intel_pmu_lbr_add(event); 2833 } 2834 2835 /* 2836 * Save and restart an expired event. 
Called by NMI contexts, 2837 * so it has to be careful about preempting normal event ops: 2838 */ 2839 int intel_pmu_save_and_restart(struct perf_event *event) 2840 { 2841 static_call(x86_pmu_update)(event); 2842 /* 2843 * For a checkpointed counter always reset back to 0. This 2844 * avoids a situation where the counter overflows, aborts the 2845 * transaction and is then set back to shortly before the 2846 * overflow, and overflows and aborts again. 2847 */ 2848 if (unlikely(event_is_checkpointed(event))) { 2849 /* No race with NMIs because the counter should not be armed */ 2850 wrmsrl(event->hw.event_base, 0); 2851 local64_set(&event->hw.prev_count, 0); 2852 } 2853 return static_call(x86_pmu_set_period)(event); 2854 } 2855 2856 static int intel_pmu_set_period(struct perf_event *event) 2857 { 2858 if (unlikely(is_topdown_count(event))) 2859 return static_call(intel_pmu_set_topdown_event_period)(event); 2860 2861 return x86_perf_event_set_period(event); 2862 } 2863 2864 static u64 intel_pmu_update(struct perf_event *event) 2865 { 2866 if (unlikely(is_topdown_count(event))) 2867 return static_call(intel_pmu_update_topdown_event)(event); 2868 2869 return x86_perf_event_update(event); 2870 } 2871 2872 static void intel_pmu_reset(void) 2873 { 2874 struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds); 2875 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2876 int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed); 2877 int num_counters = hybrid(cpuc->pmu, num_counters); 2878 unsigned long flags; 2879 int idx; 2880 2881 if (!num_counters) 2882 return; 2883 2884 local_irq_save(flags); 2885 2886 pr_info("clearing PMU state on CPU#%d\n", smp_processor_id()); 2887 2888 for (idx = 0; idx < num_counters; idx++) { 2889 wrmsrl_safe(x86_pmu_config_addr(idx), 0ull); 2890 wrmsrl_safe(x86_pmu_event_addr(idx), 0ull); 2891 } 2892 for (idx = 0; idx < num_counters_fixed; idx++) { 2893 if (fixed_counter_disabled(idx, cpuc->pmu)) 2894 continue; 2895 wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull); 2896 } 2897 2898 if (ds) 2899 ds->bts_index = ds->bts_buffer_base; 2900 2901 /* Ack all overflows and disable fixed counters */ 2902 if (x86_pmu.version >= 2) { 2903 intel_pmu_ack_status(intel_pmu_get_status()); 2904 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); 2905 } 2906 2907 /* Reset LBRs and LBR freezing */ 2908 if (x86_pmu.lbr_nr) { 2909 update_debugctlmsr(get_debugctlmsr() & 2910 ~(DEBUGCTLMSR_FREEZE_LBRS_ON_PMI|DEBUGCTLMSR_LBR)); 2911 } 2912 2913 local_irq_restore(flags); 2914 } 2915 2916 /* 2917 * We may be running with guest PEBS events created by KVM, and the 2918 * PEBS records are logged into the guest's DS and invisible to host. 2919 * 2920 * In the case of guest PEBS overflow, we only trigger a fake event 2921 * to emulate the PEBS overflow PMI for guest PEBS counters in KVM. 2922 * The guest will then vm-entry and check the guest DS area to read 2923 * the guest PEBS records. 2924 * 2925 * The contents and other behavior of the guest event do not matter. 
2926 */ 2927 static void x86_pmu_handle_guest_pebs(struct pt_regs *regs, 2928 struct perf_sample_data *data) 2929 { 2930 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2931 u64 guest_pebs_idxs = cpuc->pebs_enabled & ~cpuc->intel_ctrl_host_mask; 2932 struct perf_event *event = NULL; 2933 int bit; 2934 2935 if (!unlikely(perf_guest_state())) 2936 return; 2937 2938 if (!x86_pmu.pebs_ept || !x86_pmu.pebs_active || 2939 !guest_pebs_idxs) 2940 return; 2941 2942 for_each_set_bit(bit, (unsigned long *)&guest_pebs_idxs, 2943 INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed) { 2944 event = cpuc->events[bit]; 2945 if (!event->attr.precise_ip) 2946 continue; 2947 2948 perf_sample_data_init(data, 0, event->hw.last_period); 2949 if (perf_event_overflow(event, data, regs)) 2950 x86_pmu_stop(event, 0); 2951 2952 /* Inject one fake event is enough. */ 2953 break; 2954 } 2955 } 2956 2957 static int handle_pmi_common(struct pt_regs *regs, u64 status) 2958 { 2959 struct perf_sample_data data; 2960 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2961 int bit; 2962 int handled = 0; 2963 u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl); 2964 2965 inc_irq_stat(apic_perf_irqs); 2966 2967 /* 2968 * Ignore a range of extra bits in status that do not indicate 2969 * overflow by themselves. 2970 */ 2971 status &= ~(GLOBAL_STATUS_COND_CHG | 2972 GLOBAL_STATUS_ASIF | 2973 GLOBAL_STATUS_LBRS_FROZEN); 2974 if (!status) 2975 return 0; 2976 /* 2977 * In case multiple PEBS events are sampled at the same time, 2978 * it is possible to have GLOBAL_STATUS bit 62 set indicating 2979 * PEBS buffer overflow and also seeing at most 3 PEBS counters 2980 * having their bits set in the status register. This is a sign 2981 * that there was at least one PEBS record pending at the time 2982 * of the PMU interrupt. PEBS counters must only be processed 2983 * via the drain_pebs() calls and not via the regular sample 2984 * processing loop coming after that the function, otherwise 2985 * phony regular samples may be generated in the sampling buffer 2986 * not marked with the EXACT tag. Another possibility is to have 2987 * one PEBS event and at least one non-PEBS event which overflows 2988 * while PEBS has armed. In this case, bit 62 of GLOBAL_STATUS will 2989 * not be set, yet the overflow status bit for the PEBS counter will 2990 * be on Skylake. 2991 * 2992 * To avoid this problem, we systematically ignore the PEBS-enabled 2993 * counters from the GLOBAL_STATUS mask and we always process PEBS 2994 * events via drain_pebs(). 2995 */ 2996 status &= ~(cpuc->pebs_enabled & x86_pmu.pebs_capable); 2997 2998 /* 2999 * PEBS overflow sets bit 62 in the global status register 3000 */ 3001 if (__test_and_clear_bit(GLOBAL_STATUS_BUFFER_OVF_BIT, (unsigned long *)&status)) { 3002 u64 pebs_enabled = cpuc->pebs_enabled; 3003 3004 handled++; 3005 x86_pmu_handle_guest_pebs(regs, &data); 3006 x86_pmu.drain_pebs(regs, &data); 3007 status &= intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI; 3008 3009 /* 3010 * PMI throttle may be triggered, which stops the PEBS event. 3011 * Although cpuc->pebs_enabled is updated accordingly, the 3012 * MSR_IA32_PEBS_ENABLE is not updated. Because the 3013 * cpuc->enabled has been forced to 0 in PMI. 3014 * Update the MSR if pebs_enabled is changed. 
3015 */ 3016 if (pebs_enabled != cpuc->pebs_enabled) 3017 wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled); 3018 } 3019 3020 /* 3021 * Intel PT 3022 */ 3023 if (__test_and_clear_bit(GLOBAL_STATUS_TRACE_TOPAPMI_BIT, (unsigned long *)&status)) { 3024 handled++; 3025 if (!perf_guest_handle_intel_pt_intr()) 3026 intel_pt_interrupt(); 3027 } 3028 3029 /* 3030 * Intel Perf metrics 3031 */ 3032 if (__test_and_clear_bit(GLOBAL_STATUS_PERF_METRICS_OVF_BIT, (unsigned long *)&status)) { 3033 handled++; 3034 static_call(intel_pmu_update_topdown_event)(NULL); 3035 } 3036 3037 /* 3038 * Checkpointed counters can lead to 'spurious' PMIs because the 3039 * rollback caused by the PMI will have cleared the overflow status 3040 * bit. Therefore always force probe these counters. 3041 */ 3042 status |= cpuc->intel_cp_status; 3043 3044 for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) { 3045 struct perf_event *event = cpuc->events[bit]; 3046 3047 handled++; 3048 3049 if (!test_bit(bit, cpuc->active_mask)) 3050 continue; 3051 3052 if (!intel_pmu_save_and_restart(event)) 3053 continue; 3054 3055 perf_sample_data_init(&data, 0, event->hw.last_period); 3056 3057 if (has_branch_stack(event)) 3058 intel_pmu_lbr_save_brstack(&data, cpuc, event); 3059 3060 if (perf_event_overflow(event, &data, regs)) 3061 x86_pmu_stop(event, 0); 3062 } 3063 3064 return handled; 3065 } 3066 3067 /* 3068 * This handler is triggered by the local APIC, so the APIC IRQ handling 3069 * rules apply: 3070 */ 3071 static int intel_pmu_handle_irq(struct pt_regs *regs) 3072 { 3073 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 3074 bool late_ack = hybrid_bit(cpuc->pmu, late_ack); 3075 bool mid_ack = hybrid_bit(cpuc->pmu, mid_ack); 3076 int loops; 3077 u64 status; 3078 int handled; 3079 int pmu_enabled; 3080 3081 /* 3082 * Save the PMU state. 3083 * It needs to be restored when leaving the handler. 3084 */ 3085 pmu_enabled = cpuc->enabled; 3086 /* 3087 * In general, the early ACK is only applied for old platforms. 3088 * For the big core starts from Haswell, the late ACK should be 3089 * applied. 3090 * For the small core after Tremont, we have to do the ACK right 3091 * before re-enabling counters, which is in the middle of the 3092 * NMI handler. 3093 */ 3094 if (!late_ack && !mid_ack) 3095 apic_write(APIC_LVTPC, APIC_DM_NMI); 3096 intel_bts_disable_local(); 3097 cpuc->enabled = 0; 3098 __intel_pmu_disable_all(true); 3099 handled = intel_pmu_drain_bts_buffer(); 3100 handled += intel_bts_interrupt(); 3101 status = intel_pmu_get_status(); 3102 if (!status) 3103 goto done; 3104 3105 loops = 0; 3106 again: 3107 intel_pmu_lbr_read(); 3108 intel_pmu_ack_status(status); 3109 if (++loops > 100) { 3110 static bool warned; 3111 3112 if (!warned) { 3113 WARN(1, "perfevents: irq loop stuck!\n"); 3114 perf_event_print_debug(); 3115 warned = true; 3116 } 3117 intel_pmu_reset(); 3118 goto done; 3119 } 3120 3121 handled += handle_pmi_common(regs, status); 3122 3123 /* 3124 * Repeat if there is more work to be done: 3125 */ 3126 status = intel_pmu_get_status(); 3127 if (status) 3128 goto again; 3129 3130 done: 3131 if (mid_ack) 3132 apic_write(APIC_LVTPC, APIC_DM_NMI); 3133 /* Only restore PMU state when it's active. See x86_pmu_disable(). */ 3134 cpuc->enabled = pmu_enabled; 3135 if (pmu_enabled) 3136 __intel_pmu_enable_all(0, true); 3137 intel_bts_enable_local(); 3138 3139 /* 3140 * Only unmask the NMI after the overflow counters 3141 * have been reset. This avoids spurious NMIs on 3142 * Haswell CPUs. 
3143 */ 3144 if (late_ack) 3145 apic_write(APIC_LVTPC, APIC_DM_NMI); 3146 return handled; 3147 } 3148 3149 static struct event_constraint * 3150 intel_bts_constraints(struct perf_event *event) 3151 { 3152 if (unlikely(intel_pmu_has_bts(event))) 3153 return &bts_constraint; 3154 3155 return NULL; 3156 } 3157 3158 /* 3159 * Note: matches a fake event, like Fixed2. 3160 */ 3161 static struct event_constraint * 3162 intel_vlbr_constraints(struct perf_event *event) 3163 { 3164 struct event_constraint *c = &vlbr_constraint; 3165 3166 if (unlikely(constraint_match(c, event->hw.config))) { 3167 event->hw.flags |= c->flags; 3168 return c; 3169 } 3170 3171 return NULL; 3172 } 3173 3174 static int intel_alt_er(struct cpu_hw_events *cpuc, 3175 int idx, u64 config) 3176 { 3177 struct extra_reg *extra_regs = hybrid(cpuc->pmu, extra_regs); 3178 int alt_idx = idx; 3179 3180 if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1)) 3181 return idx; 3182 3183 if (idx == EXTRA_REG_RSP_0) 3184 alt_idx = EXTRA_REG_RSP_1; 3185 3186 if (idx == EXTRA_REG_RSP_1) 3187 alt_idx = EXTRA_REG_RSP_0; 3188 3189 if (config & ~extra_regs[alt_idx].valid_mask) 3190 return idx; 3191 3192 return alt_idx; 3193 } 3194 3195 static void intel_fixup_er(struct perf_event *event, int idx) 3196 { 3197 struct extra_reg *extra_regs = hybrid(event->pmu, extra_regs); 3198 event->hw.extra_reg.idx = idx; 3199 3200 if (idx == EXTRA_REG_RSP_0) { 3201 event->hw.config &= ~INTEL_ARCH_EVENT_MASK; 3202 event->hw.config |= extra_regs[EXTRA_REG_RSP_0].event; 3203 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0; 3204 } else if (idx == EXTRA_REG_RSP_1) { 3205 event->hw.config &= ~INTEL_ARCH_EVENT_MASK; 3206 event->hw.config |= extra_regs[EXTRA_REG_RSP_1].event; 3207 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1; 3208 } 3209 } 3210 3211 /* 3212 * manage allocation of shared extra msr for certain events 3213 * 3214 * sharing can be: 3215 * per-cpu: to be shared between the various events on a single PMU 3216 * per-core: per-cpu + shared by HT threads 3217 */ 3218 static struct event_constraint * 3219 __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc, 3220 struct perf_event *event, 3221 struct hw_perf_event_extra *reg) 3222 { 3223 struct event_constraint *c = &emptyconstraint; 3224 struct er_account *era; 3225 unsigned long flags; 3226 int idx = reg->idx; 3227 3228 /* 3229 * reg->alloc can be set due to existing state, so for fake cpuc we 3230 * need to ignore this, otherwise we might fail to allocate proper fake 3231 * state for this extra reg constraint. Also see the comment below. 3232 */ 3233 if (reg->alloc && !cpuc->is_fake) 3234 return NULL; /* call x86_get_event_constraint() */ 3235 3236 again: 3237 era = &cpuc->shared_regs->regs[idx]; 3238 /* 3239 * we use spin_lock_irqsave() to avoid lockdep issues when 3240 * passing a fake cpuc 3241 */ 3242 raw_spin_lock_irqsave(&era->lock, flags); 3243 3244 if (!atomic_read(&era->ref) || era->config == reg->config) { 3245 3246 /* 3247 * If its a fake cpuc -- as per validate_{group,event}() we 3248 * shouldn't touch event state and we can avoid doing so 3249 * since both will only call get_event_constraints() once 3250 * on each event, this avoids the need for reg->alloc. 3251 * 3252 * Not doing the ER fixup will only result in era->reg being 3253 * wrong, but since we won't actually try and program hardware 3254 * this isn't a problem either. 
3255 */ 3256 if (!cpuc->is_fake) { 3257 if (idx != reg->idx) 3258 intel_fixup_er(event, idx); 3259 3260 /* 3261 * x86_schedule_events() can call get_event_constraints() 3262 * multiple times on events in the case of incremental 3263 * scheduling(). reg->alloc ensures we only do the ER 3264 * allocation once. 3265 */ 3266 reg->alloc = 1; 3267 } 3268 3269 /* lock in msr value */ 3270 era->config = reg->config; 3271 era->reg = reg->reg; 3272 3273 /* one more user */ 3274 atomic_inc(&era->ref); 3275 3276 /* 3277 * need to call x86_get_event_constraint() 3278 * to check if associated event has constraints 3279 */ 3280 c = NULL; 3281 } else { 3282 idx = intel_alt_er(cpuc, idx, reg->config); 3283 if (idx != reg->idx) { 3284 raw_spin_unlock_irqrestore(&era->lock, flags); 3285 goto again; 3286 } 3287 } 3288 raw_spin_unlock_irqrestore(&era->lock, flags); 3289 3290 return c; 3291 } 3292 3293 static void 3294 __intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc, 3295 struct hw_perf_event_extra *reg) 3296 { 3297 struct er_account *era; 3298 3299 /* 3300 * Only put constraint if extra reg was actually allocated. Also takes 3301 * care of event which do not use an extra shared reg. 3302 * 3303 * Also, if this is a fake cpuc we shouldn't touch any event state 3304 * (reg->alloc) and we don't care about leaving inconsistent cpuc state 3305 * either since it'll be thrown out. 3306 */ 3307 if (!reg->alloc || cpuc->is_fake) 3308 return; 3309 3310 era = &cpuc->shared_regs->regs[reg->idx]; 3311 3312 /* one fewer user */ 3313 atomic_dec(&era->ref); 3314 3315 /* allocate again next time */ 3316 reg->alloc = 0; 3317 } 3318 3319 static struct event_constraint * 3320 intel_shared_regs_constraints(struct cpu_hw_events *cpuc, 3321 struct perf_event *event) 3322 { 3323 struct event_constraint *c = NULL, *d; 3324 struct hw_perf_event_extra *xreg, *breg; 3325 3326 xreg = &event->hw.extra_reg; 3327 if (xreg->idx != EXTRA_REG_NONE) { 3328 c = __intel_shared_reg_get_constraints(cpuc, event, xreg); 3329 if (c == &emptyconstraint) 3330 return c; 3331 } 3332 breg = &event->hw.branch_reg; 3333 if (breg->idx != EXTRA_REG_NONE) { 3334 d = __intel_shared_reg_get_constraints(cpuc, event, breg); 3335 if (d == &emptyconstraint) { 3336 __intel_shared_reg_put_constraints(cpuc, xreg); 3337 c = d; 3338 } 3339 } 3340 return c; 3341 } 3342 3343 struct event_constraint * 3344 x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 3345 struct perf_event *event) 3346 { 3347 struct event_constraint *event_constraints = hybrid(cpuc->pmu, event_constraints); 3348 struct event_constraint *c; 3349 3350 if (event_constraints) { 3351 for_each_event_constraint(c, event_constraints) { 3352 if (constraint_match(c, event->hw.config)) { 3353 event->hw.flags |= c->flags; 3354 return c; 3355 } 3356 } 3357 } 3358 3359 return &hybrid_var(cpuc->pmu, unconstrained); 3360 } 3361 3362 static struct event_constraint * 3363 __intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 3364 struct perf_event *event) 3365 { 3366 struct event_constraint *c; 3367 3368 c = intel_vlbr_constraints(event); 3369 if (c) 3370 return c; 3371 3372 c = intel_bts_constraints(event); 3373 if (c) 3374 return c; 3375 3376 c = intel_shared_regs_constraints(cpuc, event); 3377 if (c) 3378 return c; 3379 3380 c = intel_pebs_constraints(event); 3381 if (c) 3382 return c; 3383 3384 return x86_get_event_constraints(cpuc, idx, event); 3385 } 3386 3387 static void 3388 intel_start_scheduling(struct cpu_hw_events *cpuc) 3389 { 3390 struct intel_excl_cntrs *excl_cntrs = 
cpuc->excl_cntrs; 3391 struct intel_excl_states *xl; 3392 int tid = cpuc->excl_thread_id; 3393 3394 /* 3395 * nothing needed if in group validation mode 3396 */ 3397 if (cpuc->is_fake || !is_ht_workaround_enabled()) 3398 return; 3399 3400 /* 3401 * no exclusion needed 3402 */ 3403 if (WARN_ON_ONCE(!excl_cntrs)) 3404 return; 3405 3406 xl = &excl_cntrs->states[tid]; 3407 3408 xl->sched_started = true; 3409 /* 3410 * lock shared state until we are done scheduling 3411 * in stop_event_scheduling() 3412 * makes scheduling appear as a transaction 3413 */ 3414 raw_spin_lock(&excl_cntrs->lock); 3415 } 3416 3417 static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr) 3418 { 3419 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs; 3420 struct event_constraint *c = cpuc->event_constraint[idx]; 3421 struct intel_excl_states *xl; 3422 int tid = cpuc->excl_thread_id; 3423 3424 if (cpuc->is_fake || !is_ht_workaround_enabled()) 3425 return; 3426 3427 if (WARN_ON_ONCE(!excl_cntrs)) 3428 return; 3429 3430 if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) 3431 return; 3432 3433 xl = &excl_cntrs->states[tid]; 3434 3435 lockdep_assert_held(&excl_cntrs->lock); 3436 3437 if (c->flags & PERF_X86_EVENT_EXCL) 3438 xl->state[cntr] = INTEL_EXCL_EXCLUSIVE; 3439 else 3440 xl->state[cntr] = INTEL_EXCL_SHARED; 3441 } 3442 3443 static void 3444 intel_stop_scheduling(struct cpu_hw_events *cpuc) 3445 { 3446 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs; 3447 struct intel_excl_states *xl; 3448 int tid = cpuc->excl_thread_id; 3449 3450 /* 3451 * nothing needed if in group validation mode 3452 */ 3453 if (cpuc->is_fake || !is_ht_workaround_enabled()) 3454 return; 3455 /* 3456 * no exclusion needed 3457 */ 3458 if (WARN_ON_ONCE(!excl_cntrs)) 3459 return; 3460 3461 xl = &excl_cntrs->states[tid]; 3462 3463 xl->sched_started = false; 3464 /* 3465 * release shared state lock (acquired in intel_start_scheduling()) 3466 */ 3467 raw_spin_unlock(&excl_cntrs->lock); 3468 } 3469 3470 static struct event_constraint * 3471 dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx) 3472 { 3473 WARN_ON_ONCE(!cpuc->constraint_list); 3474 3475 if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) { 3476 struct event_constraint *cx; 3477 3478 /* 3479 * grab pre-allocated constraint entry 3480 */ 3481 cx = &cpuc->constraint_list[idx]; 3482 3483 /* 3484 * initialize dynamic constraint 3485 * with static constraint 3486 */ 3487 *cx = *c; 3488 3489 /* 3490 * mark constraint as dynamic 3491 */ 3492 cx->flags |= PERF_X86_EVENT_DYNAMIC; 3493 c = cx; 3494 } 3495 3496 return c; 3497 } 3498 3499 static struct event_constraint * 3500 intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event, 3501 int idx, struct event_constraint *c) 3502 { 3503 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs; 3504 struct intel_excl_states *xlo; 3505 int tid = cpuc->excl_thread_id; 3506 int is_excl, i, w; 3507 3508 /* 3509 * validating a group does not require 3510 * enforcing cross-thread exclusion 3511 */ 3512 if (cpuc->is_fake || !is_ht_workaround_enabled()) 3513 return c; 3514 3515 /* 3516 * no exclusion needed 3517 */ 3518 if (WARN_ON_ONCE(!excl_cntrs)) 3519 return c; 3520 3521 /* 3522 * because we modify the constraint, we need 3523 * to make a copy. Static constraints come 3524 * from static const tables. 
3525 * 3526 * only needed when constraint has not yet 3527 * been cloned (marked dynamic) 3528 */ 3529 c = dyn_constraint(cpuc, c, idx); 3530 3531 /* 3532 * From here on, the constraint is dynamic. 3533 * Either it was just allocated above, or it 3534 * was allocated during a earlier invocation 3535 * of this function 3536 */ 3537 3538 /* 3539 * state of sibling HT 3540 */ 3541 xlo = &excl_cntrs->states[tid ^ 1]; 3542 3543 /* 3544 * event requires exclusive counter access 3545 * across HT threads 3546 */ 3547 is_excl = c->flags & PERF_X86_EVENT_EXCL; 3548 if (is_excl && !(event->hw.flags & PERF_X86_EVENT_EXCL_ACCT)) { 3549 event->hw.flags |= PERF_X86_EVENT_EXCL_ACCT; 3550 if (!cpuc->n_excl++) 3551 WRITE_ONCE(excl_cntrs->has_exclusive[tid], 1); 3552 } 3553 3554 /* 3555 * Modify static constraint with current dynamic 3556 * state of thread 3557 * 3558 * EXCLUSIVE: sibling counter measuring exclusive event 3559 * SHARED : sibling counter measuring non-exclusive event 3560 * UNUSED : sibling counter unused 3561 */ 3562 w = c->weight; 3563 for_each_set_bit(i, c->idxmsk, X86_PMC_IDX_MAX) { 3564 /* 3565 * exclusive event in sibling counter 3566 * our corresponding counter cannot be used 3567 * regardless of our event 3568 */ 3569 if (xlo->state[i] == INTEL_EXCL_EXCLUSIVE) { 3570 __clear_bit(i, c->idxmsk); 3571 w--; 3572 continue; 3573 } 3574 /* 3575 * if measuring an exclusive event, sibling 3576 * measuring non-exclusive, then counter cannot 3577 * be used 3578 */ 3579 if (is_excl && xlo->state[i] == INTEL_EXCL_SHARED) { 3580 __clear_bit(i, c->idxmsk); 3581 w--; 3582 continue; 3583 } 3584 } 3585 3586 /* 3587 * if we return an empty mask, then switch 3588 * back to static empty constraint to avoid 3589 * the cost of freeing later on 3590 */ 3591 if (!w) 3592 c = &emptyconstraint; 3593 3594 c->weight = w; 3595 3596 return c; 3597 } 3598 3599 static struct event_constraint * 3600 intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 3601 struct perf_event *event) 3602 { 3603 struct event_constraint *c1, *c2; 3604 3605 c1 = cpuc->event_constraint[idx]; 3606 3607 /* 3608 * first time only 3609 * - static constraint: no change across incremental scheduling calls 3610 * - dynamic constraint: handled by intel_get_excl_constraints() 3611 */ 3612 c2 = __intel_get_event_constraints(cpuc, idx, event); 3613 if (c1) { 3614 WARN_ON_ONCE(!(c1->flags & PERF_X86_EVENT_DYNAMIC)); 3615 bitmap_copy(c1->idxmsk, c2->idxmsk, X86_PMC_IDX_MAX); 3616 c1->weight = c2->weight; 3617 c2 = c1; 3618 } 3619 3620 if (cpuc->excl_cntrs) 3621 return intel_get_excl_constraints(cpuc, event, idx, c2); 3622 3623 /* Not all counters support the branch counter feature. 
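 * Restrict such events to the counters advertised in x86_pmu.lbr_counters,
 * using a dynamic copy of the constraint so the static table is not modified.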
*/ 3624 if (branch_sample_counters(event)) { 3625 c2 = dyn_constraint(cpuc, c2, idx); 3626 c2->idxmsk64 &= x86_pmu.lbr_counters; 3627 c2->weight = hweight64(c2->idxmsk64); 3628 } 3629 3630 return c2; 3631 } 3632 3633 static void intel_put_excl_constraints(struct cpu_hw_events *cpuc, 3634 struct perf_event *event) 3635 { 3636 struct hw_perf_event *hwc = &event->hw; 3637 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs; 3638 int tid = cpuc->excl_thread_id; 3639 struct intel_excl_states *xl; 3640 3641 /* 3642 * nothing needed if in group validation mode 3643 */ 3644 if (cpuc->is_fake) 3645 return; 3646 3647 if (WARN_ON_ONCE(!excl_cntrs)) 3648 return; 3649 3650 if (hwc->flags & PERF_X86_EVENT_EXCL_ACCT) { 3651 hwc->flags &= ~PERF_X86_EVENT_EXCL_ACCT; 3652 if (!--cpuc->n_excl) 3653 WRITE_ONCE(excl_cntrs->has_exclusive[tid], 0); 3654 } 3655 3656 /* 3657 * If the event was actually assigned, then mark the counter state as 3658 * unused now. 3659 */ 3660 if (hwc->idx >= 0) { 3661 xl = &excl_cntrs->states[tid]; 3662 3663 /* 3664 * put_constraint may be called from x86_schedule_events() 3665 * which already has the lock held, so here make locking 3666 * conditional. 3667 */ 3668 if (!xl->sched_started) 3669 raw_spin_lock(&excl_cntrs->lock); 3670 3671 xl->state[hwc->idx] = INTEL_EXCL_UNUSED; 3672 3673 if (!xl->sched_started) 3674 raw_spin_unlock(&excl_cntrs->lock); 3675 } 3676 } 3677 3678 static void 3679 intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc, 3680 struct perf_event *event) 3681 { 3682 struct hw_perf_event_extra *reg; 3683 3684 reg = &event->hw.extra_reg; 3685 if (reg->idx != EXTRA_REG_NONE) 3686 __intel_shared_reg_put_constraints(cpuc, reg); 3687 3688 reg = &event->hw.branch_reg; 3689 if (reg->idx != EXTRA_REG_NONE) 3690 __intel_shared_reg_put_constraints(cpuc, reg); 3691 } 3692 3693 static void intel_put_event_constraints(struct cpu_hw_events *cpuc, 3694 struct perf_event *event) 3695 { 3696 intel_put_shared_regs_event_constraints(cpuc, event); 3697 3698 /* 3699 * if the PMU has exclusive counter restrictions, then 3700 * all events are subject to them and must call the 3701 * put_excl_constraints() routine 3702 */ 3703 if (cpuc->excl_cntrs) 3704 intel_put_excl_constraints(cpuc, event); 3705 } 3706 3707 static void intel_pebs_aliases_core2(struct perf_event *event) 3708 { 3709 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) { 3710 /* 3711 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P 3712 * (0x003c) so that we can use it with PEBS. 3713 * 3714 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't 3715 * PEBS capable. However we can use INST_RETIRED.ANY_P 3716 * (0x00c0), which is a PEBS capable event, to get the same 3717 * count. 3718 * 3719 * INST_RETIRED.ANY_P counts the number of cycles that retires 3720 * CNTMASK instructions. By setting CNTMASK to a value (16) 3721 * larger than the maximum number of instructions that can be 3722 * retired per cycle (4) and then inverting the condition, we 3723 * count all cycles that retire 16 or less instructions, which 3724 * is every cycle. 3725 * 3726 * Thereby we gain a PEBS capable cycle counter.
3727 */ 3728 u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16); 3729 3730 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK); 3731 event->hw.config = alt_config; 3732 } 3733 } 3734 3735 static void intel_pebs_aliases_snb(struct perf_event *event) 3736 { 3737 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) { 3738 /* 3739 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P 3740 * (0x003c) so that we can use it with PEBS. 3741 * 3742 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't 3743 * PEBS capable. However we can use UOPS_RETIRED.ALL 3744 * (0x01c2), which is a PEBS capable event, to get the same 3745 * count. 3746 * 3747 * UOPS_RETIRED.ALL counts the number of cycles that retires 3748 * CNTMASK micro-ops. By setting CNTMASK to a value (16) 3749 * larger than the maximum number of micro-ops that can be 3750 * retired per cycle (4) and then inverting the condition, we 3751 * count all cycles that retire 16 or less micro-ops, which 3752 * is every cycle. 3753 * 3754 * Thereby we gain a PEBS capable cycle counter. 3755 */ 3756 u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16); 3757 3758 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK); 3759 event->hw.config = alt_config; 3760 } 3761 } 3762 3763 static void intel_pebs_aliases_precdist(struct perf_event *event) 3764 { 3765 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) { 3766 /* 3767 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P 3768 * (0x003c) so that we can use it with PEBS. 3769 * 3770 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't 3771 * PEBS capable. However we can use INST_RETIRED.PREC_DIST 3772 * (0x01c0), which is a PEBS capable event, to get the same 3773 * count. 3774 * 3775 * The PREC_DIST event has special support to minimize sample 3776 * shadowing effects. One drawback is that it can be 3777 * only programmed on counter 1, but that seems like an 3778 * acceptable trade off. 3779 */ 3780 u64 alt_config = X86_CONFIG(.event=0xc0, .umask=0x01, .inv=1, .cmask=16); 3781 3782 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK); 3783 event->hw.config = alt_config; 3784 } 3785 } 3786 3787 static void intel_pebs_aliases_ivb(struct perf_event *event) 3788 { 3789 if (event->attr.precise_ip < 3) 3790 return intel_pebs_aliases_snb(event); 3791 return intel_pebs_aliases_precdist(event); 3792 } 3793 3794 static void intel_pebs_aliases_skl(struct perf_event *event) 3795 { 3796 if (event->attr.precise_ip < 3) 3797 return intel_pebs_aliases_core2(event); 3798 return intel_pebs_aliases_precdist(event); 3799 } 3800 3801 static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event) 3802 { 3803 unsigned long flags = x86_pmu.large_pebs_flags; 3804 3805 if (event->attr.use_clockid) 3806 flags &= ~PERF_SAMPLE_TIME; 3807 if (!event->attr.exclude_kernel) 3808 flags &= ~PERF_SAMPLE_REGS_USER; 3809 if (event->attr.sample_regs_user & ~PEBS_GP_REGS) 3810 flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR); 3811 return flags; 3812 } 3813 3814 static int intel_pmu_bts_config(struct perf_event *event) 3815 { 3816 struct perf_event_attr *attr = &event->attr; 3817 3818 if (unlikely(intel_pmu_has_bts(event))) { 3819 /* BTS is not supported by this architecture. */ 3820 if (!x86_pmu.bts_active) 3821 return -EOPNOTSUPP; 3822 3823 /* BTS is currently only allowed for user-mode. */ 3824 if (!attr->exclude_kernel) 3825 return -EOPNOTSUPP; 3826 3827 /* BTS is not allowed for precise events. 
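 * (a non-zero precise_ip requests PEBS-based precise sampling, which the
 * BTS pseudo event cannot provide)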
*/ 3828 if (attr->precise_ip) 3829 return -EOPNOTSUPP; 3830 3831 /* disallow bts if conflicting events are present */ 3832 if (x86_add_exclusive(x86_lbr_exclusive_lbr)) 3833 return -EBUSY; 3834 3835 event->destroy = hw_perf_lbr_event_destroy; 3836 } 3837 3838 return 0; 3839 } 3840 3841 static int core_pmu_hw_config(struct perf_event *event) 3842 { 3843 int ret = x86_pmu_hw_config(event); 3844 3845 if (ret) 3846 return ret; 3847 3848 return intel_pmu_bts_config(event); 3849 } 3850 3851 #define INTEL_TD_METRIC_AVAILABLE_MAX (INTEL_TD_METRIC_RETIRING + \ 3852 ((x86_pmu.num_topdown_events - 1) << 8)) 3853 3854 static bool is_available_metric_event(struct perf_event *event) 3855 { 3856 return is_metric_event(event) && 3857 event->attr.config <= INTEL_TD_METRIC_AVAILABLE_MAX; 3858 } 3859 3860 static inline bool is_mem_loads_event(struct perf_event *event) 3861 { 3862 return (event->attr.config & INTEL_ARCH_EVENT_MASK) == X86_CONFIG(.event=0xcd, .umask=0x01); 3863 } 3864 3865 static inline bool is_mem_loads_aux_event(struct perf_event *event) 3866 { 3867 return (event->attr.config & INTEL_ARCH_EVENT_MASK) == X86_CONFIG(.event=0x03, .umask=0x82); 3868 } 3869 3870 static inline bool require_mem_loads_aux_event(struct perf_event *event) 3871 { 3872 if (!(x86_pmu.flags & PMU_FL_MEM_LOADS_AUX)) 3873 return false; 3874 3875 if (is_hybrid()) 3876 return hybrid_pmu(event->pmu)->pmu_type == hybrid_big; 3877 3878 return true; 3879 } 3880 3881 static inline bool intel_pmu_has_cap(struct perf_event *event, int idx) 3882 { 3883 union perf_capabilities *intel_cap = &hybrid(event->pmu, intel_cap); 3884 3885 return test_bit(idx, (unsigned long *)&intel_cap->capabilities); 3886 } 3887 3888 static int intel_pmu_hw_config(struct perf_event *event) 3889 { 3890 int ret = x86_pmu_hw_config(event); 3891 3892 if (ret) 3893 return ret; 3894 3895 ret = intel_pmu_bts_config(event); 3896 if (ret) 3897 return ret; 3898 3899 if (event->attr.precise_ip) { 3900 if ((event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_FIXED_VLBR_EVENT) 3901 return -EINVAL; 3902 3903 if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) { 3904 event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD; 3905 if (!(event->attr.sample_type & 3906 ~intel_pmu_large_pebs_flags(event))) { 3907 event->hw.flags |= PERF_X86_EVENT_LARGE_PEBS; 3908 event->attach_state |= PERF_ATTACH_SCHED_CB; 3909 } 3910 } 3911 if (x86_pmu.pebs_aliases) 3912 x86_pmu.pebs_aliases(event); 3913 } 3914 3915 if (needs_branch_stack(event) && is_sampling_event(event)) 3916 event->hw.flags |= PERF_X86_EVENT_NEEDS_BRANCH_STACK; 3917 3918 if (branch_sample_counters(event)) { 3919 struct perf_event *leader, *sibling; 3920 int num = 0; 3921 3922 if (!(x86_pmu.flags & PMU_FL_BR_CNTR) || 3923 (event->attr.config & ~INTEL_ARCH_EVENT_MASK)) 3924 return -EINVAL; 3925 3926 /* 3927 * The branch counter logging is not supported in the call stack 3928 * mode yet, since we cannot simply flush the LBR during e.g., 3929 * multiplexing. Also, there is no obvious usage with the call 3930 * stack mode. Simply forbids it for now. 3931 * 3932 * If any events in the group enable the branch counter logging 3933 * feature, the group is treated as a branch counter logging 3934 * group, which requires the extra space to store the counters. 
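 * The checks below therefore walk the whole group: call-stack mode is
 * rejected for the leader and every sibling, and the number of members that
 * request branch counters must not exceed fls(x86_pmu.lbr_counters).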
3935 */ 3936 leader = event->group_leader; 3937 if (branch_sample_call_stack(leader)) 3938 return -EINVAL; 3939 if (branch_sample_counters(leader)) 3940 num++; 3941 leader->hw.flags |= PERF_X86_EVENT_BRANCH_COUNTERS; 3942 3943 for_each_sibling_event(sibling, leader) { 3944 if (branch_sample_call_stack(sibling)) 3945 return -EINVAL; 3946 if (branch_sample_counters(sibling)) 3947 num++; 3948 } 3949 3950 if (num > fls(x86_pmu.lbr_counters)) 3951 return -EINVAL; 3952 /* 3953 * Only applying the PERF_SAMPLE_BRANCH_COUNTERS doesn't 3954 * require any branch stack setup. 3955 * Clear the bit to avoid unnecessary branch stack setup. 3956 */ 3957 if (0 == (event->attr.branch_sample_type & 3958 ~(PERF_SAMPLE_BRANCH_PLM_ALL | 3959 PERF_SAMPLE_BRANCH_COUNTERS))) 3960 event->hw.flags &= ~PERF_X86_EVENT_NEEDS_BRANCH_STACK; 3961 3962 /* 3963 * Force the leader to be a LBR event. So LBRs can be reset 3964 * with the leader event. See intel_pmu_lbr_del() for details. 3965 */ 3966 if (!intel_pmu_needs_branch_stack(leader)) 3967 return -EINVAL; 3968 } 3969 3970 if (intel_pmu_needs_branch_stack(event)) { 3971 ret = intel_pmu_setup_lbr_filter(event); 3972 if (ret) 3973 return ret; 3974 event->attach_state |= PERF_ATTACH_SCHED_CB; 3975 3976 /* 3977 * BTS is set up earlier in this path, so don't account twice 3978 */ 3979 if (!unlikely(intel_pmu_has_bts(event))) { 3980 /* disallow lbr if conflicting events are present */ 3981 if (x86_add_exclusive(x86_lbr_exclusive_lbr)) 3982 return -EBUSY; 3983 3984 event->destroy = hw_perf_lbr_event_destroy; 3985 } 3986 } 3987 3988 if (event->attr.aux_output) { 3989 if (!event->attr.precise_ip) 3990 return -EINVAL; 3991 3992 event->hw.flags |= PERF_X86_EVENT_PEBS_VIA_PT; 3993 } 3994 3995 if ((event->attr.type == PERF_TYPE_HARDWARE) || 3996 (event->attr.type == PERF_TYPE_HW_CACHE)) 3997 return 0; 3998 3999 /* 4000 * Config Topdown slots and metric events 4001 * 4002 * The slots event on Fixed Counter 3 can support sampling, 4003 * which will be handled normally in x86_perf_event_update(). 4004 * 4005 * Metric events don't support sampling and require being paired 4006 * with a slots event as group leader. When the slots event 4007 * is used in a metrics group, it too cannot support sampling. 4008 */ 4009 if (intel_pmu_has_cap(event, PERF_CAP_METRICS_IDX) && is_topdown_event(event)) { 4010 if (event->attr.config1 || event->attr.config2) 4011 return -EINVAL; 4012 4013 /* 4014 * The TopDown metrics events and slots event don't 4015 * support any filters. 4016 */ 4017 if (event->attr.config & X86_ALL_EVENT_FLAGS) 4018 return -EINVAL; 4019 4020 if (is_available_metric_event(event)) { 4021 struct perf_event *leader = event->group_leader; 4022 4023 /* The metric events don't support sampling. */ 4024 if (is_sampling_event(event)) 4025 return -EINVAL; 4026 4027 /* The metric events require a slots group leader. */ 4028 if (!is_slots_event(leader)) 4029 return -EINVAL; 4030 4031 /* 4032 * The leader/SLOTS must not be a sampling event for 4033 * metric use; hardware requires it starts at 0 when used 4034 * in conjunction with MSR_PERF_METRICS. 4035 */ 4036 if (is_sampling_event(leader)) 4037 return -EINVAL; 4038 4039 event->event_caps |= PERF_EV_CAP_SIBLING; 4040 /* 4041 * Only once we have a METRICs sibling do we 4042 * need TopDown magic. 4043 */ 4044 leader->hw.flags |= PERF_X86_EVENT_TOPDOWN; 4045 event->hw.flags |= PERF_X86_EVENT_TOPDOWN; 4046 } 4047 } 4048 4049 /* 4050 * The load latency event X86_CONFIG(.event=0xcd, .umask=0x01) on SPR 4051 * doesn't function quite right. 
As a work-around it needs to always be 4052 * co-scheduled with a auxiliary event X86_CONFIG(.event=0x03, .umask=0x82). 4053 * The actual count of this second event is irrelevant it just needs 4054 * to be active to make the first event function correctly. 4055 * 4056 * In a group, the auxiliary event must be in front of the load latency 4057 * event. The rule is to simplify the implementation of the check. 4058 * That's because perf cannot have a complete group at the moment. 4059 */ 4060 if (require_mem_loads_aux_event(event) && 4061 (event->attr.sample_type & PERF_SAMPLE_DATA_SRC) && 4062 is_mem_loads_event(event)) { 4063 struct perf_event *leader = event->group_leader; 4064 struct perf_event *sibling = NULL; 4065 4066 /* 4067 * When this memload event is also the first event (no group 4068 * exists yet), then there is no aux event before it. 4069 */ 4070 if (leader == event) 4071 return -ENODATA; 4072 4073 if (!is_mem_loads_aux_event(leader)) { 4074 for_each_sibling_event(sibling, leader) { 4075 if (is_mem_loads_aux_event(sibling)) 4076 break; 4077 } 4078 if (list_entry_is_head(sibling, &leader->sibling_list, sibling_list)) 4079 return -ENODATA; 4080 } 4081 } 4082 4083 if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY)) 4084 return 0; 4085 4086 if (x86_pmu.version < 3) 4087 return -EINVAL; 4088 4089 ret = perf_allow_cpu(&event->attr); 4090 if (ret) 4091 return ret; 4092 4093 event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY; 4094 4095 return 0; 4096 } 4097 4098 /* 4099 * Currently, the only caller of this function is the atomic_switch_perf_msrs(). 4100 * The host perf context helps to prepare the values of the real hardware for 4101 * a set of msrs that need to be switched atomically in a vmx transaction. 4102 * 4103 * For example, the pseudocode needed to add a new msr should look like: 4104 * 4105 * arr[(*nr)++] = (struct perf_guest_switch_msr){ 4106 * .msr = the hardware msr address, 4107 * .host = the value the hardware has when it doesn't run a guest, 4108 * .guest = the value the hardware has when it runs a guest, 4109 * }; 4110 * 4111 * These values have nothing to do with the emulated values the guest sees 4112 * when it uses {RD,WR}MSR, which should be handled by the KVM context, 4113 * specifically in the intel_pmu_{get,set}_msr(). 4114 */ 4115 static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data) 4116 { 4117 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 4118 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs; 4119 struct kvm_pmu *kvm_pmu = (struct kvm_pmu *)data; 4120 u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl); 4121 u64 pebs_mask = cpuc->pebs_enabled & x86_pmu.pebs_capable; 4122 int global_ctrl, pebs_enable; 4123 4124 /* 4125 * In addition to obeying exclude_guest/exclude_host, remove bits being 4126 * used for PEBS when running a guest, because PEBS writes to virtual 4127 * addresses (not physical addresses). 4128 */ 4129 *nr = 0; 4130 global_ctrl = (*nr)++; 4131 arr[global_ctrl] = (struct perf_guest_switch_msr){ 4132 .msr = MSR_CORE_PERF_GLOBAL_CTRL, 4133 .host = intel_ctrl & ~cpuc->intel_ctrl_guest_mask, 4134 .guest = intel_ctrl & ~cpuc->intel_ctrl_host_mask & ~pebs_mask, 4135 }; 4136 4137 if (!x86_pmu.pebs) 4138 return arr; 4139 4140 /* 4141 * If PMU counter has PEBS enabled it is not enough to 4142 * disable counter on a guest entry since PEBS memory 4143 * write can overshoot guest entry and corrupt guest 4144 * memory. Disabling PEBS solves the problem. 4145 * 4146 * Don't do this if the CPU already enforces it. 
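 * For CPUs without PEBS isolation, the entry below leaves
 * MSR_IA32_PEBS_ENABLE at 0 while the guest runs, i.e. PEBS is switched off
 * entirely across guest entry.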
4147 */ 4148 if (x86_pmu.pebs_no_isolation) { 4149 arr[(*nr)++] = (struct perf_guest_switch_msr){ 4150 .msr = MSR_IA32_PEBS_ENABLE, 4151 .host = cpuc->pebs_enabled, 4152 .guest = 0, 4153 }; 4154 return arr; 4155 } 4156 4157 if (!kvm_pmu || !x86_pmu.pebs_ept) 4158 return arr; 4159 4160 arr[(*nr)++] = (struct perf_guest_switch_msr){ 4161 .msr = MSR_IA32_DS_AREA, 4162 .host = (unsigned long)cpuc->ds, 4163 .guest = kvm_pmu->ds_area, 4164 }; 4165 4166 if (x86_pmu.intel_cap.pebs_baseline) { 4167 arr[(*nr)++] = (struct perf_guest_switch_msr){ 4168 .msr = MSR_PEBS_DATA_CFG, 4169 .host = cpuc->active_pebs_data_cfg, 4170 .guest = kvm_pmu->pebs_data_cfg, 4171 }; 4172 } 4173 4174 pebs_enable = (*nr)++; 4175 arr[pebs_enable] = (struct perf_guest_switch_msr){ 4176 .msr = MSR_IA32_PEBS_ENABLE, 4177 .host = cpuc->pebs_enabled & ~cpuc->intel_ctrl_guest_mask, 4178 .guest = pebs_mask & ~cpuc->intel_ctrl_host_mask, 4179 }; 4180 4181 if (arr[pebs_enable].host) { 4182 /* Disable guest PEBS if host PEBS is enabled. */ 4183 arr[pebs_enable].guest = 0; 4184 } else { 4185 /* Disable guest PEBS thoroughly for cross-mapped PEBS counters. */ 4186 arr[pebs_enable].guest &= ~kvm_pmu->host_cross_mapped_mask; 4187 arr[global_ctrl].guest &= ~kvm_pmu->host_cross_mapped_mask; 4188 /* Set hw GLOBAL_CTRL bits for PEBS counter when it runs for guest */ 4189 arr[global_ctrl].guest |= arr[pebs_enable].guest; 4190 } 4191 4192 return arr; 4193 } 4194 4195 static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr, void *data) 4196 { 4197 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 4198 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs; 4199 int idx; 4200 4201 for (idx = 0; idx < x86_pmu.num_counters; idx++) { 4202 struct perf_event *event = cpuc->events[idx]; 4203 4204 arr[idx].msr = x86_pmu_config_addr(idx); 4205 arr[idx].host = arr[idx].guest = 0; 4206 4207 if (!test_bit(idx, cpuc->active_mask)) 4208 continue; 4209 4210 arr[idx].host = arr[idx].guest = 4211 event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE; 4212 4213 if (event->attr.exclude_host) 4214 arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE; 4215 else if (event->attr.exclude_guest) 4216 arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE; 4217 } 4218 4219 *nr = x86_pmu.num_counters; 4220 return arr; 4221 } 4222 4223 static void core_pmu_enable_event(struct perf_event *event) 4224 { 4225 if (!event->attr.exclude_host) 4226 x86_pmu_enable_event(event); 4227 } 4228 4229 static void core_pmu_enable_all(int added) 4230 { 4231 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 4232 int idx; 4233 4234 for (idx = 0; idx < x86_pmu.num_counters; idx++) { 4235 struct hw_perf_event *hwc = &cpuc->events[idx]->hw; 4236 4237 if (!test_bit(idx, cpuc->active_mask) || 4238 cpuc->events[idx]->attr.exclude_host) 4239 continue; 4240 4241 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE); 4242 } 4243 } 4244 4245 static int hsw_hw_config(struct perf_event *event) 4246 { 4247 int ret = intel_pmu_hw_config(event); 4248 4249 if (ret) 4250 return ret; 4251 if (!boot_cpu_has(X86_FEATURE_RTM) && !boot_cpu_has(X86_FEATURE_HLE)) 4252 return 0; 4253 event->hw.config |= event->attr.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED); 4254 4255 /* 4256 * IN_TX/IN_TX-CP filters are not supported by the Haswell PMU with 4257 * PEBS or in ANY thread mode. Since the results are non-sensical forbid 4258 * this combination. 
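 * (ANY-thread mode is the ARCH_PERFMON_EVENTSEL_ANY bit tested below;
 * a non-zero precise_ip selects PEBS)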
4259 */ 4260 if ((event->hw.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)) && 4261 ((event->hw.config & ARCH_PERFMON_EVENTSEL_ANY) || 4262 event->attr.precise_ip > 0)) 4263 return -EOPNOTSUPP; 4264 4265 if (event_is_checkpointed(event)) { 4266 /* 4267 * Sampling of checkpointed events can cause situations where 4268 * the CPU constantly aborts because of a overflow, which is 4269 * then checkpointed back and ignored. Forbid checkpointing 4270 * for sampling. 4271 * 4272 * But still allow a long sampling period, so that perf stat 4273 * from KVM works. 4274 */ 4275 if (event->attr.sample_period > 0 && 4276 event->attr.sample_period < 0x7fffffff) 4277 return -EOPNOTSUPP; 4278 } 4279 return 0; 4280 } 4281 4282 static struct event_constraint counter0_constraint = 4283 INTEL_ALL_EVENT_CONSTRAINT(0, 0x1); 4284 4285 static struct event_constraint counter1_constraint = 4286 INTEL_ALL_EVENT_CONSTRAINT(0, 0x2); 4287 4288 static struct event_constraint counter0_1_constraint = 4289 INTEL_ALL_EVENT_CONSTRAINT(0, 0x3); 4290 4291 static struct event_constraint counter2_constraint = 4292 EVENT_CONSTRAINT(0, 0x4, 0); 4293 4294 static struct event_constraint fixed0_constraint = 4295 FIXED_EVENT_CONSTRAINT(0x00c0, 0); 4296 4297 static struct event_constraint fixed0_counter0_constraint = 4298 INTEL_ALL_EVENT_CONSTRAINT(0, 0x100000001ULL); 4299 4300 static struct event_constraint fixed0_counter0_1_constraint = 4301 INTEL_ALL_EVENT_CONSTRAINT(0, 0x100000003ULL); 4302 4303 static struct event_constraint counters_1_7_constraint = 4304 INTEL_ALL_EVENT_CONSTRAINT(0, 0xfeULL); 4305 4306 static struct event_constraint * 4307 hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 4308 struct perf_event *event) 4309 { 4310 struct event_constraint *c; 4311 4312 c = intel_get_event_constraints(cpuc, idx, event); 4313 4314 /* Handle special quirk on in_tx_checkpointed only in counter 2 */ 4315 if (event->hw.config & HSW_IN_TX_CHECKPOINTED) { 4316 if (c->idxmsk64 & (1U << 2)) 4317 return &counter2_constraint; 4318 return &emptyconstraint; 4319 } 4320 4321 return c; 4322 } 4323 4324 static struct event_constraint * 4325 icl_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 4326 struct perf_event *event) 4327 { 4328 /* 4329 * Fixed counter 0 has less skid. 4330 * Force instruction:ppp in Fixed counter 0 4331 */ 4332 if ((event->attr.precise_ip == 3) && 4333 constraint_match(&fixed0_constraint, event->hw.config)) 4334 return &fixed0_constraint; 4335 4336 return hsw_get_event_constraints(cpuc, idx, event); 4337 } 4338 4339 static struct event_constraint * 4340 glc_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 4341 struct perf_event *event) 4342 { 4343 struct event_constraint *c; 4344 4345 c = icl_get_event_constraints(cpuc, idx, event); 4346 4347 /* 4348 * The :ppp indicates the Precise Distribution (PDist) facility, which 4349 * is only supported on the GP counter 0. If a :ppp event which is not 4350 * available on the GP counter 0, error out. 4351 * Exception: Instruction PDIR is only available on the fixed counter 0. 
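 * I.e. instruction:ppp stays on fixed counter 0 (already handled by
 * icl_get_event_constraints() above); any other :ppp event is forced onto
 * GP counter 0 when its constraint allows it and gets an empty constraint
 * otherwise.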
4352 */ 4353 if ((event->attr.precise_ip == 3) && 4354 !constraint_match(&fixed0_constraint, event->hw.config)) { 4355 if (c->idxmsk64 & BIT_ULL(0)) 4356 return &counter0_constraint; 4357 4358 return &emptyconstraint; 4359 } 4360 4361 return c; 4362 } 4363 4364 static struct event_constraint * 4365 glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 4366 struct perf_event *event) 4367 { 4368 struct event_constraint *c; 4369 4370 /* :ppp means to do reduced skid PEBS which is PMC0 only. */ 4371 if (event->attr.precise_ip == 3) 4372 return &counter0_constraint; 4373 4374 c = intel_get_event_constraints(cpuc, idx, event); 4375 4376 return c; 4377 } 4378 4379 static struct event_constraint * 4380 tnt_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 4381 struct perf_event *event) 4382 { 4383 struct event_constraint *c; 4384 4385 c = intel_get_event_constraints(cpuc, idx, event); 4386 4387 /* 4388 * :ppp means to do reduced skid PEBS, 4389 * which is available on PMC0 and fixed counter 0. 4390 */ 4391 if (event->attr.precise_ip == 3) { 4392 /* Force instruction:ppp on PMC0 and Fixed counter 0 */ 4393 if (constraint_match(&fixed0_constraint, event->hw.config)) 4394 return &fixed0_counter0_constraint; 4395 4396 return &counter0_constraint; 4397 } 4398 4399 return c; 4400 } 4401 4402 static bool allow_tsx_force_abort = true; 4403 4404 static struct event_constraint * 4405 tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 4406 struct perf_event *event) 4407 { 4408 struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event); 4409 4410 /* 4411 * Without TFA we must not use PMC3. 4412 */ 4413 if (!allow_tsx_force_abort && test_bit(3, c->idxmsk)) { 4414 c = dyn_constraint(cpuc, c, idx); 4415 c->idxmsk64 &= ~(1ULL << 3); 4416 c->weight--; 4417 } 4418 4419 return c; 4420 } 4421 4422 static struct event_constraint * 4423 adl_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 4424 struct perf_event *event) 4425 { 4426 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); 4427 4428 if (pmu->pmu_type == hybrid_big) 4429 return glc_get_event_constraints(cpuc, idx, event); 4430 else if (pmu->pmu_type == hybrid_small) 4431 return tnt_get_event_constraints(cpuc, idx, event); 4432 4433 WARN_ON(1); 4434 return &emptyconstraint; 4435 } 4436 4437 static struct event_constraint * 4438 cmt_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 4439 struct perf_event *event) 4440 { 4441 struct event_constraint *c; 4442 4443 c = intel_get_event_constraints(cpuc, idx, event); 4444 4445 /* 4446 * The :ppp indicates the Precise Distribution (PDist) facility, which 4447 * is only supported on the GP counter 0 & 1 and Fixed counter 0. 4448 * If a :ppp event which is not available on the above eligible counters, 4449 * error out. 4450 */ 4451 if (event->attr.precise_ip == 3) { 4452 /* Force instruction:ppp on PMC0, 1 and Fixed counter 0 */ 4453 if (constraint_match(&fixed0_constraint, event->hw.config)) { 4454 /* The fixed counter 0 doesn't support LBR event logging. 
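 * (such events are therefore kept on GP counters 0/1 instead)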
*/ 4455 if (branch_sample_counters(event)) 4456 return &counter0_1_constraint; 4457 else 4458 return &fixed0_counter0_1_constraint; 4459 } 4460 4461 switch (c->idxmsk64 & 0x3ull) { 4462 case 0x1: 4463 return &counter0_constraint; 4464 case 0x2: 4465 return &counter1_constraint; 4466 case 0x3: 4467 return &counter0_1_constraint; 4468 } 4469 return &emptyconstraint; 4470 } 4471 4472 return c; 4473 } 4474 4475 static struct event_constraint * 4476 rwc_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 4477 struct perf_event *event) 4478 { 4479 struct event_constraint *c; 4480 4481 c = glc_get_event_constraints(cpuc, idx, event); 4482 4483 /* The Retire Latency is not supported by the fixed counter 0. */ 4484 if (event->attr.precise_ip && 4485 (event->attr.sample_type & PERF_SAMPLE_WEIGHT_TYPE) && 4486 constraint_match(&fixed0_constraint, event->hw.config)) { 4487 /* 4488 * The Instruction PDIR is only available 4489 * on the fixed counter 0. Error out for this case. 4490 */ 4491 if (event->attr.precise_ip == 3) 4492 return &emptyconstraint; 4493 return &counters_1_7_constraint; 4494 } 4495 4496 return c; 4497 } 4498 4499 static struct event_constraint * 4500 mtl_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 4501 struct perf_event *event) 4502 { 4503 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); 4504 4505 if (pmu->pmu_type == hybrid_big) 4506 return rwc_get_event_constraints(cpuc, idx, event); 4507 if (pmu->pmu_type == hybrid_small) 4508 return cmt_get_event_constraints(cpuc, idx, event); 4509 4510 WARN_ON(1); 4511 return &emptyconstraint; 4512 } 4513 4514 static int adl_hw_config(struct perf_event *event) 4515 { 4516 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); 4517 4518 if (pmu->pmu_type == hybrid_big) 4519 return hsw_hw_config(event); 4520 else if (pmu->pmu_type == hybrid_small) 4521 return intel_pmu_hw_config(event); 4522 4523 WARN_ON(1); 4524 return -EOPNOTSUPP; 4525 } 4526 4527 static enum hybrid_cpu_type adl_get_hybrid_cpu_type(void) 4528 { 4529 return HYBRID_INTEL_CORE; 4530 } 4531 4532 /* 4533 * Broadwell: 4534 * 4535 * The INST_RETIRED.ALL period always needs to have lowest 6 bits cleared 4536 * (BDM55) and it must not use a period smaller than 100 (BDM11). We combine 4537 * the two to enforce a minimum period of 128 (the smallest value that has bits 4538 * 0-5 cleared and >= 100). 4539 * 4540 * Because of how the code in x86_perf_event_set_period() works, the truncation 4541 * of the lower 6 bits is 'harmless' as we'll occasionally add a longer period 4542 * to make up for the 'lost' events due to carrying the 'error' in period_left. 4543 * 4544 * Therefore the effective (average) period matches the requested period, 4545 * despite coarser hardware granularity. 
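 * For example, a requested period of 100 is raised to 128 below, while a
 * requested period of 200 is truncated to 192.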
4546 */ 4547 static void bdw_limit_period(struct perf_event *event, s64 *left) 4548 { 4549 if ((event->hw.config & INTEL_ARCH_EVENT_MASK) == 4550 X86_CONFIG(.event=0xc0, .umask=0x01)) { 4551 if (*left < 128) 4552 *left = 128; 4553 *left &= ~0x3fULL; 4554 } 4555 } 4556 4557 static void nhm_limit_period(struct perf_event *event, s64 *left) 4558 { 4559 *left = max(*left, 32LL); 4560 } 4561 4562 static void glc_limit_period(struct perf_event *event, s64 *left) 4563 { 4564 if (event->attr.precise_ip == 3) 4565 *left = max(*left, 128LL); 4566 } 4567 4568 PMU_FORMAT_ATTR(event, "config:0-7" ); 4569 PMU_FORMAT_ATTR(umask, "config:8-15" ); 4570 PMU_FORMAT_ATTR(edge, "config:18" ); 4571 PMU_FORMAT_ATTR(pc, "config:19" ); 4572 PMU_FORMAT_ATTR(any, "config:21" ); /* v3 + */ 4573 PMU_FORMAT_ATTR(inv, "config:23" ); 4574 PMU_FORMAT_ATTR(cmask, "config:24-31" ); 4575 PMU_FORMAT_ATTR(in_tx, "config:32"); 4576 PMU_FORMAT_ATTR(in_tx_cp, "config:33"); 4577 4578 static struct attribute *intel_arch_formats_attr[] = { 4579 &format_attr_event.attr, 4580 &format_attr_umask.attr, 4581 &format_attr_edge.attr, 4582 &format_attr_pc.attr, 4583 &format_attr_inv.attr, 4584 &format_attr_cmask.attr, 4585 NULL, 4586 }; 4587 4588 ssize_t intel_event_sysfs_show(char *page, u64 config) 4589 { 4590 u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT); 4591 4592 return x86_event_sysfs_show(page, config, event); 4593 } 4594 4595 static struct intel_shared_regs *allocate_shared_regs(int cpu) 4596 { 4597 struct intel_shared_regs *regs; 4598 int i; 4599 4600 regs = kzalloc_node(sizeof(struct intel_shared_regs), 4601 GFP_KERNEL, cpu_to_node(cpu)); 4602 if (regs) { 4603 /* 4604 * initialize the locks to keep lockdep happy 4605 */ 4606 for (i = 0; i < EXTRA_REG_MAX; i++) 4607 raw_spin_lock_init(®s->regs[i].lock); 4608 4609 regs->core_id = -1; 4610 } 4611 return regs; 4612 } 4613 4614 static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu) 4615 { 4616 struct intel_excl_cntrs *c; 4617 4618 c = kzalloc_node(sizeof(struct intel_excl_cntrs), 4619 GFP_KERNEL, cpu_to_node(cpu)); 4620 if (c) { 4621 raw_spin_lock_init(&c->lock); 4622 c->core_id = -1; 4623 } 4624 return c; 4625 } 4626 4627 4628 int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu) 4629 { 4630 cpuc->pebs_record_size = x86_pmu.pebs_record_size; 4631 4632 if (is_hybrid() || x86_pmu.extra_regs || x86_pmu.lbr_sel_map) { 4633 cpuc->shared_regs = allocate_shared_regs(cpu); 4634 if (!cpuc->shared_regs) 4635 goto err; 4636 } 4637 4638 if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA | PMU_FL_BR_CNTR)) { 4639 size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint); 4640 4641 cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu)); 4642 if (!cpuc->constraint_list) 4643 goto err_shared_regs; 4644 } 4645 4646 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) { 4647 cpuc->excl_cntrs = allocate_excl_cntrs(cpu); 4648 if (!cpuc->excl_cntrs) 4649 goto err_constraint_list; 4650 4651 cpuc->excl_thread_id = 0; 4652 } 4653 4654 return 0; 4655 4656 err_constraint_list: 4657 kfree(cpuc->constraint_list); 4658 cpuc->constraint_list = NULL; 4659 4660 err_shared_regs: 4661 kfree(cpuc->shared_regs); 4662 cpuc->shared_regs = NULL; 4663 4664 err: 4665 return -ENOMEM; 4666 } 4667 4668 static int intel_pmu_cpu_prepare(int cpu) 4669 { 4670 return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu); 4671 } 4672 4673 static void flip_smm_bit(void *data) 4674 { 4675 unsigned long set = *(unsigned long *)data; 4676 4677 if (set > 0) { 4678 msr_set_bit(MSR_IA32_DEBUGCTLMSR, 4679 
DEBUGCTLMSR_FREEZE_IN_SMM_BIT); 4680 } else { 4681 msr_clear_bit(MSR_IA32_DEBUGCTLMSR, 4682 DEBUGCTLMSR_FREEZE_IN_SMM_BIT); 4683 } 4684 } 4685 4686 static void intel_pmu_check_num_counters(int *num_counters, 4687 int *num_counters_fixed, 4688 u64 *intel_ctrl, u64 fixed_mask); 4689 4690 static void intel_pmu_check_event_constraints(struct event_constraint *event_constraints, 4691 int num_counters, 4692 int num_counters_fixed, 4693 u64 intel_ctrl); 4694 4695 static void intel_pmu_check_extra_regs(struct extra_reg *extra_regs); 4696 4697 static inline bool intel_pmu_broken_perf_cap(void) 4698 { 4699 /* The Perf Metric (Bit 15) is always cleared */ 4700 if ((boot_cpu_data.x86_model == INTEL_FAM6_METEORLAKE) || 4701 (boot_cpu_data.x86_model == INTEL_FAM6_METEORLAKE_L)) 4702 return true; 4703 4704 return false; 4705 } 4706 4707 static void update_pmu_cap(struct x86_hybrid_pmu *pmu) 4708 { 4709 unsigned int sub_bitmaps = cpuid_eax(ARCH_PERFMON_EXT_LEAF); 4710 unsigned int eax, ebx, ecx, edx; 4711 4712 if (sub_bitmaps & ARCH_PERFMON_NUM_COUNTER_LEAF_BIT) { 4713 cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_NUM_COUNTER_LEAF, 4714 &eax, &ebx, &ecx, &edx); 4715 pmu->num_counters = fls(eax); 4716 pmu->num_counters_fixed = fls(ebx); 4717 } 4718 4719 4720 if (!intel_pmu_broken_perf_cap()) { 4721 /* Perf Metric (Bit 15) and PEBS via PT (Bit 16) are hybrid enumeration */ 4722 rdmsrl(MSR_IA32_PERF_CAPABILITIES, pmu->intel_cap.capabilities); 4723 } 4724 } 4725 4726 static void intel_pmu_check_hybrid_pmus(struct x86_hybrid_pmu *pmu) 4727 { 4728 intel_pmu_check_num_counters(&pmu->num_counters, &pmu->num_counters_fixed, 4729 &pmu->intel_ctrl, (1ULL << pmu->num_counters_fixed) - 1); 4730 pmu->max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, pmu->num_counters); 4731 pmu->unconstrained = (struct event_constraint) 4732 __EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1, 4733 0, pmu->num_counters, 0, 0); 4734 4735 if (pmu->intel_cap.perf_metrics) 4736 pmu->intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS; 4737 else 4738 pmu->intel_ctrl &= ~(1ULL << GLOBAL_CTRL_EN_PERF_METRICS); 4739 4740 if (pmu->intel_cap.pebs_output_pt_available) 4741 pmu->pmu.capabilities |= PERF_PMU_CAP_AUX_OUTPUT; 4742 else 4743 pmu->pmu.capabilities &= ~PERF_PMU_CAP_AUX_OUTPUT; 4744 4745 intel_pmu_check_event_constraints(pmu->event_constraints, 4746 pmu->num_counters, 4747 pmu->num_counters_fixed, 4748 pmu->intel_ctrl); 4749 4750 intel_pmu_check_extra_regs(pmu->extra_regs); 4751 } 4752 4753 static struct x86_hybrid_pmu *find_hybrid_pmu_for_cpu(void) 4754 { 4755 u8 cpu_type = get_this_hybrid_cpu_type(); 4756 int i; 4757 4758 /* 4759 * This is running on a CPU model that is known to have hybrid 4760 * configurations. But the CPU told us it is not hybrid, shame 4761 * on it. There should be a fixup function provided for these 4762 * troublesome CPUs (->get_hybrid_cpu_type). 
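 * (e.g. adl_get_hybrid_cpu_type() above unconditionally reports
 * HYBRID_INTEL_CORE)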
4763 */ 4764 if (cpu_type == HYBRID_INTEL_NONE) { 4765 if (x86_pmu.get_hybrid_cpu_type) 4766 cpu_type = x86_pmu.get_hybrid_cpu_type(); 4767 else 4768 return NULL; 4769 } 4770 4771 /* 4772 * This essentially just maps between the 'hybrid_cpu_type' 4773 * and 'hybrid_pmu_type' enums: 4774 */ 4775 for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) { 4776 enum hybrid_pmu_type pmu_type = x86_pmu.hybrid_pmu[i].pmu_type; 4777 4778 if (cpu_type == HYBRID_INTEL_CORE && 4779 pmu_type == hybrid_big) 4780 return &x86_pmu.hybrid_pmu[i]; 4781 if (cpu_type == HYBRID_INTEL_ATOM && 4782 pmu_type == hybrid_small) 4783 return &x86_pmu.hybrid_pmu[i]; 4784 } 4785 4786 return NULL; 4787 } 4788 4789 static bool init_hybrid_pmu(int cpu) 4790 { 4791 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); 4792 struct x86_hybrid_pmu *pmu = find_hybrid_pmu_for_cpu(); 4793 4794 if (WARN_ON_ONCE(!pmu || (pmu->pmu.type == -1))) { 4795 cpuc->pmu = NULL; 4796 return false; 4797 } 4798 4799 /* Only check and dump the PMU information for the first CPU */ 4800 if (!cpumask_empty(&pmu->supported_cpus)) 4801 goto end; 4802 4803 if (this_cpu_has(X86_FEATURE_ARCH_PERFMON_EXT)) 4804 update_pmu_cap(pmu); 4805 4806 intel_pmu_check_hybrid_pmus(pmu); 4807 4808 if (!check_hw_exists(&pmu->pmu, pmu->num_counters, pmu->num_counters_fixed)) 4809 return false; 4810 4811 pr_info("%s PMU driver: ", pmu->name); 4812 4813 if (pmu->intel_cap.pebs_output_pt_available) 4814 pr_cont("PEBS-via-PT "); 4815 4816 pr_cont("\n"); 4817 4818 x86_pmu_show_pmu_cap(pmu->num_counters, pmu->num_counters_fixed, 4819 pmu->intel_ctrl); 4820 4821 end: 4822 cpumask_set_cpu(cpu, &pmu->supported_cpus); 4823 cpuc->pmu = &pmu->pmu; 4824 4825 return true; 4826 } 4827 4828 static void intel_pmu_cpu_starting(int cpu) 4829 { 4830 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); 4831 int core_id = topology_core_id(cpu); 4832 int i; 4833 4834 if (is_hybrid() && !init_hybrid_pmu(cpu)) 4835 return; 4836 4837 init_debug_store_on_cpu(cpu); 4838 /* 4839 * Deal with CPUs that don't clear their LBRs on power-up. 4840 */ 4841 intel_pmu_lbr_reset(); 4842 4843 cpuc->lbr_sel = NULL; 4844 4845 if (x86_pmu.flags & PMU_FL_TFA) { 4846 WARN_ON_ONCE(cpuc->tfa_shadow); 4847 cpuc->tfa_shadow = ~0ULL; 4848 intel_set_tfa(cpuc, false); 4849 } 4850 4851 if (x86_pmu.version > 1) 4852 flip_smm_bit(&x86_pmu.attr_freeze_on_smi); 4853 4854 /* 4855 * Disable perf metrics if any added CPU doesn't support it. 4856 * 4857 * Turn off the check for a hybrid architecture, because the 4858 * architecture MSR, MSR_IA32_PERF_CAPABILITIES, only indicate 4859 * the architecture features. The perf metrics is a model-specific 4860 * feature for now. The corresponding bit should always be 0 on 4861 * a hybrid platform, e.g., Alder Lake. 
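 * Hence the check below runs only on non-hybrid systems: if any onlined CPU
 * lacks PERF_METRICS in MSR_IA32_PERF_CAPABILITIES, the feature and its
 * GLOBAL_CTRL enable bit are dropped for the whole PMU.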
4862 */ 4863 if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics) { 4864 union perf_capabilities perf_cap; 4865 4866 rdmsrl(MSR_IA32_PERF_CAPABILITIES, perf_cap.capabilities); 4867 if (!perf_cap.perf_metrics) { 4868 x86_pmu.intel_cap.perf_metrics = 0; 4869 x86_pmu.intel_ctrl &= ~(1ULL << GLOBAL_CTRL_EN_PERF_METRICS); 4870 } 4871 } 4872 4873 if (!cpuc->shared_regs) 4874 return; 4875 4876 if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) { 4877 for_each_cpu(i, topology_sibling_cpumask(cpu)) { 4878 struct intel_shared_regs *pc; 4879 4880 pc = per_cpu(cpu_hw_events, i).shared_regs; 4881 if (pc && pc->core_id == core_id) { 4882 cpuc->kfree_on_online[0] = cpuc->shared_regs; 4883 cpuc->shared_regs = pc; 4884 break; 4885 } 4886 } 4887 cpuc->shared_regs->core_id = core_id; 4888 cpuc->shared_regs->refcnt++; 4889 } 4890 4891 if (x86_pmu.lbr_sel_map) 4892 cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR]; 4893 4894 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) { 4895 for_each_cpu(i, topology_sibling_cpumask(cpu)) { 4896 struct cpu_hw_events *sibling; 4897 struct intel_excl_cntrs *c; 4898 4899 sibling = &per_cpu(cpu_hw_events, i); 4900 c = sibling->excl_cntrs; 4901 if (c && c->core_id == core_id) { 4902 cpuc->kfree_on_online[1] = cpuc->excl_cntrs; 4903 cpuc->excl_cntrs = c; 4904 if (!sibling->excl_thread_id) 4905 cpuc->excl_thread_id = 1; 4906 break; 4907 } 4908 } 4909 cpuc->excl_cntrs->core_id = core_id; 4910 cpuc->excl_cntrs->refcnt++; 4911 } 4912 } 4913 4914 static void free_excl_cntrs(struct cpu_hw_events *cpuc) 4915 { 4916 struct intel_excl_cntrs *c; 4917 4918 c = cpuc->excl_cntrs; 4919 if (c) { 4920 if (c->core_id == -1 || --c->refcnt == 0) 4921 kfree(c); 4922 cpuc->excl_cntrs = NULL; 4923 } 4924 4925 kfree(cpuc->constraint_list); 4926 cpuc->constraint_list = NULL; 4927 } 4928 4929 static void intel_pmu_cpu_dying(int cpu) 4930 { 4931 fini_debug_store_on_cpu(cpu); 4932 } 4933 4934 void intel_cpuc_finish(struct cpu_hw_events *cpuc) 4935 { 4936 struct intel_shared_regs *pc; 4937 4938 pc = cpuc->shared_regs; 4939 if (pc) { 4940 if (pc->core_id == -1 || --pc->refcnt == 0) 4941 kfree(pc); 4942 cpuc->shared_regs = NULL; 4943 } 4944 4945 free_excl_cntrs(cpuc); 4946 } 4947 4948 static void intel_pmu_cpu_dead(int cpu) 4949 { 4950 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); 4951 4952 intel_cpuc_finish(cpuc); 4953 4954 if (is_hybrid() && cpuc->pmu) 4955 cpumask_clear_cpu(cpu, &hybrid_pmu(cpuc->pmu)->supported_cpus); 4956 } 4957 4958 static void intel_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, 4959 bool sched_in) 4960 { 4961 intel_pmu_pebs_sched_task(pmu_ctx, sched_in); 4962 intel_pmu_lbr_sched_task(pmu_ctx, sched_in); 4963 } 4964 4965 static void intel_pmu_swap_task_ctx(struct perf_event_pmu_context *prev_epc, 4966 struct perf_event_pmu_context *next_epc) 4967 { 4968 intel_pmu_lbr_swap_task_ctx(prev_epc, next_epc); 4969 } 4970 4971 static int intel_pmu_check_period(struct perf_event *event, u64 value) 4972 { 4973 return intel_pmu_has_bts_period(event, value) ? 
-EINVAL : 0; 4974 } 4975 4976 static void intel_aux_output_init(void) 4977 { 4978 /* Refer also intel_pmu_aux_output_match() */ 4979 if (x86_pmu.intel_cap.pebs_output_pt_available) 4980 x86_pmu.assign = intel_pmu_assign_event; 4981 } 4982 4983 static int intel_pmu_aux_output_match(struct perf_event *event) 4984 { 4985 /* intel_pmu_assign_event() is needed, refer intel_aux_output_init() */ 4986 if (!x86_pmu.intel_cap.pebs_output_pt_available) 4987 return 0; 4988 4989 return is_intel_pt_event(event); 4990 } 4991 4992 static void intel_pmu_filter(struct pmu *pmu, int cpu, bool *ret) 4993 { 4994 struct x86_hybrid_pmu *hpmu = hybrid_pmu(pmu); 4995 4996 *ret = !cpumask_test_cpu(cpu, &hpmu->supported_cpus); 4997 } 4998 4999 PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63"); 5000 5001 PMU_FORMAT_ATTR(ldlat, "config1:0-15"); 5002 5003 PMU_FORMAT_ATTR(frontend, "config1:0-23"); 5004 5005 PMU_FORMAT_ATTR(snoop_rsp, "config1:0-63"); 5006 5007 static struct attribute *intel_arch3_formats_attr[] = { 5008 &format_attr_event.attr, 5009 &format_attr_umask.attr, 5010 &format_attr_edge.attr, 5011 &format_attr_pc.attr, 5012 &format_attr_any.attr, 5013 &format_attr_inv.attr, 5014 &format_attr_cmask.attr, 5015 NULL, 5016 }; 5017 5018 static struct attribute *hsw_format_attr[] = { 5019 &format_attr_in_tx.attr, 5020 &format_attr_in_tx_cp.attr, 5021 &format_attr_offcore_rsp.attr, 5022 &format_attr_ldlat.attr, 5023 NULL 5024 }; 5025 5026 static struct attribute *nhm_format_attr[] = { 5027 &format_attr_offcore_rsp.attr, 5028 &format_attr_ldlat.attr, 5029 NULL 5030 }; 5031 5032 static struct attribute *slm_format_attr[] = { 5033 &format_attr_offcore_rsp.attr, 5034 NULL 5035 }; 5036 5037 static struct attribute *cmt_format_attr[] = { 5038 &format_attr_offcore_rsp.attr, 5039 &format_attr_ldlat.attr, 5040 &format_attr_snoop_rsp.attr, 5041 NULL 5042 }; 5043 5044 static struct attribute *skl_format_attr[] = { 5045 &format_attr_frontend.attr, 5046 NULL, 5047 }; 5048 5049 static __initconst const struct x86_pmu core_pmu = { 5050 .name = "core", 5051 .handle_irq = x86_pmu_handle_irq, 5052 .disable_all = x86_pmu_disable_all, 5053 .enable_all = core_pmu_enable_all, 5054 .enable = core_pmu_enable_event, 5055 .disable = x86_pmu_disable_event, 5056 .hw_config = core_pmu_hw_config, 5057 .schedule_events = x86_schedule_events, 5058 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0, 5059 .perfctr = MSR_ARCH_PERFMON_PERFCTR0, 5060 .event_map = intel_pmu_event_map, 5061 .max_events = ARRAY_SIZE(intel_perfmon_event_map), 5062 .apic = 1, 5063 .large_pebs_flags = LARGE_PEBS_FLAGS, 5064 5065 /* 5066 * Intel PMCs cannot be accessed sanely above 32-bit width, 5067 * so we install an artificial 1<<31 period regardless of 5068 * the generic event period: 5069 */ 5070 .max_period = (1ULL<<31) - 1, 5071 .get_event_constraints = intel_get_event_constraints, 5072 .put_event_constraints = intel_put_event_constraints, 5073 .event_constraints = intel_core_event_constraints, 5074 .guest_get_msrs = core_guest_get_msrs, 5075 .format_attrs = intel_arch_formats_attr, 5076 .events_sysfs_show = intel_event_sysfs_show, 5077 5078 /* 5079 * Virtual (or funny metal) CPU can define x86_pmu.extra_regs 5080 * together with PMU version 1 and thus be using core_pmu with 5081 * shared_regs. We need following callbacks here to allocate 5082 * it properly. 
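 * (the same cpu_prepare/cpu_starting/cpu_dying/cpu_dead callbacks are used
 * by the full intel_pmu definition below)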
5083 */ 5084 .cpu_prepare = intel_pmu_cpu_prepare, 5085 .cpu_starting = intel_pmu_cpu_starting, 5086 .cpu_dying = intel_pmu_cpu_dying, 5087 .cpu_dead = intel_pmu_cpu_dead, 5088 5089 .check_period = intel_pmu_check_period, 5090 5091 .lbr_reset = intel_pmu_lbr_reset_64, 5092 .lbr_read = intel_pmu_lbr_read_64, 5093 .lbr_save = intel_pmu_lbr_save, 5094 .lbr_restore = intel_pmu_lbr_restore, 5095 }; 5096 5097 static __initconst const struct x86_pmu intel_pmu = { 5098 .name = "Intel", 5099 .handle_irq = intel_pmu_handle_irq, 5100 .disable_all = intel_pmu_disable_all, 5101 .enable_all = intel_pmu_enable_all, 5102 .enable = intel_pmu_enable_event, 5103 .disable = intel_pmu_disable_event, 5104 .add = intel_pmu_add_event, 5105 .del = intel_pmu_del_event, 5106 .read = intel_pmu_read_event, 5107 .set_period = intel_pmu_set_period, 5108 .update = intel_pmu_update, 5109 .hw_config = intel_pmu_hw_config, 5110 .schedule_events = x86_schedule_events, 5111 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0, 5112 .perfctr = MSR_ARCH_PERFMON_PERFCTR0, 5113 .event_map = intel_pmu_event_map, 5114 .max_events = ARRAY_SIZE(intel_perfmon_event_map), 5115 .apic = 1, 5116 .large_pebs_flags = LARGE_PEBS_FLAGS, 5117 /* 5118 * Intel PMCs cannot be accessed sanely above 32 bit width, 5119 * so we install an artificial 1<<31 period regardless of 5120 * the generic event period: 5121 */ 5122 .max_period = (1ULL << 31) - 1, 5123 .get_event_constraints = intel_get_event_constraints, 5124 .put_event_constraints = intel_put_event_constraints, 5125 .pebs_aliases = intel_pebs_aliases_core2, 5126 5127 .format_attrs = intel_arch3_formats_attr, 5128 .events_sysfs_show = intel_event_sysfs_show, 5129 5130 .cpu_prepare = intel_pmu_cpu_prepare, 5131 .cpu_starting = intel_pmu_cpu_starting, 5132 .cpu_dying = intel_pmu_cpu_dying, 5133 .cpu_dead = intel_pmu_cpu_dead, 5134 5135 .guest_get_msrs = intel_guest_get_msrs, 5136 .sched_task = intel_pmu_sched_task, 5137 .swap_task_ctx = intel_pmu_swap_task_ctx, 5138 5139 .check_period = intel_pmu_check_period, 5140 5141 .aux_output_match = intel_pmu_aux_output_match, 5142 5143 .lbr_reset = intel_pmu_lbr_reset_64, 5144 .lbr_read = intel_pmu_lbr_read_64, 5145 .lbr_save = intel_pmu_lbr_save, 5146 .lbr_restore = intel_pmu_lbr_restore, 5147 5148 /* 5149 * SMM has access to all 4 rings and while traditionally SMM code only 5150 * ran in CPL0, 2021-era firmware is starting to make use of CPL3 in SMM. 5151 * 5152 * Since the EVENTSEL.{USR,OS} CPL filtering makes no distinction 5153 * between SMM or not, this results in what should be pure userspace 5154 * counters including SMM data. 5155 * 5156 * This is a clear privilege issue, therefore globally disable 5157 * counting SMM by default. 5158 */ 5159 .attr_freeze_on_smi = 1, 5160 }; 5161 5162 static __init void intel_clovertown_quirk(void) 5163 { 5164 /* 5165 * PEBS is unreliable due to: 5166 * 5167 * AJ67 - PEBS may experience CPL leaks 5168 * AJ68 - PEBS PMI may be delayed by one event 5169 * AJ69 - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12] 5170 * AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS 5171 * 5172 * AJ67 could be worked around by restricting the OS/USR flags. 5173 * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI. 5174 * 5175 * AJ106 could possibly be worked around by not allowing LBR 5176 * usage from PEBS, including the fixup. 5177 * AJ68 could possibly be worked around by always programming 5178 * a pebs_event_reset[0] value and coping with the lost events. 
5179 * 5180 * But taken together it might just make sense to not enable PEBS on 5181 * these chips. 5182 */ 5183 pr_warn("PEBS disabled due to CPU errata\n"); 5184 x86_pmu.pebs = 0; 5185 x86_pmu.pebs_constraints = NULL; 5186 } 5187 5188 static const struct x86_cpu_desc isolation_ucodes[] = { 5189 INTEL_CPU_DESC(INTEL_FAM6_HASWELL, 3, 0x0000001f), 5190 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_L, 1, 0x0000001e), 5191 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_G, 1, 0x00000015), 5192 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X, 2, 0x00000037), 5193 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X, 4, 0x0000000a), 5194 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL, 4, 0x00000023), 5195 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_G, 1, 0x00000014), 5196 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 2, 0x00000010), 5197 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 3, 0x07000009), 5198 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 4, 0x0f000009), 5199 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 5, 0x0e000002), 5200 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_X, 1, 0x0b000014), 5201 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 3, 0x00000021), 5202 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 4, 0x00000000), 5203 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 5, 0x00000000), 5204 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 6, 0x00000000), 5205 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 7, 0x00000000), 5206 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 11, 0x00000000), 5207 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_L, 3, 0x0000007c), 5208 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE, 3, 0x0000007c), 5209 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 9, 0x0000004e), 5210 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 9, 0x0000004e), 5211 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 10, 0x0000004e), 5212 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 11, 0x0000004e), 5213 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 12, 0x0000004e), 5214 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 10, 0x0000004e), 5215 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 11, 0x0000004e), 5216 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 12, 0x0000004e), 5217 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 13, 0x0000004e), 5218 {} 5219 }; 5220 5221 static void intel_check_pebs_isolation(void) 5222 { 5223 x86_pmu.pebs_no_isolation = !x86_cpu_has_min_microcode_rev(isolation_ucodes); 5224 } 5225 5226 static __init void intel_pebs_isolation_quirk(void) 5227 { 5228 WARN_ON_ONCE(x86_pmu.check_microcode); 5229 x86_pmu.check_microcode = intel_check_pebs_isolation; 5230 intel_check_pebs_isolation(); 5231 } 5232 5233 static const struct x86_cpu_desc pebs_ucodes[] = { 5234 INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE, 7, 0x00000028), 5235 INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE_X, 6, 0x00000618), 5236 INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE_X, 7, 0x0000070c), 5237 {} 5238 }; 5239 5240 static bool intel_snb_pebs_broken(void) 5241 { 5242 return !x86_cpu_has_min_microcode_rev(pebs_ucodes); 5243 } 5244 5245 static void intel_snb_check_microcode(void) 5246 { 5247 if (intel_snb_pebs_broken() == x86_pmu.pebs_broken) 5248 return; 5249 5250 /* 5251 * Serialized by the microcode lock.. 5252 */ 5253 if (x86_pmu.pebs_broken) { 5254 pr_info("PEBS enabled due to microcode update\n"); 5255 x86_pmu.pebs_broken = 0; 5256 } else { 5257 pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n"); 5258 x86_pmu.pebs_broken = 1; 5259 } 5260 } 5261 5262 static bool is_lbr_from(unsigned long msr) 5263 { 5264 unsigned long lbr_from_nr = x86_pmu.lbr_from + x86_pmu.lbr_nr; 5265 5266 return x86_pmu.lbr_from <= msr && msr < lbr_from_nr; 5267 } 5268 5269 /* 5270 * Under certain circumstances, access certain MSR may cause #GP. 5271 * The function tests if the input MSR can be safely accessed. 
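 * The probe below only runs under a hypervisor (it is skipped on bare
 * metal): read the MSR, write a toggled value, read it back to confirm the
 * write stuck, then restore the original value.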
5272 */ 5273 static bool check_msr(unsigned long msr, u64 mask) 5274 { 5275 u64 val_old, val_new, val_tmp; 5276 5277 /* 5278 * Disable the check for real HW, so we don't 5279 * mess with potentially enabled registers: 5280 */ 5281 if (!boot_cpu_has(X86_FEATURE_HYPERVISOR)) 5282 return true; 5283 5284 /* 5285 * Read the current value, change it and read it back to see if it 5286 * matches, this is needed to detect certain hardware emulators 5287 * (qemu/kvm) that don't trap on the MSR access and always return 0s. 5288 */ 5289 if (rdmsrl_safe(msr, &val_old)) 5290 return false; 5291 5292 /* 5293 * Only change the bits which can be updated by wrmsrl. 5294 */ 5295 val_tmp = val_old ^ mask; 5296 5297 if (is_lbr_from(msr)) 5298 val_tmp = lbr_from_signext_quirk_wr(val_tmp); 5299 5300 if (wrmsrl_safe(msr, val_tmp) || 5301 rdmsrl_safe(msr, &val_new)) 5302 return false; 5303 5304 /* 5305 * Quirk only affects validation in wrmsr(), so wrmsrl()'s value 5306 * should equal rdmsrl()'s even with the quirk. 5307 */ 5308 if (val_new != val_tmp) 5309 return false; 5310 5311 if (is_lbr_from(msr)) 5312 val_old = lbr_from_signext_quirk_wr(val_old); 5313 5314 /* Here it's sure that the MSR can be safely accessed. 5315 * Restore the old value and return. 5316 */ 5317 wrmsrl(msr, val_old); 5318 5319 return true; 5320 } 5321 5322 static __init void intel_sandybridge_quirk(void) 5323 { 5324 x86_pmu.check_microcode = intel_snb_check_microcode; 5325 cpus_read_lock(); 5326 intel_snb_check_microcode(); 5327 cpus_read_unlock(); 5328 } 5329 5330 static const struct { int id; char *name; } intel_arch_events_map[] __initconst = { 5331 { PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" }, 5332 { PERF_COUNT_HW_INSTRUCTIONS, "instructions" }, 5333 { PERF_COUNT_HW_BUS_CYCLES, "bus cycles" }, 5334 { PERF_COUNT_HW_CACHE_REFERENCES, "cache references" }, 5335 { PERF_COUNT_HW_CACHE_MISSES, "cache misses" }, 5336 { PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" }, 5337 { PERF_COUNT_HW_BRANCH_MISSES, "branch misses" }, 5338 }; 5339 5340 static __init void intel_arch_events_quirk(void) 5341 { 5342 int bit; 5343 5344 /* disable event that reported as not present by cpuid */ 5345 for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) { 5346 intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0; 5347 pr_warn("CPUID marked event: \'%s\' unavailable\n", 5348 intel_arch_events_map[bit].name); 5349 } 5350 } 5351 5352 static __init void intel_nehalem_quirk(void) 5353 { 5354 union cpuid10_ebx ebx; 5355 5356 ebx.full = x86_pmu.events_maskl; 5357 if (ebx.split.no_branch_misses_retired) { 5358 /* 5359 * Erratum AAJ80 detected, we work it around by using 5360 * the BR_MISP_EXEC.ANY event. This will over-count 5361 * branch-misses, but it's still much better than the 5362 * architectural event which is often completely bogus: 5363 */ 5364 intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89; 5365 ebx.split.no_branch_misses_retired = 0; 5366 x86_pmu.events_maskl = ebx.full; 5367 pr_info("CPU erratum AAJ80 worked around\n"); 5368 } 5369 } 5370 5371 /* 5372 * enable software workaround for errata: 5373 * SNB: BJ122 5374 * IVB: BV98 5375 * HSW: HSD29 5376 * 5377 * Only needed when HT is enabled. However detecting 5378 * if HT is enabled is difficult (model specific). 
So instead, 5379 * we enable the workaround in the early boot, and verify if 5380 * it is needed in a later initcall phase once we have valid 5381 * topology information to check if HT is actually enabled 5382 */ 5383 static __init void intel_ht_bug(void) 5384 { 5385 x86_pmu.flags |= PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED; 5386 5387 x86_pmu.start_scheduling = intel_start_scheduling; 5388 x86_pmu.commit_scheduling = intel_commit_scheduling; 5389 x86_pmu.stop_scheduling = intel_stop_scheduling; 5390 } 5391 5392 EVENT_ATTR_STR(mem-loads, mem_ld_hsw, "event=0xcd,umask=0x1,ldlat=3"); 5393 EVENT_ATTR_STR(mem-stores, mem_st_hsw, "event=0xd0,umask=0x82") 5394 5395 /* Haswell special events */ 5396 EVENT_ATTR_STR(tx-start, tx_start, "event=0xc9,umask=0x1"); 5397 EVENT_ATTR_STR(tx-commit, tx_commit, "event=0xc9,umask=0x2"); 5398 EVENT_ATTR_STR(tx-abort, tx_abort, "event=0xc9,umask=0x4"); 5399 EVENT_ATTR_STR(tx-capacity, tx_capacity, "event=0x54,umask=0x2"); 5400 EVENT_ATTR_STR(tx-conflict, tx_conflict, "event=0x54,umask=0x1"); 5401 EVENT_ATTR_STR(el-start, el_start, "event=0xc8,umask=0x1"); 5402 EVENT_ATTR_STR(el-commit, el_commit, "event=0xc8,umask=0x2"); 5403 EVENT_ATTR_STR(el-abort, el_abort, "event=0xc8,umask=0x4"); 5404 EVENT_ATTR_STR(el-capacity, el_capacity, "event=0x54,umask=0x2"); 5405 EVENT_ATTR_STR(el-conflict, el_conflict, "event=0x54,umask=0x1"); 5406 EVENT_ATTR_STR(cycles-t, cycles_t, "event=0x3c,in_tx=1"); 5407 EVENT_ATTR_STR(cycles-ct, cycles_ct, "event=0x3c,in_tx=1,in_tx_cp=1"); 5408 5409 static struct attribute *hsw_events_attrs[] = { 5410 EVENT_PTR(td_slots_issued), 5411 EVENT_PTR(td_slots_retired), 5412 EVENT_PTR(td_fetch_bubbles), 5413 EVENT_PTR(td_total_slots), 5414 EVENT_PTR(td_total_slots_scale), 5415 EVENT_PTR(td_recovery_bubbles), 5416 EVENT_PTR(td_recovery_bubbles_scale), 5417 NULL 5418 }; 5419 5420 static struct attribute *hsw_mem_events_attrs[] = { 5421 EVENT_PTR(mem_ld_hsw), 5422 EVENT_PTR(mem_st_hsw), 5423 NULL, 5424 }; 5425 5426 static struct attribute *hsw_tsx_events_attrs[] = { 5427 EVENT_PTR(tx_start), 5428 EVENT_PTR(tx_commit), 5429 EVENT_PTR(tx_abort), 5430 EVENT_PTR(tx_capacity), 5431 EVENT_PTR(tx_conflict), 5432 EVENT_PTR(el_start), 5433 EVENT_PTR(el_commit), 5434 EVENT_PTR(el_abort), 5435 EVENT_PTR(el_capacity), 5436 EVENT_PTR(el_conflict), 5437 EVENT_PTR(cycles_t), 5438 EVENT_PTR(cycles_ct), 5439 NULL 5440 }; 5441 5442 EVENT_ATTR_STR(tx-capacity-read, tx_capacity_read, "event=0x54,umask=0x80"); 5443 EVENT_ATTR_STR(tx-capacity-write, tx_capacity_write, "event=0x54,umask=0x2"); 5444 EVENT_ATTR_STR(el-capacity-read, el_capacity_read, "event=0x54,umask=0x80"); 5445 EVENT_ATTR_STR(el-capacity-write, el_capacity_write, "event=0x54,umask=0x2"); 5446 5447 static struct attribute *icl_events_attrs[] = { 5448 EVENT_PTR(mem_ld_hsw), 5449 EVENT_PTR(mem_st_hsw), 5450 NULL, 5451 }; 5452 5453 static struct attribute *icl_td_events_attrs[] = { 5454 EVENT_PTR(slots), 5455 EVENT_PTR(td_retiring), 5456 EVENT_PTR(td_bad_spec), 5457 EVENT_PTR(td_fe_bound), 5458 EVENT_PTR(td_be_bound), 5459 NULL, 5460 }; 5461 5462 static struct attribute *icl_tsx_events_attrs[] = { 5463 EVENT_PTR(tx_start), 5464 EVENT_PTR(tx_abort), 5465 EVENT_PTR(tx_commit), 5466 EVENT_PTR(tx_capacity_read), 5467 EVENT_PTR(tx_capacity_write), 5468 EVENT_PTR(tx_conflict), 5469 EVENT_PTR(el_start), 5470 EVENT_PTR(el_abort), 5471 EVENT_PTR(el_commit), 5472 EVENT_PTR(el_capacity_read), 5473 EVENT_PTR(el_capacity_write), 5474 EVENT_PTR(el_conflict), 5475 EVENT_PTR(cycles_t), 5476 EVENT_PTR(cycles_ct), 5477 NULL, 5478 
}; 5479 5480 5481 EVENT_ATTR_STR(mem-stores, mem_st_spr, "event=0xcd,umask=0x2"); 5482 EVENT_ATTR_STR(mem-loads-aux, mem_ld_aux, "event=0x03,umask=0x82"); 5483 5484 static struct attribute *glc_events_attrs[] = { 5485 EVENT_PTR(mem_ld_hsw), 5486 EVENT_PTR(mem_st_spr), 5487 EVENT_PTR(mem_ld_aux), 5488 NULL, 5489 }; 5490 5491 static struct attribute *glc_td_events_attrs[] = { 5492 EVENT_PTR(slots), 5493 EVENT_PTR(td_retiring), 5494 EVENT_PTR(td_bad_spec), 5495 EVENT_PTR(td_fe_bound), 5496 EVENT_PTR(td_be_bound), 5497 EVENT_PTR(td_heavy_ops), 5498 EVENT_PTR(td_br_mispredict), 5499 EVENT_PTR(td_fetch_lat), 5500 EVENT_PTR(td_mem_bound), 5501 NULL, 5502 }; 5503 5504 static struct attribute *glc_tsx_events_attrs[] = { 5505 EVENT_PTR(tx_start), 5506 EVENT_PTR(tx_abort), 5507 EVENT_PTR(tx_commit), 5508 EVENT_PTR(tx_capacity_read), 5509 EVENT_PTR(tx_capacity_write), 5510 EVENT_PTR(tx_conflict), 5511 EVENT_PTR(cycles_t), 5512 EVENT_PTR(cycles_ct), 5513 NULL, 5514 }; 5515 5516 static ssize_t freeze_on_smi_show(struct device *cdev, 5517 struct device_attribute *attr, 5518 char *buf) 5519 { 5520 return sprintf(buf, "%lu\n", x86_pmu.attr_freeze_on_smi); 5521 } 5522 5523 static DEFINE_MUTEX(freeze_on_smi_mutex); 5524 5525 static ssize_t freeze_on_smi_store(struct device *cdev, 5526 struct device_attribute *attr, 5527 const char *buf, size_t count) 5528 { 5529 unsigned long val; 5530 ssize_t ret; 5531 5532 ret = kstrtoul(buf, 0, &val); 5533 if (ret) 5534 return ret; 5535 5536 if (val > 1) 5537 return -EINVAL; 5538 5539 mutex_lock(&freeze_on_smi_mutex); 5540 5541 if (x86_pmu.attr_freeze_on_smi == val) 5542 goto done; 5543 5544 x86_pmu.attr_freeze_on_smi = val; 5545 5546 cpus_read_lock(); 5547 on_each_cpu(flip_smm_bit, &val, 1); 5548 cpus_read_unlock(); 5549 done: 5550 mutex_unlock(&freeze_on_smi_mutex); 5551 5552 return count; 5553 } 5554 5555 static void update_tfa_sched(void *ignored) 5556 { 5557 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 5558 5559 /* 5560 * check if PMC3 is used 5561 * and if so force schedule out for all event types all contexts 5562 */ 5563 if (test_bit(3, cpuc->active_mask)) 5564 perf_pmu_resched(x86_get_pmu(smp_processor_id())); 5565 } 5566 5567 static ssize_t show_sysctl_tfa(struct device *cdev, 5568 struct device_attribute *attr, 5569 char *buf) 5570 { 5571 return snprintf(buf, 40, "%d\n", allow_tsx_force_abort); 5572 } 5573 5574 static ssize_t set_sysctl_tfa(struct device *cdev, 5575 struct device_attribute *attr, 5576 const char *buf, size_t count) 5577 { 5578 bool val; 5579 ssize_t ret; 5580 5581 ret = kstrtobool(buf, &val); 5582 if (ret) 5583 return ret; 5584 5585 /* no change */ 5586 if (val == allow_tsx_force_abort) 5587 return count; 5588 5589 allow_tsx_force_abort = val; 5590 5591 cpus_read_lock(); 5592 on_each_cpu(update_tfa_sched, NULL, 1); 5593 cpus_read_unlock(); 5594 5595 return count; 5596 } 5597 5598 5599 static DEVICE_ATTR_RW(freeze_on_smi); 5600 5601 static ssize_t branches_show(struct device *cdev, 5602 struct device_attribute *attr, 5603 char *buf) 5604 { 5605 return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu.lbr_nr); 5606 } 5607 5608 static DEVICE_ATTR_RO(branches); 5609 5610 static ssize_t branch_counter_nr_show(struct device *cdev, 5611 struct device_attribute *attr, 5612 char *buf) 5613 { 5614 return snprintf(buf, PAGE_SIZE, "%d\n", fls(x86_pmu.lbr_counters)); 5615 } 5616 5617 static DEVICE_ATTR_RO(branch_counter_nr); 5618 5619 static ssize_t branch_counter_width_show(struct device *cdev, 5620 struct device_attribute *attr, 5621 char *buf) 5622 
static ssize_t branch_counter_width_show(struct device *cdev,
					 struct device_attribute *attr,
					 char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", LBR_INFO_BR_CNTR_BITS);
}

static DEVICE_ATTR_RO(branch_counter_width);

static struct attribute *lbr_attrs[] = {
	&dev_attr_branches.attr,
	&dev_attr_branch_counter_nr.attr,
	&dev_attr_branch_counter_width.attr,
	NULL
};

static umode_t
lbr_is_visible(struct kobject *kobj, struct attribute *attr, int i)
{
	/* branches */
	if (i == 0)
		return x86_pmu.lbr_nr ? attr->mode : 0;

	return (x86_pmu.flags & PMU_FL_BR_CNTR) ? attr->mode : 0;
}

static char pmu_name_str[30];

static ssize_t pmu_name_show(struct device *cdev,
			     struct device_attribute *attr,
			     char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", pmu_name_str);
}

static DEVICE_ATTR_RO(pmu_name);

static struct attribute *intel_pmu_caps_attrs[] = {
	&dev_attr_pmu_name.attr,
	NULL
};

static DEVICE_ATTR(allow_tsx_force_abort, 0644,
		   show_sysctl_tfa,
		   set_sysctl_tfa);

static struct attribute *intel_pmu_attrs[] = {
	&dev_attr_freeze_on_smi.attr,
	&dev_attr_allow_tsx_force_abort.attr,
	NULL,
};

static umode_t
default_is_visible(struct kobject *kobj, struct attribute *attr, int i)
{
	if (attr == &dev_attr_allow_tsx_force_abort.attr)
		return x86_pmu.flags & PMU_FL_TFA ? attr->mode : 0;

	return attr->mode;
}

static umode_t
tsx_is_visible(struct kobject *kobj, struct attribute *attr, int i)
{
	return boot_cpu_has(X86_FEATURE_RTM) ? attr->mode : 0;
}

static umode_t
pebs_is_visible(struct kobject *kobj, struct attribute *attr, int i)
{
	return x86_pmu.pebs ? attr->mode : 0;
}

static umode_t
mem_is_visible(struct kobject *kobj, struct attribute *attr, int i)
{
	if (attr == &event_attr_mem_ld_aux.attr.attr)
		return x86_pmu.flags & PMU_FL_MEM_LOADS_AUX ? attr->mode : 0;

	return pebs_is_visible(kobj, attr, i);
}

static umode_t
exra_is_visible(struct kobject *kobj, struct attribute *attr, int i)
{
	return x86_pmu.version >= 2 ? attr->mode : 0;
}
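
/*
 * Note (added for clarity, not part of the original code): the attribute
 * groups below are not registered directly. They are collected into
 * attr_update[] (or hybrid_attr_update[]) and assigned to
 * x86_pmu.attr_update at the end of intel_pmu_init(); the perf core then
 * applies them when the PMU device is registered, with each group's
 * ->is_visible() callback above deciding per attribute whether it shows
 * up under the "events", "caps" and "format" sysfs directories.
 */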
static struct attribute_group group_events_td  = {
	.name = "events",
};

static struct attribute_group group_events_mem = {
	.name       = "events",
	.is_visible = mem_is_visible,
};

static struct attribute_group group_events_tsx = {
	.name       = "events",
	.is_visible = tsx_is_visible,
};

static struct attribute_group group_caps_gen = {
	.name  = "caps",
	.attrs = intel_pmu_caps_attrs,
};

static struct attribute_group group_caps_lbr = {
	.name       = "caps",
	.attrs	    = lbr_attrs,
	.is_visible = lbr_is_visible,
};

static struct attribute_group group_format_extra = {
	.name       = "format",
	.is_visible = exra_is_visible,
};

static struct attribute_group group_format_extra_skl = {
	.name       = "format",
	.is_visible = exra_is_visible,
};

static struct attribute_group group_default = {
	.attrs      = intel_pmu_attrs,
	.is_visible = default_is_visible,
};

static const struct attribute_group *attr_update[] = {
	&group_events_td,
	&group_events_mem,
	&group_events_tsx,
	&group_caps_gen,
	&group_caps_lbr,
	&group_format_extra,
	&group_format_extra_skl,
	&group_default,
	NULL,
};

EVENT_ATTR_STR_HYBRID(slots, slots_adl, "event=0x00,umask=0x4", hybrid_big);
EVENT_ATTR_STR_HYBRID(topdown-retiring, td_retiring_adl, "event=0xc2,umask=0x0;event=0x00,umask=0x80", hybrid_big_small);
EVENT_ATTR_STR_HYBRID(topdown-bad-spec, td_bad_spec_adl, "event=0x73,umask=0x0;event=0x00,umask=0x81", hybrid_big_small);
EVENT_ATTR_STR_HYBRID(topdown-fe-bound, td_fe_bound_adl, "event=0x71,umask=0x0;event=0x00,umask=0x82", hybrid_big_small);
EVENT_ATTR_STR_HYBRID(topdown-be-bound, td_be_bound_adl, "event=0x74,umask=0x0;event=0x00,umask=0x83", hybrid_big_small);
EVENT_ATTR_STR_HYBRID(topdown-heavy-ops, td_heavy_ops_adl, "event=0x00,umask=0x84", hybrid_big);
EVENT_ATTR_STR_HYBRID(topdown-br-mispredict, td_br_mis_adl, "event=0x00,umask=0x85", hybrid_big);
EVENT_ATTR_STR_HYBRID(topdown-fetch-lat, td_fetch_lat_adl, "event=0x00,umask=0x86", hybrid_big);
EVENT_ATTR_STR_HYBRID(topdown-mem-bound, td_mem_bound_adl, "event=0x00,umask=0x87", hybrid_big);

static struct attribute *adl_hybrid_events_attrs[] = {
	EVENT_PTR(slots_adl),
	EVENT_PTR(td_retiring_adl),
	EVENT_PTR(td_bad_spec_adl),
	EVENT_PTR(td_fe_bound_adl),
	EVENT_PTR(td_be_bound_adl),
	EVENT_PTR(td_heavy_ops_adl),
	EVENT_PTR(td_br_mis_adl),
	EVENT_PTR(td_fetch_lat_adl),
	EVENT_PTR(td_mem_bound_adl),
	NULL,
};

/* Must be in IDX order */
EVENT_ATTR_STR_HYBRID(mem-loads, mem_ld_adl, "event=0xd0,umask=0x5,ldlat=3;event=0xcd,umask=0x1,ldlat=3", hybrid_big_small);
EVENT_ATTR_STR_HYBRID(mem-stores, mem_st_adl, "event=0xd0,umask=0x6;event=0xcd,umask=0x2", hybrid_big_small);
EVENT_ATTR_STR_HYBRID(mem-loads-aux, mem_ld_aux_adl, "event=0x03,umask=0x82", hybrid_big);

static struct attribute *adl_hybrid_mem_attrs[] = {
	EVENT_PTR(mem_ld_adl),
	EVENT_PTR(mem_st_adl),
	EVENT_PTR(mem_ld_aux_adl),
	NULL,
};

static struct attribute *mtl_hybrid_mem_attrs[] = {
	EVENT_PTR(mem_ld_adl),
	EVENT_PTR(mem_st_adl),
	NULL
};
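
/*
 * Note (added for clarity, not part of the original code): a hybrid event
 * string may carry one semicolon-separated encoding per hybrid PMU type
 * named in the attribute's pmu_type mask, listed in hybrid PMU index order
 * ("Must be in IDX order" above). Each per-PMU "events" directory shows
 * only the field that applies to it, e.g. mem-loads above carries both a
 * big-core and an Atom encoding, while mem-loads-aux is big-core only.
 */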
EVENT_ATTR_STR_HYBRID(tx-start, tx_start_adl, "event=0xc9,umask=0x1", hybrid_big);
EVENT_ATTR_STR_HYBRID(tx-commit, tx_commit_adl, "event=0xc9,umask=0x2", hybrid_big);
EVENT_ATTR_STR_HYBRID(tx-abort, tx_abort_adl, "event=0xc9,umask=0x4", hybrid_big);
EVENT_ATTR_STR_HYBRID(tx-conflict, tx_conflict_adl, "event=0x54,umask=0x1", hybrid_big);
EVENT_ATTR_STR_HYBRID(cycles-t, cycles_t_adl, "event=0x3c,in_tx=1", hybrid_big);
EVENT_ATTR_STR_HYBRID(cycles-ct, cycles_ct_adl, "event=0x3c,in_tx=1,in_tx_cp=1", hybrid_big);
EVENT_ATTR_STR_HYBRID(tx-capacity-read, tx_capacity_read_adl, "event=0x54,umask=0x80", hybrid_big);
EVENT_ATTR_STR_HYBRID(tx-capacity-write, tx_capacity_write_adl, "event=0x54,umask=0x2", hybrid_big);

static struct attribute *adl_hybrid_tsx_attrs[] = {
	EVENT_PTR(tx_start_adl),
	EVENT_PTR(tx_abort_adl),
	EVENT_PTR(tx_commit_adl),
	EVENT_PTR(tx_capacity_read_adl),
	EVENT_PTR(tx_capacity_write_adl),
	EVENT_PTR(tx_conflict_adl),
	EVENT_PTR(cycles_t_adl),
	EVENT_PTR(cycles_ct_adl),
	NULL,
};

FORMAT_ATTR_HYBRID(in_tx, hybrid_big);
FORMAT_ATTR_HYBRID(in_tx_cp, hybrid_big);
FORMAT_ATTR_HYBRID(offcore_rsp, hybrid_big_small);
FORMAT_ATTR_HYBRID(ldlat, hybrid_big_small);
FORMAT_ATTR_HYBRID(frontend, hybrid_big);

#define ADL_HYBRID_RTM_FORMAT_ATTR	\
	FORMAT_HYBRID_PTR(in_tx),	\
	FORMAT_HYBRID_PTR(in_tx_cp)

#define ADL_HYBRID_FORMAT_ATTR		\
	FORMAT_HYBRID_PTR(offcore_rsp),	\
	FORMAT_HYBRID_PTR(ldlat),	\
	FORMAT_HYBRID_PTR(frontend)

static struct attribute *adl_hybrid_extra_attr_rtm[] = {
	ADL_HYBRID_RTM_FORMAT_ATTR,
	ADL_HYBRID_FORMAT_ATTR,
	NULL
};

static struct attribute *adl_hybrid_extra_attr[] = {
	ADL_HYBRID_FORMAT_ATTR,
	NULL
};

FORMAT_ATTR_HYBRID(snoop_rsp, hybrid_small);

static struct attribute *mtl_hybrid_extra_attr_rtm[] = {
	ADL_HYBRID_RTM_FORMAT_ATTR,
	ADL_HYBRID_FORMAT_ATTR,
	FORMAT_HYBRID_PTR(snoop_rsp),
	NULL
};

static struct attribute *mtl_hybrid_extra_attr[] = {
	ADL_HYBRID_FORMAT_ATTR,
	FORMAT_HYBRID_PTR(snoop_rsp),
	NULL
};

/*
 * dev_get_drvdata() on a PMU device returns the struct pmu registered for
 * it; for hybrid PMUs that struct pmu is embedded in a struct
 * x86_hybrid_pmu, which container_of() recovers here.
 */
static bool is_attr_for_this_pmu(struct kobject *kobj, struct attribute *attr)
{
	struct device *dev = kobj_to_dev(kobj);
	struct x86_hybrid_pmu *pmu =
		container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
	struct perf_pmu_events_hybrid_attr *pmu_attr =
		container_of(attr, struct perf_pmu_events_hybrid_attr, attr.attr);

	return pmu->pmu_type & pmu_attr->pmu_type;
}

static umode_t hybrid_events_is_visible(struct kobject *kobj,
					struct attribute *attr, int i)
{
	return is_attr_for_this_pmu(kobj, attr) ? attr->mode : 0;
}

static inline int hybrid_find_supported_cpu(struct x86_hybrid_pmu *pmu)
{
	int cpu = cpumask_first(&pmu->supported_cpus);

	return (cpu >= nr_cpu_ids) ? -1 : cpu;
}

static umode_t hybrid_tsx_is_visible(struct kobject *kobj,
				     struct attribute *attr, int i)
{
	struct device *dev = kobj_to_dev(kobj);
	struct x86_hybrid_pmu *pmu =
		container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
	int cpu = hybrid_find_supported_cpu(pmu);

	return (cpu >= 0) && is_attr_for_this_pmu(kobj, attr) && cpu_has(&cpu_data(cpu), X86_FEATURE_RTM) ? attr->mode : 0;
}

static umode_t hybrid_format_is_visible(struct kobject *kobj,
					struct attribute *attr, int i)
{
	struct device *dev = kobj_to_dev(kobj);
	struct x86_hybrid_pmu *pmu =
		container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
	struct perf_pmu_format_hybrid_attr *pmu_attr =
		container_of(attr, struct perf_pmu_format_hybrid_attr, attr.attr);
	int cpu = hybrid_find_supported_cpu(pmu);

	return (cpu >= 0) && (pmu->pmu_type & pmu_attr->pmu_type) ? attr->mode : 0;
}

static struct attribute_group hybrid_group_events_td  = {
	.name		= "events",
	.is_visible	= hybrid_events_is_visible,
};

static struct attribute_group hybrid_group_events_mem = {
	.name		= "events",
	.is_visible	= hybrid_events_is_visible,
};

static struct attribute_group hybrid_group_events_tsx = {
	.name		= "events",
	.is_visible	= hybrid_tsx_is_visible,
};

static struct attribute_group hybrid_group_format_extra = {
	.name		= "format",
	.is_visible	= hybrid_format_is_visible,
};

static ssize_t intel_hybrid_get_attr_cpus(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct x86_hybrid_pmu *pmu =
		container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);

	return cpumap_print_to_pagebuf(true, buf, &pmu->supported_cpus);
}

static DEVICE_ATTR(cpus, S_IRUGO, intel_hybrid_get_attr_cpus, NULL);
static struct attribute *intel_hybrid_cpus_attrs[] = {
	&dev_attr_cpus.attr,
	NULL,
};

static struct attribute_group hybrid_group_cpus = {
	.attrs		= intel_hybrid_cpus_attrs,
};

static const struct attribute_group *hybrid_attr_update[] = {
	&hybrid_group_events_td,
	&hybrid_group_events_mem,
	&hybrid_group_events_tsx,
	&group_caps_gen,
	&group_caps_lbr,
	&hybrid_group_format_extra,
	&group_default,
	&hybrid_group_cpus,
	NULL,
};

static struct attribute *empty_attrs;

static void intel_pmu_check_num_counters(int *num_counters,
					 int *num_counters_fixed,
					 u64 *intel_ctrl, u64 fixed_mask)
{
	if (*num_counters > INTEL_PMC_MAX_GENERIC) {
		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
		     *num_counters, INTEL_PMC_MAX_GENERIC);
		*num_counters = INTEL_PMC_MAX_GENERIC;
	}
	*intel_ctrl = (1ULL << *num_counters) - 1;

	if (*num_counters_fixed > INTEL_PMC_MAX_FIXED) {
		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
		     *num_counters_fixed, INTEL_PMC_MAX_FIXED);
		*num_counters_fixed = INTEL_PMC_MAX_FIXED;
	}

	*intel_ctrl |= fixed_mask << INTEL_PMC_IDX_FIXED;
}
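
/*
 * Worked example (illustrative, added for clarity): with 8 general-purpose
 * and 4 fixed counters, fixed_mask is 0xf and INTEL_PMC_IDX_FIXED is 32, so
 *
 *   intel_ctrl = ((1ULL << 8) - 1) | (0xfULL << 32) = 0x0000000f000000ffULL
 *
 * i.e. general-purpose counters occupy the low bits and fixed counters
 * start at bit 32, mirroring the IA32_PERF_GLOBAL_CTRL layout.
 */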
static void intel_pmu_check_event_constraints(struct event_constraint *event_constraints,
					      int num_counters,
					      int num_counters_fixed,
					      u64 intel_ctrl)
{
	struct event_constraint *c;

	if (!event_constraints)
		return;

	/*
	 * event on fixed counter2 (REF_CYCLES) only works on this
	 * counter, so do not extend mask to generic counters
	 */
	for_each_event_constraint(c, event_constraints) {
		/*
		 * Don't extend the topdown slots and metrics
		 * events to the generic counters.
		 */
		if (c->idxmsk64 & INTEL_PMC_MSK_TOPDOWN) {
			/*
			 * Disable the topdown slots and metrics events
			 * if the slots event is not enumerated in CPUID.
			 */
			if (!(INTEL_PMC_MSK_FIXED_SLOTS & intel_ctrl))
				c->idxmsk64 = 0;
			c->weight = hweight64(c->idxmsk64);
			continue;
		}

		if (c->cmask == FIXED_EVENT_FLAGS) {
			/* Disable fixed counters which are not enumerated in CPUID */
			c->idxmsk64 &= intel_ctrl;

			/*
			 * Don't extend the pseudo-encoding to the
			 * generic counters
			 */
			if (!use_fixed_pseudo_encoding(c->code))
				c->idxmsk64 |= (1ULL << num_counters) - 1;
		}
		c->idxmsk64 &=
			~(~0ULL << (INTEL_PMC_IDX_FIXED + num_counters_fixed));
		c->weight = hweight64(c->idxmsk64);
	}
}

static void intel_pmu_check_extra_regs(struct extra_reg *extra_regs)
{
	struct extra_reg *er;

	/*
	 * Accessing the extra MSRs may cause #GP under certain circumstances,
	 * e.g. KVM doesn't support offcore events.
	 * Check all extra_regs here.
	 */
	if (!extra_regs)
		return;

	for (er = extra_regs; er->msr; er++) {
		er->extra_msr_access = check_msr(er->msr, 0x11UL);
		/* Disable LBR select mapping */
		if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
			x86_pmu.lbr_sel_map = NULL;
	}
}

static const struct { enum hybrid_pmu_type id; char *name; } intel_hybrid_pmu_type_map[] __initconst = {
	{ hybrid_small, "cpu_atom" },
	{ hybrid_big, "cpu_core" },
};

static __always_inline int intel_pmu_init_hybrid(enum hybrid_pmu_type pmus)
{
	unsigned long pmus_mask = pmus;
	struct x86_hybrid_pmu *pmu;
	int idx = 0, bit;

	x86_pmu.num_hybrid_pmus = hweight_long(pmus_mask);
	x86_pmu.hybrid_pmu = kcalloc(x86_pmu.num_hybrid_pmus,
				     sizeof(struct x86_hybrid_pmu),
				     GFP_KERNEL);
	if (!x86_pmu.hybrid_pmu)
		return -ENOMEM;

	static_branch_enable(&perf_is_hybrid);
	x86_pmu.filter = intel_pmu_filter;

	for_each_set_bit(bit, &pmus_mask, ARRAY_SIZE(intel_hybrid_pmu_type_map)) {
		pmu = &x86_pmu.hybrid_pmu[idx++];
		pmu->pmu_type = intel_hybrid_pmu_type_map[bit].id;
		pmu->name = intel_hybrid_pmu_type_map[bit].name;

		pmu->num_counters = x86_pmu.num_counters;
		pmu->num_counters_fixed = x86_pmu.num_counters_fixed;
		pmu->max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, pmu->num_counters);
		pmu->unconstrained = (struct event_constraint)
				     __EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1,
							0, pmu->num_counters, 0, 0);

		pmu->intel_cap.capabilities = x86_pmu.intel_cap.capabilities;
		if (pmu->pmu_type & hybrid_small) {
			pmu->intel_cap.perf_metrics = 0;
			pmu->intel_cap.pebs_output_pt_available = 1;
			pmu->mid_ack = true;
		} else if (pmu->pmu_type & hybrid_big) {
			pmu->intel_cap.perf_metrics = 1;
			pmu->intel_cap.pebs_output_pt_available = 0;
			pmu->late_ack = true;
		}
	}

	return 0;
}
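
/*
 * Note (added for clarity, not part of the original code): 0x013c below is
 * the architectural "unhalted reference cycles" encoding (event 0x3c,
 * umask 0x01). When CPUID.0AH:EBX reports that this architectural event is
 * available, PERF_COUNT_HW_REF_CPU_CYCLES is remapped to it so the event
 * can also be scheduled on general-purpose counters, instead of the 0x0300
 * pseudo-encoding that only works on the dedicated fixed counter.
 */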
static __always_inline void intel_pmu_ref_cycles_ext(void)
{
	if (!(x86_pmu.events_maskl & (INTEL_PMC_MSK_FIXED_REF_CYCLES >> INTEL_PMC_IDX_FIXED)))
		intel_perfmon_event_map[PERF_COUNT_HW_REF_CPU_CYCLES] = 0x013c;
}

static __always_inline void intel_pmu_init_glc(struct pmu *pmu)
{
	x86_pmu.late_ack = true;
	x86_pmu.limit_period = glc_limit_period;
	x86_pmu.pebs_aliases = NULL;
	x86_pmu.pebs_prec_dist = true;
	x86_pmu.pebs_block = true;
	x86_pmu.flags |= PMU_FL_HAS_RSP_1;
	x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
	x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
	x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04);
	x86_pmu.lbr_pt_coexist = true;
	x86_pmu.num_topdown_events = 8;
	static_call_update(intel_pmu_update_topdown_event,
			   &icl_update_topdown_event);
	static_call_update(intel_pmu_set_topdown_event_period,
			   &icl_set_topdown_event_period);

	memcpy(hybrid_var(pmu, hw_cache_event_ids), glc_hw_cache_event_ids, sizeof(hw_cache_event_ids));
	memcpy(hybrid_var(pmu, hw_cache_extra_regs), glc_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
	hybrid(pmu, event_constraints) = intel_glc_event_constraints;
	hybrid(pmu, pebs_constraints) = intel_glc_pebs_event_constraints;

	intel_pmu_ref_cycles_ext();
}

static __always_inline void intel_pmu_init_grt(struct pmu *pmu)
{
	x86_pmu.mid_ack = true;
	x86_pmu.limit_period = glc_limit_period;
	x86_pmu.pebs_aliases = NULL;
	x86_pmu.pebs_prec_dist = true;
	x86_pmu.pebs_block = true;
	x86_pmu.lbr_pt_coexist = true;
	x86_pmu.flags |= PMU_FL_HAS_RSP_1;
	x86_pmu.flags |= PMU_FL_INSTR_LATENCY;

	memcpy(hybrid_var(pmu, hw_cache_event_ids), glp_hw_cache_event_ids, sizeof(hw_cache_event_ids));
	memcpy(hybrid_var(pmu, hw_cache_extra_regs), tnt_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
	hybrid_var(pmu, hw_cache_event_ids)[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
	hybrid(pmu, event_constraints) = intel_grt_event_constraints;
	hybrid(pmu, pebs_constraints) = intel_grt_pebs_event_constraints;
	hybrid(pmu, extra_regs) = intel_grt_extra_regs;

	intel_pmu_ref_cycles_ext();
}

__init int intel_pmu_init(void)
{
	struct attribute **extra_skl_attr = &empty_attrs;
	struct attribute **extra_attr = &empty_attrs;
	struct attribute **td_attr  = &empty_attrs;
	struct attribute **mem_attr = &empty_attrs;
	struct attribute **tsx_attr = &empty_attrs;
	union cpuid10_edx edx;
	union cpuid10_eax eax;
	union cpuid10_ebx ebx;
	unsigned int fixed_mask;
	bool pmem = false;
	int version, i;
	char *name;
	struct x86_hybrid_pmu *pmu;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
		switch (boot_cpu_data.x86) {
		case 0x6:
			return p6_pmu_init();
		case 0xb:
			return knc_pmu_init();
		case 0xf:
			return p4_pmu_init();
		}
		return -ENODEV;
	}
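
	/*
	 * Reference sketch of CPUID leaf 0xA (architectural PerfMon) as
	 * consumed below, added for readers; see the SDM for the
	 * authoritative layout:
	 *
	 *   EAX: [7:0] version id, [15:8] GP counters per thread,
	 *        [23:16] counter width, [31:24] length of the EBX event mask
	 *   EBX: a set bit means the corresponding architectural event is
	 *        not available
	 *   ECX: (v5+) bitmap of supported fixed counters (fixed_mask here)
	 *   EDX: [4:0] fixed counters, [12:5] fixed counter width,
	 *        [15] AnyThread deprecation (v5+)
	 */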
	/*
	 * Check whether the Architectural PerfMon supports
	 * Branch Misses Retired hw_event or not.
	 */
	cpuid(10, &eax.full, &ebx.full, &fixed_mask, &edx.full);
	if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
		return -ENODEV;

	version = eax.split.version_id;
	if (version < 2)
		x86_pmu = core_pmu;
	else
		x86_pmu = intel_pmu;

	x86_pmu.version			= version;
	x86_pmu.num_counters		= eax.split.num_counters;
	x86_pmu.cntval_bits		= eax.split.bit_width;
	x86_pmu.cntval_mask		= (1ULL << eax.split.bit_width) - 1;

	x86_pmu.events_maskl		= ebx.full;
	x86_pmu.events_mask_len		= eax.split.mask_length;

	x86_pmu.max_pebs_events		= min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
	x86_pmu.pebs_capable		= PEBS_COUNTER_MASK;

	/*
	 * Quirk: v2 perfmon does not report fixed-purpose events, so
	 * assume at least 3 events, when not running in a hypervisor:
	 */
	if (version > 1 && version < 5) {
		int assume = 3 * !boot_cpu_has(X86_FEATURE_HYPERVISOR);

		x86_pmu.num_counters_fixed =
			max((int)edx.split.num_counters_fixed, assume);

		fixed_mask = (1L << x86_pmu.num_counters_fixed) - 1;
	} else if (version >= 5)
		x86_pmu.num_counters_fixed = fls(fixed_mask);

	if (boot_cpu_has(X86_FEATURE_PDCM)) {
		u64 capabilities;

		rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
		x86_pmu.intel_cap.capabilities = capabilities;
	}

	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32) {
		x86_pmu.lbr_reset = intel_pmu_lbr_reset_32;
		x86_pmu.lbr_read = intel_pmu_lbr_read_32;
	}

	if (boot_cpu_has(X86_FEATURE_ARCH_LBR))
		intel_pmu_arch_lbr_init();

	intel_ds_init();

	x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */

	if (version >= 5) {
		x86_pmu.intel_cap.anythread_deprecated = edx.split.anythread_deprecated;
		if (x86_pmu.intel_cap.anythread_deprecated)
			pr_cont(" AnyThread deprecated, ");
	}

	/*
	 * Install the hw-cache-events table:
	 */
	switch (boot_cpu_data.x86_model) {
	case INTEL_FAM6_CORE_YONAH:
		pr_cont("Core events, ");
		name = "core";
		break;

	case INTEL_FAM6_CORE2_MEROM:
		x86_add_quirk(intel_clovertown_quirk);
		fallthrough;

	case INTEL_FAM6_CORE2_MEROM_L:
	case INTEL_FAM6_CORE2_PENRYN:
	case INTEL_FAM6_CORE2_DUNNINGTON:
		memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_core();

		x86_pmu.event_constraints = intel_core2_event_constraints;
		x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
		pr_cont("Core2 events, ");
		name = "core2";
		break;

	case INTEL_FAM6_NEHALEM:
	case INTEL_FAM6_NEHALEM_EP:
	case INTEL_FAM6_NEHALEM_EX:
		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_nehalem_event_constraints;
		x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.extra_regs = intel_nehalem_extra_regs;
		x86_pmu.limit_period = nhm_limit_period;

		mem_attr = nhm_mem_events_attrs;

		/* UOPS_ISSUED.STALLED_CYCLES */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);

		intel_pmu_pebs_data_source_nhm();
		x86_add_quirk(intel_nehalem_quirk);
		x86_pmu.pebs_no_tlb = 1;
		extra_attr = nhm_format_attr;

		pr_cont("Nehalem events, ");
		name = "nehalem";
		break;

	case INTEL_FAM6_ATOM_BONNELL:
	case INTEL_FAM6_ATOM_BONNELL_MID:
	case INTEL_FAM6_ATOM_SALTWELL:
	case INTEL_FAM6_ATOM_SALTWELL_MID:
	case INTEL_FAM6_ATOM_SALTWELL_TABLET:
		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_atom();

		x86_pmu.event_constraints = intel_gen_event_constraints;
		x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
		x86_pmu.pebs_aliases = intel_pebs_aliases_core2;
		pr_cont("Atom events, ");
		name = "bonnell";
		break;

	case INTEL_FAM6_ATOM_SILVERMONT:
	case INTEL_FAM6_ATOM_SILVERMONT_D:
	case INTEL_FAM6_ATOM_SILVERMONT_MID:
	case INTEL_FAM6_ATOM_AIRMONT:
	case INTEL_FAM6_ATOM_AIRMONT_MID:
		memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_slm();

		x86_pmu.event_constraints = intel_slm_event_constraints;
		x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
		x86_pmu.extra_regs = intel_slm_extra_regs;
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		td_attr = slm_events_attrs;
		extra_attr = slm_format_attr;
		pr_cont("Silvermont events, ");
		name = "silvermont";
		break;

	case INTEL_FAM6_ATOM_GOLDMONT:
	case INTEL_FAM6_ATOM_GOLDMONT_D:
		memcpy(hw_cache_event_ids, glm_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_skl();

		x86_pmu.event_constraints = intel_slm_event_constraints;
		x86_pmu.pebs_constraints = intel_glm_pebs_event_constraints;
		x86_pmu.extra_regs = intel_glm_extra_regs;
		/*
		 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
		 * for precise cycles.
		 * :pp is identical to :ppp
		 */
		x86_pmu.pebs_aliases = NULL;
		x86_pmu.pebs_prec_dist = true;
		x86_pmu.lbr_pt_coexist = true;
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		td_attr = glm_events_attrs;
		extra_attr = slm_format_attr;
		pr_cont("Goldmont events, ");
		name = "goldmont";
		break;

	case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
		memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, glp_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_skl();

		x86_pmu.event_constraints = intel_slm_event_constraints;
		x86_pmu.extra_regs = intel_glm_extra_regs;
		/*
		 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
		 * for precise cycles.
		 */
		x86_pmu.pebs_aliases = NULL;
		x86_pmu.pebs_prec_dist = true;
		x86_pmu.lbr_pt_coexist = true;
		x86_pmu.pebs_capable = ~0ULL;
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_PEBS_ALL;
		x86_pmu.get_event_constraints = glp_get_event_constraints;
		td_attr = glm_events_attrs;
		/* Goldmont Plus has 4-wide pipeline */
		event_attr_td_total_slots_scale_glm.event_str = "4";
		extra_attr = slm_format_attr;
		pr_cont("Goldmont plus events, ");
		name = "goldmont_plus";
		break;

	case INTEL_FAM6_ATOM_TREMONT_D:
	case INTEL_FAM6_ATOM_TREMONT:
	case INTEL_FAM6_ATOM_TREMONT_L:
		x86_pmu.late_ack = true;
		memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, tnt_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));
		hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;

		intel_pmu_lbr_init_skl();

		x86_pmu.event_constraints = intel_slm_event_constraints;
		x86_pmu.extra_regs = intel_tnt_extra_regs;
		/*
		 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
		 * for precise cycles.
		 */
		x86_pmu.pebs_aliases = NULL;
		x86_pmu.pebs_prec_dist = true;
		x86_pmu.lbr_pt_coexist = true;
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.get_event_constraints = tnt_get_event_constraints;
		td_attr = tnt_events_attrs;
		extra_attr = slm_format_attr;
		pr_cont("Tremont events, ");
		name = "Tremont";
		break;

	case INTEL_FAM6_ATOM_GRACEMONT:
		intel_pmu_init_grt(NULL);
		intel_pmu_pebs_data_source_grt();
		x86_pmu.pebs_latency_data = adl_latency_data_small;
		x86_pmu.get_event_constraints = tnt_get_event_constraints;
		td_attr = tnt_events_attrs;
		mem_attr = grt_mem_attrs;
		extra_attr = nhm_format_attr;
		pr_cont("Gracemont events, ");
		name = "gracemont";
		break;

	case INTEL_FAM6_ATOM_CRESTMONT:
	case INTEL_FAM6_ATOM_CRESTMONT_X:
		intel_pmu_init_grt(NULL);
		x86_pmu.extra_regs = intel_cmt_extra_regs;
		intel_pmu_pebs_data_source_cmt();
		x86_pmu.pebs_latency_data = mtl_latency_data_small;
		x86_pmu.get_event_constraints = cmt_get_event_constraints;
		td_attr = cmt_events_attrs;
		mem_attr = grt_mem_attrs;
		extra_attr = cmt_format_attr;
		pr_cont("Crestmont events, ");
		name = "crestmont";
		break;

	case INTEL_FAM6_WESTMERE:
	case INTEL_FAM6_WESTMERE_EP:
	case INTEL_FAM6_WESTMERE_EX:
		memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_westmere_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
		x86_pmu.extra_regs = intel_westmere_extra_regs;
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;

		mem_attr = nhm_mem_events_attrs;

		/* UOPS_ISSUED.STALLED_CYCLES */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);

		intel_pmu_pebs_data_source_nhm();
		extra_attr = nhm_format_attr;
pr_cont("Westmere events, "); 6483 name = "westmere"; 6484 break; 6485 6486 case INTEL_FAM6_SANDYBRIDGE: 6487 case INTEL_FAM6_SANDYBRIDGE_X: 6488 x86_add_quirk(intel_sandybridge_quirk); 6489 x86_add_quirk(intel_ht_bug); 6490 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, 6491 sizeof(hw_cache_event_ids)); 6492 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, 6493 sizeof(hw_cache_extra_regs)); 6494 6495 intel_pmu_lbr_init_snb(); 6496 6497 x86_pmu.event_constraints = intel_snb_event_constraints; 6498 x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints; 6499 x86_pmu.pebs_aliases = intel_pebs_aliases_snb; 6500 if (boot_cpu_data.x86_model == INTEL_FAM6_SANDYBRIDGE_X) 6501 x86_pmu.extra_regs = intel_snbep_extra_regs; 6502 else 6503 x86_pmu.extra_regs = intel_snb_extra_regs; 6504 6505 6506 /* all extra regs are per-cpu when HT is on */ 6507 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 6508 x86_pmu.flags |= PMU_FL_NO_HT_SHARING; 6509 6510 td_attr = snb_events_attrs; 6511 mem_attr = snb_mem_events_attrs; 6512 6513 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */ 6514 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 6515 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); 6516 /* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles*/ 6517 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 6518 X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1); 6519 6520 extra_attr = nhm_format_attr; 6521 6522 pr_cont("SandyBridge events, "); 6523 name = "sandybridge"; 6524 break; 6525 6526 case INTEL_FAM6_IVYBRIDGE: 6527 case INTEL_FAM6_IVYBRIDGE_X: 6528 x86_add_quirk(intel_ht_bug); 6529 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, 6530 sizeof(hw_cache_event_ids)); 6531 /* dTLB-load-misses on IVB is different than SNB */ 6532 hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */ 6533 6534 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, 6535 sizeof(hw_cache_extra_regs)); 6536 6537 intel_pmu_lbr_init_snb(); 6538 6539 x86_pmu.event_constraints = intel_ivb_event_constraints; 6540 x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints; 6541 x86_pmu.pebs_aliases = intel_pebs_aliases_ivb; 6542 x86_pmu.pebs_prec_dist = true; 6543 if (boot_cpu_data.x86_model == INTEL_FAM6_IVYBRIDGE_X) 6544 x86_pmu.extra_regs = intel_snbep_extra_regs; 6545 else 6546 x86_pmu.extra_regs = intel_snb_extra_regs; 6547 /* all extra regs are per-cpu when HT is on */ 6548 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 6549 x86_pmu.flags |= PMU_FL_NO_HT_SHARING; 6550 6551 td_attr = snb_events_attrs; 6552 mem_attr = snb_mem_events_attrs; 6553 6554 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */ 6555 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 6556 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); 6557 6558 extra_attr = nhm_format_attr; 6559 6560 pr_cont("IvyBridge events, "); 6561 name = "ivybridge"; 6562 break; 6563 6564 6565 case INTEL_FAM6_HASWELL: 6566 case INTEL_FAM6_HASWELL_X: 6567 case INTEL_FAM6_HASWELL_L: 6568 case INTEL_FAM6_HASWELL_G: 6569 x86_add_quirk(intel_ht_bug); 6570 x86_add_quirk(intel_pebs_isolation_quirk); 6571 x86_pmu.late_ack = true; 6572 memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids)); 6573 memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); 6574 6575 intel_pmu_lbr_init_hsw(); 6576 6577 x86_pmu.event_constraints = intel_hsw_event_constraints; 6578 x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints; 6579 
x86_pmu.extra_regs = intel_snbep_extra_regs; 6580 x86_pmu.pebs_aliases = intel_pebs_aliases_ivb; 6581 x86_pmu.pebs_prec_dist = true; 6582 /* all extra regs are per-cpu when HT is on */ 6583 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 6584 x86_pmu.flags |= PMU_FL_NO_HT_SHARING; 6585 6586 x86_pmu.hw_config = hsw_hw_config; 6587 x86_pmu.get_event_constraints = hsw_get_event_constraints; 6588 x86_pmu.lbr_double_abort = true; 6589 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? 6590 hsw_format_attr : nhm_format_attr; 6591 td_attr = hsw_events_attrs; 6592 mem_attr = hsw_mem_events_attrs; 6593 tsx_attr = hsw_tsx_events_attrs; 6594 pr_cont("Haswell events, "); 6595 name = "haswell"; 6596 break; 6597 6598 case INTEL_FAM6_BROADWELL: 6599 case INTEL_FAM6_BROADWELL_D: 6600 case INTEL_FAM6_BROADWELL_G: 6601 case INTEL_FAM6_BROADWELL_X: 6602 x86_add_quirk(intel_pebs_isolation_quirk); 6603 x86_pmu.late_ack = true; 6604 memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids)); 6605 memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); 6606 6607 /* L3_MISS_LOCAL_DRAM is BIT(26) in Broadwell */ 6608 hw_cache_extra_regs[C(LL)][C(OP_READ)][C(RESULT_MISS)] = HSW_DEMAND_READ | 6609 BDW_L3_MISS|HSW_SNOOP_DRAM; 6610 hw_cache_extra_regs[C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = HSW_DEMAND_WRITE|BDW_L3_MISS| 6611 HSW_SNOOP_DRAM; 6612 hw_cache_extra_regs[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = HSW_DEMAND_READ| 6613 BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM; 6614 hw_cache_extra_regs[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = HSW_DEMAND_WRITE| 6615 BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM; 6616 6617 intel_pmu_lbr_init_hsw(); 6618 6619 x86_pmu.event_constraints = intel_bdw_event_constraints; 6620 x86_pmu.pebs_constraints = intel_bdw_pebs_event_constraints; 6621 x86_pmu.extra_regs = intel_snbep_extra_regs; 6622 x86_pmu.pebs_aliases = intel_pebs_aliases_ivb; 6623 x86_pmu.pebs_prec_dist = true; 6624 /* all extra regs are per-cpu when HT is on */ 6625 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 6626 x86_pmu.flags |= PMU_FL_NO_HT_SHARING; 6627 6628 x86_pmu.hw_config = hsw_hw_config; 6629 x86_pmu.get_event_constraints = hsw_get_event_constraints; 6630 x86_pmu.limit_period = bdw_limit_period; 6631 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? 
6632 hsw_format_attr : nhm_format_attr; 6633 td_attr = hsw_events_attrs; 6634 mem_attr = hsw_mem_events_attrs; 6635 tsx_attr = hsw_tsx_events_attrs; 6636 pr_cont("Broadwell events, "); 6637 name = "broadwell"; 6638 break; 6639 6640 case INTEL_FAM6_XEON_PHI_KNL: 6641 case INTEL_FAM6_XEON_PHI_KNM: 6642 memcpy(hw_cache_event_ids, 6643 slm_hw_cache_event_ids, sizeof(hw_cache_event_ids)); 6644 memcpy(hw_cache_extra_regs, 6645 knl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); 6646 intel_pmu_lbr_init_knl(); 6647 6648 x86_pmu.event_constraints = intel_slm_event_constraints; 6649 x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints; 6650 x86_pmu.extra_regs = intel_knl_extra_regs; 6651 6652 /* all extra regs are per-cpu when HT is on */ 6653 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 6654 x86_pmu.flags |= PMU_FL_NO_HT_SHARING; 6655 extra_attr = slm_format_attr; 6656 pr_cont("Knights Landing/Mill events, "); 6657 name = "knights-landing"; 6658 break; 6659 6660 case INTEL_FAM6_SKYLAKE_X: 6661 pmem = true; 6662 fallthrough; 6663 case INTEL_FAM6_SKYLAKE_L: 6664 case INTEL_FAM6_SKYLAKE: 6665 case INTEL_FAM6_KABYLAKE_L: 6666 case INTEL_FAM6_KABYLAKE: 6667 case INTEL_FAM6_COMETLAKE_L: 6668 case INTEL_FAM6_COMETLAKE: 6669 x86_add_quirk(intel_pebs_isolation_quirk); 6670 x86_pmu.late_ack = true; 6671 memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids)); 6672 memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); 6673 intel_pmu_lbr_init_skl(); 6674 6675 /* INT_MISC.RECOVERY_CYCLES has umask 1 in Skylake */ 6676 event_attr_td_recovery_bubbles.event_str_noht = 6677 "event=0xd,umask=0x1,cmask=1"; 6678 event_attr_td_recovery_bubbles.event_str_ht = 6679 "event=0xd,umask=0x1,cmask=1,any=1"; 6680 6681 x86_pmu.event_constraints = intel_skl_event_constraints; 6682 x86_pmu.pebs_constraints = intel_skl_pebs_event_constraints; 6683 x86_pmu.extra_regs = intel_skl_extra_regs; 6684 x86_pmu.pebs_aliases = intel_pebs_aliases_skl; 6685 x86_pmu.pebs_prec_dist = true; 6686 /* all extra regs are per-cpu when HT is on */ 6687 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 6688 x86_pmu.flags |= PMU_FL_NO_HT_SHARING; 6689 6690 x86_pmu.hw_config = hsw_hw_config; 6691 x86_pmu.get_event_constraints = hsw_get_event_constraints; 6692 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? 6693 hsw_format_attr : nhm_format_attr; 6694 extra_skl_attr = skl_format_attr; 6695 td_attr = hsw_events_attrs; 6696 mem_attr = hsw_mem_events_attrs; 6697 tsx_attr = hsw_tsx_events_attrs; 6698 intel_pmu_pebs_data_source_skl(pmem); 6699 6700 /* 6701 * Processors with CPUID.RTM_ALWAYS_ABORT have TSX deprecated by default. 6702 * TSX force abort hooks are not required on these systems. Only deploy 6703 * workaround when microcode has not enabled X86_FEATURE_RTM_ALWAYS_ABORT. 
		/*
		 * Processors with CPUID.RTM_ALWAYS_ABORT have TSX deprecated by default.
		 * TSX force abort hooks are not required on these systems. Only deploy
		 * the workaround when microcode has not enabled X86_FEATURE_RTM_ALWAYS_ABORT.
		 */
		if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT) &&
		    !boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT)) {
			x86_pmu.flags |= PMU_FL_TFA;
			x86_pmu.get_event_constraints = tfa_get_event_constraints;
			x86_pmu.enable_all = intel_tfa_pmu_enable_all;
			x86_pmu.commit_scheduling = intel_tfa_commit_scheduling;
		}

		pr_cont("Skylake events, ");
		name = "skylake";
		break;

	case INTEL_FAM6_ICELAKE_X:
	case INTEL_FAM6_ICELAKE_D:
		x86_pmu.pebs_ept = 1;
		pmem = true;
		fallthrough;
	case INTEL_FAM6_ICELAKE_L:
	case INTEL_FAM6_ICELAKE:
	case INTEL_FAM6_TIGERLAKE_L:
	case INTEL_FAM6_TIGERLAKE:
	case INTEL_FAM6_ROCKETLAKE:
		x86_pmu.late_ack = true;
		memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
		hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
		intel_pmu_lbr_init_skl();

		x86_pmu.event_constraints = intel_icl_event_constraints;
		x86_pmu.pebs_constraints = intel_icl_pebs_event_constraints;
		x86_pmu.extra_regs = intel_icl_extra_regs;
		x86_pmu.pebs_aliases = NULL;
		x86_pmu.pebs_prec_dist = true;
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

		x86_pmu.hw_config = hsw_hw_config;
		x86_pmu.get_event_constraints = icl_get_event_constraints;
		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
			hsw_format_attr : nhm_format_attr;
		extra_skl_attr = skl_format_attr;
		mem_attr = icl_events_attrs;
		td_attr = icl_td_events_attrs;
		tsx_attr = icl_tsx_events_attrs;
		x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04);
		x86_pmu.lbr_pt_coexist = true;
		intel_pmu_pebs_data_source_skl(pmem);
		x86_pmu.num_topdown_events = 4;
		static_call_update(intel_pmu_update_topdown_event,
				   &icl_update_topdown_event);
		static_call_update(intel_pmu_set_topdown_event_period,
				   &icl_set_topdown_event_period);
		pr_cont("Icelake events, ");
		name = "icelake";
		break;

	case INTEL_FAM6_SAPPHIRERAPIDS_X:
	case INTEL_FAM6_EMERALDRAPIDS_X:
		x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX;
		x86_pmu.extra_regs = intel_glc_extra_regs;
		fallthrough;
	case INTEL_FAM6_GRANITERAPIDS_X:
	case INTEL_FAM6_GRANITERAPIDS_D:
		intel_pmu_init_glc(NULL);
		if (!x86_pmu.extra_regs)
			x86_pmu.extra_regs = intel_rwc_extra_regs;
		x86_pmu.pebs_ept = 1;
		x86_pmu.hw_config = hsw_hw_config;
		x86_pmu.get_event_constraints = glc_get_event_constraints;
		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
			hsw_format_attr : nhm_format_attr;
		extra_skl_attr = skl_format_attr;
		mem_attr = glc_events_attrs;
		td_attr = glc_td_events_attrs;
		tsx_attr = glc_tsx_events_attrs;
		intel_pmu_pebs_data_source_skl(true);
		pr_cont("Sapphire Rapids events, ");
		name = "sapphire_rapids";
		break;

	case INTEL_FAM6_ALDERLAKE:
	case INTEL_FAM6_ALDERLAKE_L:
	case INTEL_FAM6_RAPTORLAKE:
	case INTEL_FAM6_RAPTORLAKE_P:
	case INTEL_FAM6_RAPTORLAKE_S:
		/*
		 * Alder Lake has 2 types of CPU, core and atom.
		 *
		 * Initialize the common PerfMon capabilities here.
		 */
		intel_pmu_init_hybrid(hybrid_big_small);

		x86_pmu.pebs_latency_data = adl_latency_data_small;
		x86_pmu.get_event_constraints = adl_get_event_constraints;
		x86_pmu.hw_config = adl_hw_config;
		x86_pmu.get_hybrid_cpu_type = adl_get_hybrid_cpu_type;

		td_attr = adl_hybrid_events_attrs;
		mem_attr = adl_hybrid_mem_attrs;
		tsx_attr = adl_hybrid_tsx_attrs;
		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
			adl_hybrid_extra_attr_rtm : adl_hybrid_extra_attr;

		/* Initialize big core specific PerfMon capabilities. */
		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
		intel_pmu_init_glc(&pmu->pmu);
		if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) {
			pmu->num_counters = x86_pmu.num_counters + 2;
			pmu->num_counters_fixed = x86_pmu.num_counters_fixed + 1;
		} else {
			pmu->num_counters = x86_pmu.num_counters;
			pmu->num_counters_fixed = x86_pmu.num_counters_fixed;
		}

		/*
		 * Quirk: on some Alder Lake machines, when all E-cores are
		 * disabled in the BIOS, CPUID leaf 0xA enumerates all counters
		 * of the P-cores even though X86_FEATURE_HYBRID_CPU is still
		 * set. The code above would then mistakenly add the extra
		 * P-core counters a second time. Correct the number of
		 * counters here.
		 */
		if ((pmu->num_counters > 8) || (pmu->num_counters_fixed > 4)) {
			pmu->num_counters = x86_pmu.num_counters;
			pmu->num_counters_fixed = x86_pmu.num_counters_fixed;
		}

		pmu->max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, pmu->num_counters);
		pmu->unconstrained = (struct event_constraint)
				     __EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1,
							0, pmu->num_counters, 0, 0);
		pmu->extra_regs = intel_glc_extra_regs;

		/* Initialize Atom core specific PerfMon capabilities. */
		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
		intel_pmu_init_grt(&pmu->pmu);

		x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX;
		intel_pmu_pebs_data_source_adl();
		pr_cont("Alderlake Hybrid events, ");
		name = "alderlake_hybrid";
		break;

	case INTEL_FAM6_METEORLAKE:
	case INTEL_FAM6_METEORLAKE_L:
		intel_pmu_init_hybrid(hybrid_big_small);

		x86_pmu.pebs_latency_data = mtl_latency_data_small;
		x86_pmu.get_event_constraints = mtl_get_event_constraints;
		x86_pmu.hw_config = adl_hw_config;

		td_attr = adl_hybrid_events_attrs;
		mem_attr = mtl_hybrid_mem_attrs;
		tsx_attr = adl_hybrid_tsx_attrs;
		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
			mtl_hybrid_extra_attr_rtm : mtl_hybrid_extra_attr;

		/* Initialize big core specific PerfMon capabilities. */
		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
		intel_pmu_init_glc(&pmu->pmu);
		pmu->extra_regs = intel_rwc_extra_regs;

		/* Initialize Atom core specific PerfMon capabilities. */
		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
		intel_pmu_init_grt(&pmu->pmu);
		pmu->extra_regs = intel_cmt_extra_regs;

		intel_pmu_pebs_data_source_mtl();
		pr_cont("Meteorlake Hybrid events, ");
		name = "meteorlake_hybrid";
		break;

	default:
		switch (x86_pmu.version) {
		case 1:
			x86_pmu.event_constraints = intel_v1_event_constraints;
			pr_cont("generic architected perfmon v1, ");
			name = "generic_arch_v1";
			break;
		case 2:
		case 3:
		case 4:
			/*
			 * default constraints for v2 and up
			 */
			x86_pmu.event_constraints = intel_gen_event_constraints;
			pr_cont("generic architected perfmon, ");
			name = "generic_arch_v2+";
			break;
		default:
			/*
			 * The default constraints for v5 and up can support up to
			 * 16 fixed counters. For the fixed counters 4 and later,
			 * the pseudo-encoding is applied.
			 * The constraints may be cut according to the CPUID enumeration
			 * by inserting the EVENT_CONSTRAINT_END.
			 */
			if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED)
				x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
			intel_v5_gen_event_constraints[x86_pmu.num_counters_fixed].weight = -1;
			x86_pmu.event_constraints = intel_v5_gen_event_constraints;
			pr_cont("generic architected perfmon, ");
			name = "generic_arch_v5+";
			break;
		}
	}

	snprintf(pmu_name_str, sizeof(pmu_name_str), "%s", name);

	if (!is_hybrid()) {
		group_events_td.attrs  = td_attr;
		group_events_mem.attrs = mem_attr;
		group_events_tsx.attrs = tsx_attr;
		group_format_extra.attrs = extra_attr;
		group_format_extra_skl.attrs = extra_skl_attr;

		x86_pmu.attr_update = attr_update;
	} else {
		hybrid_group_events_td.attrs  = td_attr;
		hybrid_group_events_mem.attrs = mem_attr;
		hybrid_group_events_tsx.attrs = tsx_attr;
		hybrid_group_format_extra.attrs = extra_attr;

		x86_pmu.attr_update = hybrid_attr_update;
	}

	intel_pmu_check_num_counters(&x86_pmu.num_counters,
				     &x86_pmu.num_counters_fixed,
				     &x86_pmu.intel_ctrl,
				     (u64)fixed_mask);

	/* AnyThread may be deprecated on arch perfmon v5 or later */
	if (x86_pmu.intel_cap.anythread_deprecated)
		x86_pmu.format_attrs = intel_arch_formats_attr;

	intel_pmu_check_event_constraints(x86_pmu.event_constraints,
					  x86_pmu.num_counters,
					  x86_pmu.num_counters_fixed,
					  x86_pmu.intel_ctrl);
	/*
	 * Accessing the LBR MSRs may cause #GP under certain circumstances.
	 * Check all LBR MSRs here.
	 * Disable LBR access if any LBR MSR can not be accessed.
	 */
	if (x86_pmu.lbr_tos && !check_msr(x86_pmu.lbr_tos, 0x3UL))
		x86_pmu.lbr_nr = 0;
	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
		      check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
			x86_pmu.lbr_nr = 0;
	}

	if (x86_pmu.lbr_nr) {
		intel_pmu_lbr_init();

		pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr);

		/* only support branch_stack snapshot for perfmon >= v2 */
		if (x86_pmu.disable_all == intel_pmu_disable_all) {
			if (boot_cpu_has(X86_FEATURE_ARCH_LBR)) {
				static_call_update(perf_snapshot_branch_stack,
						   intel_pmu_snapshot_arch_branch_stack);
			} else {
				static_call_update(perf_snapshot_branch_stack,
						   intel_pmu_snapshot_branch_stack);
			}
		}
	}

	intel_pmu_check_extra_regs(x86_pmu.extra_regs);

	/* Support full width counters using alternative MSR range */
	if (x86_pmu.intel_cap.full_width_write) {
		x86_pmu.max_period = x86_pmu.cntval_mask >> 1;
		x86_pmu.perfctr = MSR_IA32_PMC0;
		pr_cont("full-width counters, ");
	}

	if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics)
		x86_pmu.intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS;

	if (x86_pmu.intel_cap.pebs_timing_info)
		x86_pmu.flags |= PMU_FL_RETIRE_LATENCY;

	intel_aux_output_init();

	return 0;
}

/*
 * HT bug: phase 2 init
 * Called once we have valid topology information to check
 * whether or not HT is enabled
 * If HT is off, then we disable the workaround
 */
static __init int fixup_ht_bug(void)
{
	int c;
	/*
	 * problem not present on this CPU model, nothing to do
	 */
	if (!(x86_pmu.flags & PMU_FL_EXCL_ENABLED))
		return 0;

	if (topology_max_smt_threads() > 1) {
		pr_info("PMU erratum BJ122, BV98, HSD29 worked around, HT is on\n");
		return 0;
	}

	cpus_read_lock();

	hardlockup_detector_perf_stop();

	x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED);

	x86_pmu.start_scheduling = NULL;
	x86_pmu.commit_scheduling = NULL;
	x86_pmu.stop_scheduling = NULL;

	hardlockup_detector_perf_restart();

	for_each_online_cpu(c)
		free_excl_cntrs(&per_cpu(cpu_hw_events, c));

	cpus_read_unlock();
	pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
	return 0;
}
subsys_initcall(fixup_ht_bug)