xref: /freebsd/sys/dev/hwpmc/hwpmc_core.c (revision c1d255d3)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2008 Joseph Koshy
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 /*
30  * Intel Core PMCs.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <sys/param.h>
37 #include <sys/bus.h>
38 #include <sys/pmc.h>
39 #include <sys/pmckern.h>
40 #include <sys/smp.h>
41 #include <sys/systm.h>
42 
43 #include <machine/intr_machdep.h>
44 #include <x86/apicvar.h>
45 #include <machine/cpu.h>
46 #include <machine/cpufunc.h>
47 #include <machine/md_var.h>
48 #include <machine/specialreg.h>
49 
50 #define	CORE_CPUID_REQUEST		0xA
51 #define	CORE_CPUID_REQUEST_SIZE		0x4
52 #define	CORE_CPUID_EAX			0x0
53 #define	CORE_CPUID_EBX			0x1
54 #define	CORE_CPUID_ECX			0x2
55 #define	CORE_CPUID_EDX			0x3
56 
57 #define	IAF_PMC_CAPS			\
58 	(PMC_CAP_READ | PMC_CAP_WRITE | PMC_CAP_INTERRUPT | \
59 	 PMC_CAP_USER | PMC_CAP_SYSTEM)
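/*
 * Fixed-function counters are addressed through a pseudo index with
 * bit 30 set; IAF_RI_TO_MSR() below builds the index that is passed to
 * rdpmc() and returned by iaf_get_msr().  (Per the Intel SDM, RDPMC
 * selects the fixed-counter range when bit 30 of the index is set.)
 */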
60 #define	IAF_RI_TO_MSR(RI)		((RI) + (1 << 30))
61 
62 #define	IAP_PMC_CAPS (PMC_CAP_INTERRUPT | PMC_CAP_USER | PMC_CAP_SYSTEM | \
63     PMC_CAP_EDGE | PMC_CAP_THRESHOLD | PMC_CAP_READ | PMC_CAP_WRITE |	 \
64     PMC_CAP_INVERT | PMC_CAP_QUALIFIER | PMC_CAP_PRECISE)
65 
66 #define	EV_IS_NOTARCH		0
67 #define	EV_IS_ARCH_SUPP		1
68 #define	EV_IS_ARCH_NOTSUPP	-1
69 
70 /*
71  * "Architectural" events defined by Intel.  The values of these
72  * symbols correspond to bit positions in the vector returned
73  * in EBX by CPUID leaf 0AH.
74  */
75 enum core_arch_events {
76 	CORE_AE_BRANCH_INSTRUCTION_RETIRED	= 5,
77 	CORE_AE_BRANCH_MISSES_RETIRED		= 6,
78 	CORE_AE_INSTRUCTION_RETIRED		= 1,
79 	CORE_AE_LLC_MISSES			= 4,
80 	CORE_AE_LLC_REFERENCE			= 3,
81 	CORE_AE_UNHALTED_REFERENCE_CYCLES	= 2,
82 	CORE_AE_UNHALTED_CORE_CYCLES		= 0
83 };
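/*
 * Note: CPUID leaf 0AH reports these events in EBX as a bit vector in
 * which a set bit means the event is *not* available; iap_initialize()
 * therefore stores the complement of that vector in
 * core_architectural_events.
 */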
84 
85 static enum pmc_cputype	core_cputype;
86 
87 struct core_cpu {
88 	volatile uint32_t	pc_resync;	/* MSRs potentially out of sync. */
89 	volatile uint32_t	pc_iafctrl;	/* Fixed function control. */
90 	volatile uint64_t	pc_globalctrl;	/* Global control register. */
91 	struct pmc_hw		pc_corepmcs[];
92 };
93 
94 static struct core_cpu **core_pcpu;
95 
96 static uint32_t core_architectural_events;
97 static uint64_t core_pmcmask;
98 
99 static int core_iaf_ri;		/* relative index of fixed counters */
100 static int core_iaf_width;
101 static int core_iaf_npmc;
102 
103 static int core_iap_width;
104 static int core_iap_npmc;
105 static int core_iap_wroffset;
106 
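/*
 * While at least one counter is allocated on CPUs that advertise the
 * TSX_FORCE_ABORT workaround, MSR_TSX_FORCE_ABORT is set to 1 (see
 * iaf_allocate_pmc()/iaf_release_pmc()); this is believed to force RTM
 * transactions to abort so that the affected performance counter
 * remains usable, per Intel's TSX/PMC erratum guidance.
 */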
107 static u_int pmc_alloc_refs;
108 static bool pmc_tsx_force_abort_set;
109 
110 static int
111 core_pcpu_noop(struct pmc_mdep *md, int cpu)
112 {
113 	(void) md;
114 	(void) cpu;
115 	return (0);
116 }
117 
118 static int
119 core_pcpu_init(struct pmc_mdep *md, int cpu)
120 {
121 	struct pmc_cpu *pc;
122 	struct core_cpu *cc;
123 	struct pmc_hw *phw;
124 	int core_ri, n, npmc;
125 
126 	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
127 	    ("[iaf,%d] insane cpu number %d", __LINE__, cpu));
128 
129 	PMCDBG1(MDP,INI,1,"core-init cpu=%d", cpu);
130 
131 	core_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_IAP].pcd_ri;
132 	npmc = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_IAP].pcd_num;
133 
134 	if (core_cputype != PMC_CPU_INTEL_CORE)
135 		npmc += md->pmd_classdep[PMC_MDEP_CLASS_INDEX_IAF].pcd_num;
136 
137 	cc = malloc(sizeof(struct core_cpu) + npmc * sizeof(struct pmc_hw),
138 	    M_PMC, M_WAITOK | M_ZERO);
139 
140 	core_pcpu[cpu] = cc;
141 	pc = pmc_pcpu[cpu];
142 
143 	KASSERT(pc != NULL && cc != NULL,
144 	    ("[core,%d] NULL per-cpu structures cpu=%d", __LINE__, cpu));
145 
146 	for (n = 0, phw = cc->pc_corepmcs; n < npmc; n++, phw++) {
147 		phw->phw_state 	  = PMC_PHW_FLAG_IS_ENABLED |
148 		    PMC_PHW_CPU_TO_STATE(cpu) |
149 		    PMC_PHW_INDEX_TO_STATE(n + core_ri);
150 		phw->phw_pmc	  = NULL;
151 		pc->pc_hwpmcs[n + core_ri]  = phw;
152 	}
153 
154 	return (0);
155 }
156 
157 static int
158 core_pcpu_fini(struct pmc_mdep *md, int cpu)
159 {
160 	int core_ri, n, npmc;
161 	struct pmc_cpu *pc;
162 	struct core_cpu *cc;
163 	uint64_t msr = 0;
164 
165 	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
166 	    ("[core,%d] insane cpu number (%d)", __LINE__, cpu));
167 
168 	PMCDBG1(MDP,INI,1,"core-pcpu-fini cpu=%d", cpu);
169 
170 	if ((cc = core_pcpu[cpu]) == NULL)
171 		return (0);
172 
173 	core_pcpu[cpu] = NULL;
174 
175 	pc = pmc_pcpu[cpu];
176 
177 	KASSERT(pc != NULL, ("[core,%d] NULL per-cpu %d state", __LINE__,
178 		cpu));
179 
180 	npmc = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_IAP].pcd_num;
181 	core_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_IAP].pcd_ri;
182 
183 	for (n = 0; n < npmc; n++) {
184 		msr = rdmsr(IAP_EVSEL0 + n) & ~IAP_EVSEL_MASK;
185 		wrmsr(IAP_EVSEL0 + n, msr);
186 	}
187 
188 	if (core_cputype != PMC_CPU_INTEL_CORE) {
189 		msr = rdmsr(IAF_CTRL) & ~IAF_CTRL_MASK;
190 		wrmsr(IAF_CTRL, msr);
191 		npmc += md->pmd_classdep[PMC_MDEP_CLASS_INDEX_IAF].pcd_num;
192 	}
193 
194 	for (n = 0; n < npmc; n++)
195 		pc->pc_hwpmcs[n + core_ri] = NULL;
196 
197 	free(cc, M_PMC);
198 
199 	return (0);
200 }
201 
202 /*
203  * Fixed function counters.
204  */
205 
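/*
 * For sampling PMCs the counter is programmed with (2^width - reload)
 * so that it overflows after 'reload' events; the helpers below convert
 * between that hardware value and the reload count.  For example, with
 * a 48-bit counter a reload count of 1000 is written as 2^48 - 1000.
 */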
206 static pmc_value_t
207 iaf_perfctr_value_to_reload_count(pmc_value_t v)
208 {
209 
210 	/* If the PMC has overflowed, return a reload count of zero. */
211 	if ((v & (1ULL << (core_iaf_width - 1))) == 0)
212 		return (0);
213 	v &= (1ULL << core_iaf_width) - 1;
214 	return (1ULL << core_iaf_width) - v;
215 }
216 
217 static pmc_value_t
218 iaf_reload_count_to_perfctr_value(pmc_value_t rlc)
219 {
220 	return (1ULL << core_iaf_width) - rlc;
221 }
222 
223 static int
224 iaf_allocate_pmc(int cpu, int ri, struct pmc *pm,
225     const struct pmc_op_pmcallocate *a)
226 {
227 	uint8_t ev, umask;
228 	uint32_t caps, flags, config;
229 	const struct pmc_md_iap_op_pmcallocate *iap;
230 
231 	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
232 	    ("[core,%d] illegal CPU %d", __LINE__, cpu));
233 
234 	PMCDBG2(MDP,ALL,1, "iaf-allocate ri=%d reqcaps=0x%x", ri, pm->pm_caps);
235 
236 	if (ri < 0 || ri >= core_iaf_npmc)
237 		return (EINVAL);
238 
239 	if (a->pm_class != PMC_CLASS_IAF)
240 		return (EINVAL);
241 
242 	iap = &a->pm_md.pm_iap;
243 	config = iap->pm_iap_config;
244 	ev = IAP_EVSEL_GET(config);
245 	umask = IAP_UMASK_GET(config);
246 
247 	/* INST_RETIRED.ANY */
248 	if (ev == 0xC0 && ri != 0)
249 		return (EINVAL);
250 	/* CPU_CLK_UNHALTED.THREAD */
251 	if (ev == 0x3C && ri != 1)
252 		return (EINVAL);
253 	/* CPU_CLK_UNHALTED.REF */
254 	if (ev == 0x0 && umask == 0x3 && ri != 2)
255 		return (EINVAL);
256 
257 	pmc_alloc_refs++;
258 	if ((cpu_stdext_feature3 & CPUID_STDEXT3_TSXFA) != 0 &&
259 	    !pmc_tsx_force_abort_set) {
260 		pmc_tsx_force_abort_set = true;
261 		x86_msr_op(MSR_TSX_FORCE_ABORT, MSR_OP_RENDEZVOUS_ALL |
262 		    MSR_OP_WRITE, 1, NULL);
263 	}
264 
265 	flags = 0;
266 	if (config & IAP_OS)
267 		flags |= IAF_OS;
268 	if (config & IAP_USR)
269 		flags |= IAF_USR;
270 	if (config & IAP_ANY)
271 		flags |= IAF_ANY;
272 	if (config & IAP_INT)
273 		flags |= IAF_PMI;
274 
275 	caps = a->pm_caps;
276 	if (caps & PMC_CAP_INTERRUPT)
277 		flags |= IAF_PMI;
278 	if (caps & PMC_CAP_SYSTEM)
279 		flags |= IAF_OS;
280 	if (caps & PMC_CAP_USER)
281 		flags |= IAF_USR;
282 	if ((caps & (PMC_CAP_USER | PMC_CAP_SYSTEM)) == 0)
283 		flags |= (IAF_OS | IAF_USR);
284 
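	/*
	 * IA32_FIXED_CTR_CTRL uses a 4-bit control field per fixed
	 * counter (OS, USR, any-thread and PMI enable bits), so the
	 * flags computed above are shifted into the field for row
	 * index 'ri'.
	 */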
285 	pm->pm_md.pm_iaf.pm_iaf_ctrl = (flags << (ri * 4));
286 
287 	PMCDBG1(MDP,ALL,2, "iaf-allocate config=0x%jx",
288 	    (uintmax_t) pm->pm_md.pm_iaf.pm_iaf_ctrl);
289 
290 	return (0);
291 }
292 
293 static int
294 iaf_config_pmc(int cpu, int ri, struct pmc *pm)
295 {
296 	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
297 	    ("[core,%d] illegal CPU %d", __LINE__, cpu));
298 
299 	KASSERT(ri >= 0 && ri < core_iaf_npmc,
300 	    ("[core,%d] illegal row-index %d", __LINE__, ri));
301 
302 	PMCDBG3(MDP,CFG,1, "iaf-config cpu=%d ri=%d pm=%p", cpu, ri, pm);
303 
304 	KASSERT(core_pcpu[cpu] != NULL, ("[core,%d] null per-cpu %d", __LINE__,
305 	    cpu));
306 
307 	core_pcpu[cpu]->pc_corepmcs[ri + core_iaf_ri].phw_pmc = pm;
308 
309 	return (0);
310 }
311 
312 static int
313 iaf_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
314 {
315 	int error;
316 	struct pmc_hw *phw;
317 	char iaf_name[PMC_NAME_MAX];
318 
319 	phw = &core_pcpu[cpu]->pc_corepmcs[ri + core_iaf_ri];
320 
321 	(void) snprintf(iaf_name, sizeof(iaf_name), "IAF-%d", ri);
322 	if ((error = copystr(iaf_name, pi->pm_name, PMC_NAME_MAX,
323 	    NULL)) != 0)
324 		return (error);
325 
326 	pi->pm_class = PMC_CLASS_IAF;
327 
328 	if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
329 		pi->pm_enabled = TRUE;
330 		*ppmc          = phw->phw_pmc;
331 	} else {
332 		pi->pm_enabled = FALSE;
333 		*ppmc          = NULL;
334 	}
335 
336 	return (0);
337 }
338 
339 static int
340 iaf_get_config(int cpu, int ri, struct pmc **ppm)
341 {
342 	*ppm = core_pcpu[cpu]->pc_corepmcs[ri + core_iaf_ri].phw_pmc;
343 
344 	return (0);
345 }
346 
347 static int
348 iaf_get_msr(int ri, uint32_t *msr)
349 {
350 	KASSERT(ri >= 0 && ri < core_iaf_npmc,
351 	    ("[iaf,%d] ri %d out of range", __LINE__, ri));
352 
353 	*msr = IAF_RI_TO_MSR(ri);
354 
355 	return (0);
356 }
357 
358 static int
359 iaf_read_pmc(int cpu, int ri, pmc_value_t *v)
360 {
361 	struct pmc *pm;
362 	pmc_value_t tmp;
363 
364 	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
365 	    ("[core,%d] illegal cpu value %d", __LINE__, cpu));
366 	KASSERT(ri >= 0 && ri < core_iaf_npmc,
367 	    ("[core,%d] illegal row-index %d", __LINE__, ri));
368 
369 	pm = core_pcpu[cpu]->pc_corepmcs[ri + core_iaf_ri].phw_pmc;
370 
371 	KASSERT(pm,
372 	    ("[core,%d] cpu %d ri %d(%d) pmc not configured", __LINE__, cpu,
373 		ri, ri + core_iaf_ri));
374 
375 	tmp = rdpmc(IAF_RI_TO_MSR(ri));
376 
377 	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
378 		*v = iaf_perfctr_value_to_reload_count(tmp);
379 	else
380 		*v = tmp & ((1ULL << core_iaf_width) - 1);
381 
382 	PMCDBG4(MDP,REA,1, "iaf-read cpu=%d ri=%d msr=0x%x -> v=%jx", cpu, ri,
383 	    IAF_RI_TO_MSR(ri), *v);
384 
385 	return (0);
386 }
387 
388 static int
389 iaf_release_pmc(int cpu, int ri, struct pmc *pmc)
390 {
391 	PMCDBG3(MDP,REL,1, "iaf-release cpu=%d ri=%d pm=%p", cpu, ri, pmc);
392 
393 	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
394 	    ("[core,%d] illegal CPU value %d", __LINE__, cpu));
395 	KASSERT(ri >= 0 && ri < core_iaf_npmc,
396 	    ("[core,%d] illegal row-index %d", __LINE__, ri));
397 
398 	KASSERT(core_pcpu[cpu]->pc_corepmcs[ri + core_iaf_ri].phw_pmc == NULL,
399 	    ("[core,%d] PHW pmc non-NULL", __LINE__));
400 
401 	MPASS(pmc_alloc_refs > 0);
402 	if (pmc_alloc_refs-- == 1 && pmc_tsx_force_abort_set) {
403 		pmc_tsx_force_abort_set = false;
404 		x86_msr_op(MSR_TSX_FORCE_ABORT, MSR_OP_RENDEZVOUS_ALL |
405 		    MSR_OP_WRITE, 0, NULL);
406 	}
407 
408 	return (0);
409 }
410 
411 static int
412 iaf_start_pmc(int cpu, int ri)
413 {
414 	struct pmc *pm;
415 	struct core_cpu *iafc;
416 	uint64_t msr = 0;
417 
418 	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
419 	    ("[core,%d] illegal CPU value %d", __LINE__, cpu));
420 	KASSERT(ri >= 0 && ri < core_iaf_npmc,
421 	    ("[core,%d] illegal row-index %d", __LINE__, ri));
422 
423 	PMCDBG2(MDP,STA,1,"iaf-start cpu=%d ri=%d", cpu, ri);
424 
425 	iafc = core_pcpu[cpu];
426 	pm = iafc->pc_corepmcs[ri + core_iaf_ri].phw_pmc;
427 
428 	iafc->pc_iafctrl |= pm->pm_md.pm_iaf.pm_iaf_ctrl;
429 
430 	msr = rdmsr(IAF_CTRL) & ~IAF_CTRL_MASK;
431 	wrmsr(IAF_CTRL, msr | (iafc->pc_iafctrl & IAF_CTRL_MASK));
432 
433 	do {
434 		iafc->pc_resync = 0;
435 		iafc->pc_globalctrl |= (1ULL << (ri + IAF_OFFSET));
436 		msr = rdmsr(IA_GLOBAL_CTRL) & ~IAF_GLOBAL_CTRL_MASK;
437 		wrmsr(IA_GLOBAL_CTRL, msr | (iafc->pc_globalctrl &
438 		    IAF_GLOBAL_CTRL_MASK));
439 	} while (iafc->pc_resync != 0);
440 
441 	PMCDBG4(MDP,STA,1,"iafctrl=%x(%x) globalctrl=%jx(%jx)",
442 	    iafc->pc_iafctrl, (uint32_t) rdmsr(IAF_CTRL),
443 	    iafc->pc_globalctrl, rdmsr(IA_GLOBAL_CTRL));
444 
445 	return (0);
446 }
447 
448 static int
449 iaf_stop_pmc(int cpu, int ri)
450 {
451 	uint32_t fc;
452 	struct core_cpu *iafc;
453 	uint64_t msr = 0;
454 
455 	PMCDBG2(MDP,STO,1,"iaf-stop cpu=%d ri=%d", cpu, ri);
456 
457 	iafc = core_pcpu[cpu];
458 
459 	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
460 	    ("[core,%d] illegal CPU value %d", __LINE__, cpu));
461 	KASSERT(ri >= 0 && ri < core_iaf_npmc,
462 	    ("[core,%d] illegal row-index %d", __LINE__, ri));
463 
464 	fc = (IAF_MASK << (ri * 4));
465 
466 	iafc->pc_iafctrl &= ~fc;
467 
468 	PMCDBG1(MDP,STO,1,"iaf-stop iafctrl=%x", iafc->pc_iafctrl);
469 	msr = rdmsr(IAF_CTRL) & ~IAF_CTRL_MASK;
470 	wrmsr(IAF_CTRL, msr | (iafc->pc_iafctrl & IAF_CTRL_MASK));
471 
472 	do {
473 		iafc->pc_resync = 0;
474 		iafc->pc_globalctrl &= ~(1ULL << (ri + IAF_OFFSET));
475 		msr = rdmsr(IA_GLOBAL_CTRL) & ~IAF_GLOBAL_CTRL_MASK;
476 		wrmsr(IA_GLOBAL_CTRL, msr | (iafc->pc_globalctrl &
477 		    IAF_GLOBAL_CTRL_MASK));
478 	} while (iafc->pc_resync != 0);
479 
480 	PMCDBG4(MDP,STO,1,"iafctrl=%x(%x) globalctrl=%jx(%jx)",
481 	    iafc->pc_iafctrl, (uint32_t) rdmsr(IAF_CTRL),
482 	    iafc->pc_globalctrl, rdmsr(IA_GLOBAL_CTRL));
483 
484 	return (0);
485 }
486 
487 static int
488 iaf_write_pmc(int cpu, int ri, pmc_value_t v)
489 {
490 	struct core_cpu *cc;
491 	struct pmc *pm;
492 	uint64_t msr;
493 
494 	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
495 	    ("[core,%d] illegal cpu value %d", __LINE__, cpu));
496 	KASSERT(ri >= 0 && ri < core_iaf_npmc,
497 	    ("[core,%d] illegal row-index %d", __LINE__, ri));
498 
499 	cc = core_pcpu[cpu];
500 	pm = cc->pc_corepmcs[ri + core_iaf_ri].phw_pmc;
501 
502 	KASSERT(pm,
503 	    ("[core,%d] cpu %d ri %d pmc not configured", __LINE__, cpu, ri));
504 
505 	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
506 		v = iaf_reload_count_to_perfctr_value(v);
507 
508 	/* Turn off fixed counters */
509 	msr = rdmsr(IAF_CTRL) & ~IAF_CTRL_MASK;
510 	wrmsr(IAF_CTRL, msr);
511 
512 	wrmsr(IAF_CTR0 + ri, v & ((1ULL << core_iaf_width) - 1));
513 
514 	/* Turn on fixed counters */
515 	msr = rdmsr(IAF_CTRL) & ~IAF_CTRL_MASK;
516 	wrmsr(IAF_CTRL, msr | (cc->pc_iafctrl & IAF_CTRL_MASK));
517 
518 	PMCDBG6(MDP,WRI,1, "iaf-write cpu=%d ri=%d msr=0x%x v=%jx iafctrl=%jx "
519 	    "pmc=%jx", cpu, ri, IAF_RI_TO_MSR(ri), v,
520 	    (uintmax_t) rdmsr(IAF_CTRL),
521 	    (uintmax_t) rdpmc(IAF_RI_TO_MSR(ri)));
522 
523 	return (0);
524 }
525 
526 
527 static void
528 iaf_initialize(struct pmc_mdep *md, int maxcpu, int npmc, int pmcwidth)
529 {
530 	struct pmc_classdep *pcd;
531 
532 	KASSERT(md != NULL, ("[iaf,%d] md is NULL", __LINE__));
533 
534 	PMCDBG0(MDP,INI,1, "iaf-initialize");
535 
536 	pcd = &md->pmd_classdep[PMC_MDEP_CLASS_INDEX_IAF];
537 
538 	pcd->pcd_caps	= IAF_PMC_CAPS;
539 	pcd->pcd_class	= PMC_CLASS_IAF;
540 	pcd->pcd_num	= npmc;
541 	pcd->pcd_ri	= md->pmd_npmc;
542 	pcd->pcd_width	= pmcwidth;
543 
544 	pcd->pcd_allocate_pmc	= iaf_allocate_pmc;
545 	pcd->pcd_config_pmc	= iaf_config_pmc;
546 	pcd->pcd_describe	= iaf_describe;
547 	pcd->pcd_get_config	= iaf_get_config;
548 	pcd->pcd_get_msr	= iaf_get_msr;
549 	pcd->pcd_pcpu_fini	= core_pcpu_noop;
550 	pcd->pcd_pcpu_init	= core_pcpu_noop;
551 	pcd->pcd_read_pmc	= iaf_read_pmc;
552 	pcd->pcd_release_pmc	= iaf_release_pmc;
553 	pcd->pcd_start_pmc	= iaf_start_pmc;
554 	pcd->pcd_stop_pmc	= iaf_stop_pmc;
555 	pcd->pcd_write_pmc	= iaf_write_pmc;
556 
557 	md->pmd_npmc	       += npmc;
558 }
559 
560 /*
561  * Intel programmable PMCs.
562  */
563 
564 /* Sub fields of UMASK that this event supports. */
565 #define	IAP_M_CORE		(1 << 0) /* Core specificity */
566 #define	IAP_M_AGENT		(1 << 1) /* Agent specificity */
567 #define	IAP_M_PREFETCH		(1 << 2) /* Prefetch */
568 #define	IAP_M_MESI		(1 << 3) /* MESI */
569 #define	IAP_M_SNOOPRESPONSE	(1 << 4) /* Snoop response */
570 #define	IAP_M_SNOOPTYPE		(1 << 5) /* Snoop type */
571 #define	IAP_M_TRANSITION	(1 << 6) /* Transition */
572 
573 #define	IAP_F_CORE		(0x3 << 14) /* Core specificity */
574 #define	IAP_F_AGENT		(0x1 << 13) /* Agent specificity */
575 #define	IAP_F_PREFETCH		(0x3 << 12) /* Prefetch */
576 #define	IAP_F_MESI		(0xF <<  8) /* MESI */
577 #define	IAP_F_SNOOPRESPONSE	(0xB <<  8) /* Snoop response */
578 #define	IAP_F_SNOOPTYPE		(0x3 <<  8) /* Snoop type */
579 #define	IAP_F_TRANSITION	(0x1 << 12) /* Transition */
580 
581 #define	IAP_PREFETCH_RESERVED	(0x2 << 12)
582 #define	IAP_CORE_THIS		(0x1 << 14)
583 #define	IAP_CORE_ALL		(0x3 << 14)
584 #define	IAP_F_CMASK		0xFF000000
585 
586 static pmc_value_t
587 iap_perfctr_value_to_reload_count(pmc_value_t v)
588 {
589 
590 	/* If the PMC has overflowed, return a reload count of zero. */
591 	if ((v & (1ULL << (core_iap_width - 1))) == 0)
592 		return (0);
593 	v &= (1ULL << core_iap_width) - 1;
594 	return (1ULL << core_iap_width) - v;
595 }
596 
597 static pmc_value_t
598 iap_reload_count_to_perfctr_value(pmc_value_t rlc)
599 {
600 	return (1ULL << core_iap_width) - rlc;
601 }
602 
603 static int
604 iap_pmc_has_overflowed(int ri)
605 {
606 	uint64_t v;
607 
608 	/*
609 	 * We treat a Core (i.e., Intel architecture v1) PMC as
610 	 * having overflowed if its MSB is zero.
611 	 */
612 	v = rdpmc(ri);
613 	return ((v & (1ULL << (core_iap_width - 1))) == 0);
614 }
615 
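/*
 * The iap_event_*_ok_on_counter() helpers below encode, for each
 * supported microarchitecture, which programmable counters a given
 * event select may be scheduled on; the returned value has bit 'ri'
 * set when row index 'ri' is allowed.  The restrictions follow the
 * per-event notes in the Intel SDM event listings.
 */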
616 static int
617 iap_event_corei7_ok_on_counter(uint8_t evsel, int ri)
618 {
619 	uint32_t mask;
620 
621 	switch (evsel) {
622 		/*
623 		 * Events valid only on counter 0, 1.
624 		 */
625 	case 0x40:
626 	case 0x41:
627 	case 0x42:
628 	case 0x43:
629 	case 0x51:
630 	case 0x63:
631 		mask = 0x3;
632 		break;
633 
634 	default:
635 		mask = ~0;	/* Any row index is ok. */
636 	}
637 
638 	return (mask & (1 << ri));
639 }
640 
641 static int
642 iap_event_westmere_ok_on_counter(uint8_t evsel, int ri)
643 {
644 	uint32_t mask;
645 
646 	switch (evsel) {
647 		/*
648 		 * Events valid only on counter 0.
649 		 */
650 	case 0x60:
651 	case 0xB3:
652 		mask = 0x1;
653 		break;
654 
655 		/*
656 		 * Events valid only on counter 0, 1.
657 		 */
658 	case 0x4C:
659 	case 0x4E:
660 	case 0x51:
661 	case 0x63:
662 		mask = 0x3;
663 		break;
664 
665 	default:
666 		mask = ~0;	/* Any row index is ok. */
667 	}
668 
669 	return (mask & (1 << ri));
670 }
671 
672 static int
673 iap_event_sb_sbx_ib_ibx_ok_on_counter(uint8_t evsel, int ri)
674 {
675 	uint32_t mask;
676 
677 	switch (evsel) {
678 		/* Events valid only on counter 0. */
679 	case 0xB7:
680 		mask = 0x1;
681 		break;
682 		/* Events valid only on counter 1. */
683 	case 0xC0:
684 		mask = 0x2;
685 		break;
686 		/* Events valid only on counter 2. */
687 	case 0x48:
688 	case 0xA2:
689 	case 0xA3:
690 		mask = 0x4;
691 		break;
692 		/* Events valid only on counter 3. */
693 	case 0xBB:
694 	case 0xCD:
695 		mask = 0x8;
696 		break;
697 	default:
698 		mask = ~0;	/* Any row index is ok. */
699 	}
700 
701 	return (mask & (1 << ri));
702 }
703 
704 static int
705 iap_event_ok_on_counter(uint8_t evsel, int ri)
706 {
707 	uint32_t mask;
708 
709 	switch (evsel) {
710 		/*
711 		 * Events valid only on counter 0.
712 		 */
713 	case 0x10:
714 	case 0x14:
715 	case 0x18:
716 	case 0xB3:
717 	case 0xC1:
718 	case 0xCB:
719 		mask = (1 << 0);
720 		break;
721 
722 		/*
723 		 * Events valid only on counter 1.
724 		 */
725 	case 0x11:
726 	case 0x12:
727 	case 0x13:
728 		mask = (1 << 1);
729 		break;
730 
731 	default:
732 		mask = ~0;	/* Any row index is ok. */
733 	}
734 
735 	return (mask & (1 << ri));
736 }
737 
738 static int
739 iap_allocate_pmc(int cpu, int ri, struct pmc *pm,
740     const struct pmc_op_pmcallocate *a)
741 {
742 	uint8_t ev;
743 	const struct pmc_md_iap_op_pmcallocate *iap;
744 
745 	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
746 	    ("[core,%d] illegal CPU %d", __LINE__, cpu));
747 	KASSERT(ri >= 0 && ri < core_iap_npmc,
748 	    ("[core,%d] illegal row-index value %d", __LINE__, ri));
749 
750 	if (a->pm_class != PMC_CLASS_IAP)
751 		return (EINVAL);
752 
753 	iap = &a->pm_md.pm_iap;
754 	ev = IAP_EVSEL_GET(iap->pm_iap_config);
755 
756 	switch (core_cputype) {
757 	case PMC_CPU_INTEL_COREI7:
758 	case PMC_CPU_INTEL_NEHALEM_EX:
759 		if (iap_event_corei7_ok_on_counter(ev, ri) == 0)
760 			return (EINVAL);
761 		break;
762 	case PMC_CPU_INTEL_SKYLAKE:
763 	case PMC_CPU_INTEL_SKYLAKE_XEON:
764 	case PMC_CPU_INTEL_BROADWELL:
765 	case PMC_CPU_INTEL_BROADWELL_XEON:
766 	case PMC_CPU_INTEL_SANDYBRIDGE:
767 	case PMC_CPU_INTEL_SANDYBRIDGE_XEON:
768 	case PMC_CPU_INTEL_IVYBRIDGE:
769 	case PMC_CPU_INTEL_IVYBRIDGE_XEON:
770 	case PMC_CPU_INTEL_HASWELL:
771 	case PMC_CPU_INTEL_HASWELL_XEON:
772 		if (iap_event_sb_sbx_ib_ibx_ok_on_counter(ev, ri) == 0)
773 			return (EINVAL);
774 		break;
775 	case PMC_CPU_INTEL_WESTMERE:
776 	case PMC_CPU_INTEL_WESTMERE_EX:
777 		if (iap_event_westmere_ok_on_counter(ev, ri) == 0)
778 			return (EINVAL);
779 		break;
780 	default:
781 		if (iap_event_ok_on_counter(ev, ri) == 0)
782 			return (EINVAL);
783 	}
784 
785 	pm->pm_md.pm_iap.pm_iap_evsel = iap->pm_iap_config;
786 	return (0);
787 }
788 
789 static int
790 iap_config_pmc(int cpu, int ri, struct pmc *pm)
791 {
792 	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
793 	    ("[core,%d] illegal CPU %d", __LINE__, cpu));
794 
795 	KASSERT(ri >= 0 && ri < core_iap_npmc,
796 	    ("[core,%d] illegal row-index %d", __LINE__, ri));
797 
798 	PMCDBG3(MDP,CFG,1, "iap-config cpu=%d ri=%d pm=%p", cpu, ri, pm);
799 
800 	KASSERT(core_pcpu[cpu] != NULL, ("[core,%d] null per-cpu %d", __LINE__,
801 	    cpu));
802 
803 	core_pcpu[cpu]->pc_corepmcs[ri].phw_pmc = pm;
804 
805 	return (0);
806 }
807 
808 static int
809 iap_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
810 {
811 	int error;
812 	struct pmc_hw *phw;
813 	char iap_name[PMC_NAME_MAX];
814 
815 	phw = &core_pcpu[cpu]->pc_corepmcs[ri];
816 
817 	(void) snprintf(iap_name, sizeof(iap_name), "IAP-%d", ri);
818 	if ((error = copystr(iap_name, pi->pm_name, PMC_NAME_MAX,
819 	    NULL)) != 0)
820 		return (error);
821 
822 	pi->pm_class = PMC_CLASS_IAP;
823 
824 	if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
825 		pi->pm_enabled = TRUE;
826 		*ppmc          = phw->phw_pmc;
827 	} else {
828 		pi->pm_enabled = FALSE;
829 		*ppmc          = NULL;
830 	}
831 
832 	return (0);
833 }
834 
835 static int
836 iap_get_config(int cpu, int ri, struct pmc **ppm)
837 {
838 	*ppm = core_pcpu[cpu]->pc_corepmcs[ri].phw_pmc;
839 
840 	return (0);
841 }
842 
843 static int
844 iap_get_msr(int ri, uint32_t *msr)
845 {
846 	KASSERT(ri >= 0 && ri < core_iap_npmc,
847 	    ("[iap,%d] ri %d out of range", __LINE__, ri));
848 
849 	*msr = ri;
850 
851 	return (0);
852 }
853 
854 static int
855 iap_read_pmc(int cpu, int ri, pmc_value_t *v)
856 {
857 	struct pmc *pm;
858 	pmc_value_t tmp;
859 
860 	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
861 	    ("[core,%d] illegal cpu value %d", __LINE__, cpu));
862 	KASSERT(ri >= 0 && ri < core_iap_npmc,
863 	    ("[core,%d] illegal row-index %d", __LINE__, ri));
864 
865 	pm = core_pcpu[cpu]->pc_corepmcs[ri].phw_pmc;
866 
867 	KASSERT(pm,
868 	    ("[core,%d] cpu %d ri %d pmc not configured", __LINE__, cpu,
869 		ri));
870 
871 	tmp = rdpmc(ri);
872 	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
873 		*v = iap_perfctr_value_to_reload_count(tmp);
874 	else
875 		*v = tmp & ((1ULL << core_iap_width) - 1);
876 
877 	PMCDBG4(MDP,REA,1, "iap-read cpu=%d ri=%d msr=0x%x -> v=%jx", cpu, ri,
878 	    IAP_PMC0 + ri, *v);
879 
880 	return (0);
881 }
882 
883 static int
884 iap_release_pmc(int cpu, int ri, struct pmc *pm)
885 {
886 	(void) pm;
887 
888 	PMCDBG3(MDP,REL,1, "iap-release cpu=%d ri=%d pm=%p", cpu, ri,
889 	    pm);
890 
891 	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
892 	    ("[core,%d] illegal CPU value %d", __LINE__, cpu));
893 	KASSERT(ri >= 0 && ri < core_iap_npmc,
894 	    ("[core,%d] illegal row-index %d", __LINE__, ri));
895 
896 	KASSERT(core_pcpu[cpu]->pc_corepmcs[ri].phw_pmc
897 	    == NULL, ("[core,%d] PHW pmc non-NULL", __LINE__));
898 
899 	return (0);
900 }
901 
902 static int
903 iap_start_pmc(int cpu, int ri)
904 {
905 	struct pmc *pm;
906 	uint32_t evsel;
907 	struct core_cpu *cc;
908 
909 	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
910 	    ("[core,%d] illegal CPU value %d", __LINE__, cpu));
911 	KASSERT(ri >= 0 && ri < core_iap_npmc,
912 	    ("[core,%d] illegal row-index %d", __LINE__, ri));
913 
914 	cc = core_pcpu[cpu];
915 	pm = cc->pc_corepmcs[ri].phw_pmc;
916 
917 	KASSERT(pm,
918 	    ("[core,%d] starting cpu%d,ri%d with no pmc configured",
919 		__LINE__, cpu, ri));
920 
921 	PMCDBG2(MDP,STA,1, "iap-start cpu=%d ri=%d", cpu, ri);
922 
923 	evsel = pm->pm_md.pm_iap.pm_iap_evsel;
924 
925 	PMCDBG4(MDP,STA,2, "iap-start/2 cpu=%d ri=%d evselmsr=0x%x evsel=0x%x",
926 	    cpu, ri, IAP_EVSEL0 + ri, evsel);
927 
928 	/* Event specific configuration. */
929 
930 	switch (IAP_EVSEL_GET(evsel)) {
931 	case 0xB7:
932 		wrmsr(IA_OFFCORE_RSP0, pm->pm_md.pm_iap.pm_iap_rsp);
933 		break;
934 	case 0xBB:
935 		wrmsr(IA_OFFCORE_RSP1, pm->pm_md.pm_iap.pm_iap_rsp);
936 		break;
937 	default:
938 		break;
939 	}
940 
941 	wrmsr(IAP_EVSEL0 + ri, evsel | IAP_EN);
942 
943 	if (core_cputype == PMC_CPU_INTEL_CORE)
944 		return (0);
945 
946 	do {
947 		cc->pc_resync = 0;
948 		cc->pc_globalctrl |= (1ULL << ri);
949 		wrmsr(IA_GLOBAL_CTRL, cc->pc_globalctrl);
950 	} while (cc->pc_resync != 0);
951 
952 	return (0);
953 }
954 
955 static int
956 iap_stop_pmc(int cpu, int ri)
957 {
958 	struct pmc *pm __diagused;
959 	struct core_cpu *cc;
960 	uint64_t msr;
961 
962 	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
963 	    ("[core,%d] illegal cpu value %d", __LINE__, cpu));
964 	KASSERT(ri >= 0 && ri < core_iap_npmc,
965 	    ("[core,%d] illegal row index %d", __LINE__, ri));
966 
967 	cc = core_pcpu[cpu];
968 	pm = cc->pc_corepmcs[ri].phw_pmc;
969 
970 	KASSERT(pm,
971 	    ("[core,%d] cpu%d ri%d no configured PMC to stop", __LINE__,
972 		cpu, ri));
973 
974 	PMCDBG2(MDP,STO,1, "iap-stop cpu=%d ri=%d", cpu, ri);
975 
976 	msr = rdmsr(IAP_EVSEL0 + ri) & ~IAP_EVSEL_MASK;
977 	wrmsr(IAP_EVSEL0 + ri, msr);	/* stop hw */
978 
979 	if (core_cputype == PMC_CPU_INTEL_CORE)
980 		return (0);
981 
982 	do {
983 		cc->pc_resync = 0;
984 		cc->pc_globalctrl &= ~(1ULL << ri);
985 		msr = rdmsr(IA_GLOBAL_CTRL) & ~IA_GLOBAL_CTRL_MASK;
986 		wrmsr(IA_GLOBAL_CTRL, cc->pc_globalctrl);
987 	} while (cc->pc_resync != 0);
988 
989 	return (0);
990 }
991 
992 static int
993 iap_write_pmc(int cpu, int ri, pmc_value_t v)
994 {
995 	struct pmc *pm;
996 	struct core_cpu *cc;
997 
998 	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
999 	    ("[core,%d] illegal cpu value %d", __LINE__, cpu));
1000 	KASSERT(ri >= 0 && ri < core_iap_npmc,
1001 	    ("[core,%d] illegal row index %d", __LINE__, ri));
1002 
1003 	cc = core_pcpu[cpu];
1004 	pm = cc->pc_corepmcs[ri].phw_pmc;
1005 
1006 	KASSERT(pm,
1007 	    ("[core,%d] cpu%d ri%d no configured PMC to stop", __LINE__,
1008 		cpu, ri));
1009 
1010 	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
1011 		v = iap_reload_count_to_perfctr_value(v);
1012 
1013 	v &= (1ULL << core_iap_width) - 1;
1014 
1015 	PMCDBG4(MDP,WRI,1, "iap-write cpu=%d ri=%d msr=0x%x v=%jx", cpu, ri,
1016 	    IAP_PMC0 + ri, v);
1017 
1018 	/*
1019 	 * Write the new value to the counter (or its alias).  The
1020 	 * counter will be in a stopped state when the pcd_write()
1021 	 * entry point is called.
1022 	 */
1023 	wrmsr(core_iap_wroffset + IAP_PMC0 + ri, v);
1024 	return (0);
1025 }
1026 
1027 
1028 static void
1029 iap_initialize(struct pmc_mdep *md, int maxcpu, int npmc, int pmcwidth,
1030     int flags)
1031 {
1032 	struct pmc_classdep *pcd;
1033 
1034 	KASSERT(md != NULL, ("[iap,%d] md is NULL", __LINE__));
1035 
1036 	PMCDBG0(MDP,INI,1, "iap-initialize");
1037 
1038 	/* Remember the set of architectural events supported. */
1039 	core_architectural_events = ~flags;
1040 
1041 	pcd = &md->pmd_classdep[PMC_MDEP_CLASS_INDEX_IAP];
1042 
1043 	pcd->pcd_caps	= IAP_PMC_CAPS;
1044 	pcd->pcd_class	= PMC_CLASS_IAP;
1045 	pcd->pcd_num	= npmc;
1046 	pcd->pcd_ri	= md->pmd_npmc;
1047 	pcd->pcd_width	= pmcwidth;
1048 
1049 	pcd->pcd_allocate_pmc	= iap_allocate_pmc;
1050 	pcd->pcd_config_pmc	= iap_config_pmc;
1051 	pcd->pcd_describe	= iap_describe;
1052 	pcd->pcd_get_config	= iap_get_config;
1053 	pcd->pcd_get_msr	= iap_get_msr;
1054 	pcd->pcd_pcpu_fini	= core_pcpu_fini;
1055 	pcd->pcd_pcpu_init	= core_pcpu_init;
1056 	pcd->pcd_read_pmc	= iap_read_pmc;
1057 	pcd->pcd_release_pmc	= iap_release_pmc;
1058 	pcd->pcd_start_pmc	= iap_start_pmc;
1059 	pcd->pcd_stop_pmc	= iap_stop_pmc;
1060 	pcd->pcd_write_pmc	= iap_write_pmc;
1061 
1062 	md->pmd_npmc	       += npmc;
1063 }
1064 
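/*
 * Interrupt handler used when only version-1 architectural performance
 * monitoring is available (original Intel Core): there is no global
 * status register, so each programmable counter is checked for
 * overflow (MSB clear) individually.
 */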
1065 static int
1066 core_intr(struct trapframe *tf)
1067 {
1068 	pmc_value_t v;
1069 	struct pmc *pm;
1070 	struct core_cpu *cc;
1071 	int error, found_interrupt, ri;
1072 	uint64_t msr;
1073 
1074 	PMCDBG3(MDP,INT, 1, "cpu=%d tf=0x%p um=%d", curcpu, (void *) tf,
1075 	    TRAPF_USERMODE(tf));
1076 
1077 	found_interrupt = 0;
1078 	cc = core_pcpu[curcpu];
1079 
1080 	for (ri = 0; ri < core_iap_npmc; ri++) {
1081 
1082 		if ((pm = cc->pc_corepmcs[ri].phw_pmc) == NULL ||
1083 		    !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
1084 			continue;
1085 
1086 		if (!iap_pmc_has_overflowed(ri))
1087 			continue;
1088 
1089 		found_interrupt = 1;
1090 
1091 		if (pm->pm_state != PMC_STATE_RUNNING)
1092 			continue;
1093 
1094 		error = pmc_process_interrupt(PMC_HR, pm, tf);
1095 
1096 		v = pm->pm_sc.pm_reloadcount;
1097 		v = iap_reload_count_to_perfctr_value(v);
1098 
1099 		/*
1100 		 * Stop the counter and reload it; restart it only if
1101 		 * the PMC is not stalled.
1102 		 */
1103 		msr = rdmsr(IAP_EVSEL0 + ri) & ~IAP_EVSEL_MASK;
1104 		wrmsr(IAP_EVSEL0 + ri, msr);
1105 		wrmsr(core_iap_wroffset + IAP_PMC0 + ri, v);
1106 
1107 		if (error)
1108 			continue;
1109 
1110 		wrmsr(IAP_EVSEL0 + ri, msr | (pm->pm_md.pm_iap.pm_iap_evsel |
1111 					      IAP_EN));
1112 	}
1113 
1114 	if (found_interrupt)
1115 		lapic_reenable_pmc();
1116 
1117 	if (found_interrupt)
1118 		counter_u64_add(pmc_stats.pm_intr_processed, 1);
1119 	else
1120 		counter_u64_add(pmc_stats.pm_intr_ignored, 1);
1121 
1122 	return (found_interrupt);
1123 }
1124 
1125 static int
1126 core2_intr(struct trapframe *tf)
1127 {
1128 	int error, found_interrupt, n, cpu;
1129 	uint64_t flag, intrstatus, intrenable, msr;
1130 	struct pmc *pm;
1131 	struct core_cpu *cc;
1132 	pmc_value_t v;
1133 
1134 	cpu = curcpu;
1135 	PMCDBG3(MDP,INT, 1, "cpu=%d tf=0x%p um=%d", cpu, (void *) tf,
1136 	    TRAPF_USERMODE(tf));
1137 
1138 	/*
1139 	 * The IA_GLOBAL_STATUS (MSR 0x38E) register indicates which
1140 	 * PMCs have a pending PMI interrupt.  We take a 'snapshot' of
1141 	 * the current set of interrupting PMCs and process these
1142 	 * after stopping them.
1143 	 */
1144 	intrstatus = rdmsr(IA_GLOBAL_STATUS);
1145 	intrenable = intrstatus & core_pmcmask;
1146 
1147 	PMCDBG2(MDP,INT, 1, "cpu=%d intrstatus=%jx", cpu,
1148 	    (uintmax_t) intrstatus);
1149 
1150 	found_interrupt = 0;
1151 	cc = core_pcpu[cpu];
1152 
1153 	KASSERT(cc != NULL, ("[core,%d] null pcpu", __LINE__));
1154 
1155 	cc->pc_globalctrl &= ~intrenable;
1156 	cc->pc_resync = 1;	/* MSRs now potentially out of sync. */
1157 
1158 	/*
1159 	 * Stop PMCs and clear overflow status bits.
1160 	 */
1161 	msr = rdmsr(IA_GLOBAL_CTRL) & ~IA_GLOBAL_CTRL_MASK;
1162 	wrmsr(IA_GLOBAL_CTRL, msr);
1163 	wrmsr(IA_GLOBAL_OVF_CTRL, intrenable |
1164 	    IA_GLOBAL_STATUS_FLAG_OVFBUF |
1165 	    IA_GLOBAL_STATUS_FLAG_CONDCHG);
1166 
1167 	/*
1168 	 * Look for interrupts from fixed function PMCs.
1169 	 */
1170 	for (n = 0, flag = (1ULL << IAF_OFFSET); n < core_iaf_npmc;
1171 	     n++, flag <<= 1) {
1172 
1173 		if ((intrstatus & flag) == 0)
1174 			continue;
1175 
1176 		found_interrupt = 1;
1177 
1178 		pm = cc->pc_corepmcs[n + core_iaf_ri].phw_pmc;
1179 		if (pm == NULL || pm->pm_state != PMC_STATE_RUNNING ||
1180 		    !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
1181 			continue;
1182 
1183 		error = pmc_process_interrupt(PMC_HR, pm, tf);
1184 
1185 		if (error)
1186 			intrenable &= ~flag;
1187 
1188 		v = iaf_reload_count_to_perfctr_value(pm->pm_sc.pm_reloadcount);
1189 
1190 		/* Reload sampling count. */
1191 		wrmsr(IAF_CTR0 + n, v);
1192 
1193 		PMCDBG4(MDP,INT, 1, "iaf-intr cpu=%d error=%d v=%jx(%jx)", curcpu,
1194 		    error, (uintmax_t) v, (uintmax_t) rdpmc(IAF_RI_TO_MSR(n)));
1195 	}
1196 
1197 	/*
1198 	 * Process interrupts from the programmable counters.
1199 	 */
1200 	for (n = 0, flag = 1; n < core_iap_npmc; n++, flag <<= 1) {
1201 		if ((intrstatus & flag) == 0)
1202 			continue;
1203 
1204 		found_interrupt = 1;
1205 
1206 		pm = cc->pc_corepmcs[n].phw_pmc;
1207 		if (pm == NULL || pm->pm_state != PMC_STATE_RUNNING ||
1208 		    !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
1209 			continue;
1210 
1211 		error = pmc_process_interrupt(PMC_HR, pm, tf);
1212 		if (error)
1213 			intrenable &= ~flag;
1214 
1215 		v = iap_reload_count_to_perfctr_value(pm->pm_sc.pm_reloadcount);
1216 
1217 		PMCDBG3(MDP,INT, 1, "iap-intr cpu=%d error=%d v=%jx", cpu, error,
1218 		    (uintmax_t) v);
1219 
1220 		/* Reload sampling count. */
1221 		wrmsr(core_iap_wroffset + IAP_PMC0 + n, v);
1222 	}
1223 
1224 	/*
1225 	 * Reenable all non-stalled PMCs.
1226 	 */
1227 	PMCDBG2(MDP,INT, 1, "cpu=%d intrenable=%jx", cpu,
1228 	    (uintmax_t) intrenable);
1229 
1230 	cc->pc_globalctrl |= intrenable;
1231 
1232 	wrmsr(IA_GLOBAL_CTRL, cc->pc_globalctrl & IA_GLOBAL_CTRL_MASK);
1233 
1234 	PMCDBG5(MDP,INT, 1, "cpu=%d fixedctrl=%jx globalctrl=%jx status=%jx "
1235 	    "ovf=%jx", cpu, (uintmax_t) rdmsr(IAF_CTRL),
1236 	    (uintmax_t) rdmsr(IA_GLOBAL_CTRL),
1237 	    (uintmax_t) rdmsr(IA_GLOBAL_STATUS),
1238 	    (uintmax_t) rdmsr(IA_GLOBAL_OVF_CTRL));
1239 
1240 	if (found_interrupt)
1241 		lapic_reenable_pmc();
1242 
1243 	if (found_interrupt)
1244 		counter_u64_add(pmc_stats.pm_intr_processed, 1);
1245 	else
1246 		counter_u64_add(pmc_stats.pm_intr_ignored, 1);
1247 
1248 	return (found_interrupt);
1249 }
1250 
1251 int
1252 pmc_core_initialize(struct pmc_mdep *md, int maxcpu, int version_override)
1253 {
1254 	int cpuid[CORE_CPUID_REQUEST_SIZE];
1255 	int ipa_version, flags, nflags;
1256 
1257 	do_cpuid(CORE_CPUID_REQUEST, cpuid);
1258 
1259 	ipa_version = (version_override > 0) ? version_override :
1260 	    cpuid[CORE_CPUID_EAX] & 0xFF;
1261 	core_cputype = md->pmd_cputype;
1262 
1263 	PMCDBG3(MDP,INI,1,"core-init cputype=%d ncpu=%d ipa-version=%d",
1264 	    core_cputype, maxcpu, ipa_version);
1265 
1266 	if (ipa_version < 1 || ipa_version > 4 ||
1267 	    (core_cputype != PMC_CPU_INTEL_CORE && ipa_version == 1)) {
1268 		/* Unknown PMC architecture. */
1269 		printf("hwpmc_core: unknown PMC architecture: %d\n",
1270 		    ipa_version);
1271 		return (EPROGMISMATCH);
1272 	}
1273 
1274 	core_iap_wroffset = 0;
1275 	if (cpu_feature2 & CPUID2_PDCM) {
1276 		if (rdmsr(IA32_PERF_CAPABILITIES) & PERFCAP_FW_WRITE) {
1277 			PMCDBG0(MDP, INI, 1,
1278 			    "core-init full-width write supported");
1279 			core_iap_wroffset = IAP_A_PMC0 - IAP_PMC0;
1280 		} else
1281 			PMCDBG0(MDP, INI, 1,
1282 			    "core-init full-width write NOT supported");
1283 	} else
1284 		PMCDBG0(MDP, INI, 1, "core-init pdcm not supported");
1285 
1286 	core_pmcmask = 0;
1287 
1288 	/*
1289 	 * Initialize programmable counters.
1290 	 */
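	/*
	 * CPUID leaf 0AH layout (Intel SDM): EAX[7:0] = PMC architecture
	 * version, EAX[15:8] = number of programmable counters,
	 * EAX[23:16] = counter width, EAX[31:24] = length of the EBX
	 * architectural-event bit vector; EDX[4:0] and EDX[12:5] give
	 * the number and width of the fixed-function counters.
	 */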
1291 	core_iap_npmc = (cpuid[CORE_CPUID_EAX] >> 8) & 0xFF;
1292 	core_iap_width = (cpuid[CORE_CPUID_EAX] >> 16) & 0xFF;
1293 
1294 	core_pmcmask |= ((1ULL << core_iap_npmc) - 1);
1295 
1296 	nflags = (cpuid[CORE_CPUID_EAX] >> 24) & 0xFF;
1297 	flags = cpuid[CORE_CPUID_EBX] & ((1 << nflags) - 1);
1298 
1299 	iap_initialize(md, maxcpu, core_iap_npmc, core_iap_width, flags);
1300 
1301 	/*
1302 	 * Initialize fixed function counters, if present.
1303 	 */
1304 	if (core_cputype != PMC_CPU_INTEL_CORE) {
1305 		core_iaf_ri = core_iap_npmc;
1306 		core_iaf_npmc = cpuid[CORE_CPUID_EDX] & 0x1F;
1307 		core_iaf_width = (cpuid[CORE_CPUID_EDX] >> 5) & 0xFF;
1308 
1309 		iaf_initialize(md, maxcpu, core_iaf_npmc, core_iaf_width);
1310 		core_pmcmask |= ((1ULL << core_iaf_npmc) - 1) << IAF_OFFSET;
1311 	}
1312 
1313 	PMCDBG2(MDP,INI,1,"core-init pmcmask=0x%jx iafri=%d", core_pmcmask,
1314 	    core_iaf_ri);
1315 
1316 	core_pcpu = malloc(sizeof(*core_pcpu) * maxcpu, M_PMC,
1317 	    M_ZERO | M_WAITOK);
1318 
1319 	/*
1320 	 * Choose the appropriate interrupt handler.
1321 	 */
1322 	if (ipa_version == 1)
1323 		md->pmd_intr = core_intr;
1324 	else
1325 		md->pmd_intr = core2_intr;
1326 
1327 	md->pmd_pcpu_fini = NULL;
1328 	md->pmd_pcpu_init = NULL;
1329 
1330 	return (0);
1331 }
1332 
1333 void
1334 pmc_core_finalize(struct pmc_mdep *md)
1335 {
1336 	PMCDBG0(MDP,INI,1, "core-finalize");
1337 
1338 	free(core_pcpu, M_PMC);
1339 	core_pcpu = NULL;
1340 }
1341