xref: /freebsd/sys/sys/pmc.h (revision 38a52bd3)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2003-2008, Joseph Koshy
5  * Copyright (c) 2007 The FreeBSD Foundation
6  * All rights reserved.
7  *
8  * Portions of this software were developed by A. Joseph Koshy under
9  * sponsorship from the FreeBSD Foundation and Google, Inc.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  * $FreeBSD$
33  */
34 
35 #ifndef _SYS_PMC_H_
36 #define	_SYS_PMC_H_
37 
38 #include <dev/hwpmc/pmc_events.h>
39 #include <sys/proc.h>
40 #include <sys/counter.h>
41 #include <machine/pmc_mdep.h>
42 #include <machine/profile.h>
43 #ifdef _KERNEL
44 #include <sys/epoch.h>
45 #include <ck_queue.h>
46 #endif
47 
48 #define	PMC_MODULE_NAME		"hwpmc"
49 #define	PMC_NAME_MAX		64 /* HW counter name size */
50 #define	PMC_CLASS_MAX		8  /* max #classes of PMCs per-system */
51 
52 /*
53  * Kernel<->userland API version number [MMmmpppp]
54  *
55  * Major numbers are to be incremented when an incompatible change to
56  * the ABI occurs that older clients will not be able to handle.
57  *
58  * Minor numbers are incremented when a backwards compatible change
59  * occurs that allows older correct programs to run unchanged.  For
60  * example, when support for a new PMC type is added.
61  *
62  * The patch version is incremented for every bug fix.
63  */
64 #define	PMC_VERSION_MAJOR	0x09
65 #define	PMC_VERSION_MINOR	0x03
66 #define	PMC_VERSION_PATCH	0x0000
67 
68 #define	PMC_VERSION		(PMC_VERSION_MAJOR << 24 |		\
69 	PMC_VERSION_MINOR << 16 | PMC_VERSION_PATCH)
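/*
 * Illustrative sketch (not part of the ABI itself): since the version word
 * is packed as 0xMMmmpppp, a userland client could check compatibility
 * against the value returned by the GETMODULEVERSION operation with:
 *
 *	uint32_t v;			(filled in by PMC_OP_GETMODULEVERSION)
 *	unsigned int major = (v >> 24) & 0xFF;
 *	unsigned int minor = (v >> 16) & 0xFF;
 *	unsigned int patch = v & 0xFFFF;
 *	(reject the module if 'major' != PMC_VERSION_MAJOR)
 */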
70 
71 #define PMC_CPUID_LEN 64
72 /* CPU model name for PMU lookup */
73 extern char pmc_cpuid[PMC_CPUID_LEN];
74 
75 /*
76  * Kinds of CPUs known.
77  *
78  * We keep track of CPU variants that need to be distinguished in
79  * some way for PMC operations.  CPU names are grouped by manufacturer
80  * and numbered sparsely in order to minimize changes to the ABI involved
81  * when new CPUs are added.
82  */
83 
84 #define	__PMC_CPUS()						\
85 	__PMC_CPU(AMD_K7,	0x00,	"AMD K7")		\
86 	__PMC_CPU(AMD_K8,	0x01,	"AMD K8")		\
87 	__PMC_CPU(INTEL_P5,	0x80,	"Intel Pentium")	\
88 	__PMC_CPU(INTEL_P6,	0x81,	"Intel Pentium Pro")	\
89 	__PMC_CPU(INTEL_CL,	0x82,	"Intel Celeron")	\
90 	__PMC_CPU(INTEL_PII,	0x83,	"Intel Pentium II")	\
91 	__PMC_CPU(INTEL_PIII,	0x84,	"Intel Pentium III")	\
92 	__PMC_CPU(INTEL_PM,	0x85,	"Intel Pentium M")	\
93 	__PMC_CPU(INTEL_PIV,	0x86,	"Intel Pentium IV")	\
94 	__PMC_CPU(INTEL_CORE,	0x87,	"Intel Core Solo/Duo")	\
95 	__PMC_CPU(INTEL_CORE2,	0x88,	"Intel Core2")		\
96 	__PMC_CPU(INTEL_CORE2EXTREME,	0x89,	"Intel Core2 Extreme")	\
97 	__PMC_CPU(INTEL_ATOM,	0x8A,	"Intel Atom")		\
98 	__PMC_CPU(INTEL_COREI7, 0x8B,   "Intel Core i7")	\
99 	__PMC_CPU(INTEL_WESTMERE, 0x8C,   "Intel Westmere")	\
100 	__PMC_CPU(INTEL_SANDYBRIDGE, 0x8D,   "Intel Sandy Bridge")	\
101 	__PMC_CPU(INTEL_IVYBRIDGE, 0x8E,   "Intel Ivy Bridge")	\
102 	__PMC_CPU(INTEL_SANDYBRIDGE_XEON, 0x8F,   "Intel Sandy Bridge Xeon")	\
103 	__PMC_CPU(INTEL_IVYBRIDGE_XEON, 0x90,   "Intel Ivy Bridge Xeon")	\
104 	__PMC_CPU(INTEL_HASWELL, 0x91,   "Intel Haswell")	\
105 	__PMC_CPU(INTEL_ATOM_SILVERMONT, 0x92,	"Intel Atom Silvermont")    \
106 	__PMC_CPU(INTEL_NEHALEM_EX, 0x93,   "Intel Nehalem Xeon 7500")	\
107 	__PMC_CPU(INTEL_WESTMERE_EX, 0x94,   "Intel Westmere Xeon E7")	\
108 	__PMC_CPU(INTEL_HASWELL_XEON, 0x95,   "Intel Haswell Xeon E5 v3") \
109 	__PMC_CPU(INTEL_BROADWELL, 0x96,   "Intel Broadwell") \
110 	__PMC_CPU(INTEL_BROADWELL_XEON, 0x97,   "Intel Broadwell Xeon") \
111 	__PMC_CPU(INTEL_SKYLAKE, 0x98,   "Intel Skylake")		\
112 	__PMC_CPU(INTEL_SKYLAKE_XEON, 0x99,   "Intel Skylake Xeon")	\
113 	__PMC_CPU(INTEL_ATOM_GOLDMONT, 0x9A,   "Intel Atom Goldmont")	\
114 	__PMC_CPU(INTEL_ICELAKE, 0x9B,	"Intel Icelake")		\
115 	__PMC_CPU(INTEL_ICELAKE_XEON, 0x9C,	"Intel Icelake Xeon")	\
116 	__PMC_CPU(INTEL_ALDERLAKE, 0x9D,	"Intel Alderlake")	\
117 	__PMC_CPU(INTEL_ATOM_GOLDMONT_P, 0x9E,	"Intel Atom Goldmont Plus")    \
118 	__PMC_CPU(INTEL_ATOM_TREMONT, 0x9F,	"Intel Atom Tremont")    \
119 	__PMC_CPU(INTEL_XSCALE,	0x100,	"Intel XScale")		\
120 	__PMC_CPU(MIPS_24K,     0x200,  "MIPS 24K")		\
121 	__PMC_CPU(MIPS_OCTEON,  0x201,  "Cavium Octeon")	\
122 	__PMC_CPU(MIPS_74K,     0x202,  "MIPS 74K")		\
123 	__PMC_CPU(MIPS_BERI,	0x203,  "BERI")			\
124 	__PMC_CPU(PPC_7450,     0x300,  "PowerPC MPC7450")	\
125 	__PMC_CPU(PPC_E500,     0x340,  "PowerPC e500 Core")	\
126 	__PMC_CPU(PPC_970,      0x380,  "IBM PowerPC 970")	\
127 	__PMC_CPU(PPC_POWER8,   0x390,  "IBM POWER8")		\
128 	__PMC_CPU(GENERIC, 	0x400,  "Generic")		\
129 	__PMC_CPU(ARMV7_CORTEX_A5,	0x500,	"ARMv7 Cortex A5")	\
130 	__PMC_CPU(ARMV7_CORTEX_A7,	0x501,	"ARMv7 Cortex A7")	\
131 	__PMC_CPU(ARMV7_CORTEX_A8,	0x502,	"ARMv7 Cortex A8")	\
132 	__PMC_CPU(ARMV7_CORTEX_A9,	0x503,	"ARMv7 Cortex A9")	\
133 	__PMC_CPU(ARMV7_CORTEX_A15,	0x504,	"ARMv7 Cortex A15")	\
134 	__PMC_CPU(ARMV7_CORTEX_A17,	0x505,	"ARMv7 Cortex A17")	\
135 	__PMC_CPU(ARMV8_CORTEX_A53,	0x600,	"ARMv8 Cortex A53")	\
136 	__PMC_CPU(ARMV8_CORTEX_A57,	0x601,	"ARMv8 Cortex A57")	\
137 	__PMC_CPU(ARMV8_CORTEX_A76,	0x602,	"ARMv8 Cortex A76")
138 
139 enum pmc_cputype {
140 #undef	__PMC_CPU
141 #define	__PMC_CPU(S,V,D)	PMC_CPU_##S = V,
142 	__PMC_CPUS()
143 };
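/*
 * Illustrative sketch: __PMC_CPUS() is an X-macro table; each consumer
 * redefines __PMC_CPU before expanding it.  The enum above yields entries
 * such as PMC_CPU_AMD_K7 = 0x00, and a hypothetical name lookup could be
 * generated from the same table:
 *
 *	#undef	__PMC_CPU
 *	#define	__PMC_CPU(S,V,D)	case PMC_CPU_##S: return (D);
 *	switch (cputype) { __PMC_CPUS() }
 */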
144 
145 #define	PMC_CPU_FIRST	PMC_CPU_AMD_K7
146 #define	PMC_CPU_LAST	PMC_CPU_ARMV8_CORTEX_A76
147 
148 /*
149  * Classes of PMCs
150  */
151 
152 #define	__PMC_CLASSES()							\
153 	__PMC_CLASS(TSC,	0x00,	"CPU Timestamp counter")	\
154 	__PMC_CLASS(K7,		0x01,	"AMD K7 performance counters")	\
155 	__PMC_CLASS(K8,		0x02,	"AMD K8 performance counters")	\
156 	__PMC_CLASS(P5,		0x03,	"Intel Pentium counters")	\
157 	__PMC_CLASS(P6,		0x04,	"Intel Pentium Pro counters")	\
158 	__PMC_CLASS(P4,		0x05,	"Intel Pentium-IV counters")	\
159 	__PMC_CLASS(IAF,	0x06,	"Intel Core2/Atom, fixed function") \
160 	__PMC_CLASS(IAP,	0x07,	"Intel Core...Atom, programmable") \
161 	__PMC_CLASS(UCF,	0x08,	"Intel Uncore fixed function")	\
162 	__PMC_CLASS(UCP,	0x09,	"Intel Uncore programmable")	\
163 	__PMC_CLASS(XSCALE,	0x0A,	"Intel XScale counters")	\
164 	__PMC_CLASS(MIPS24K,	0x0B,	"MIPS 24K")			\
165 	__PMC_CLASS(OCTEON,	0x0C,	"Cavium Octeon")		\
166 	__PMC_CLASS(PPC7450,	0x0D,	"Motorola MPC7450 class")	\
167 	__PMC_CLASS(PPC970,	0x0E,	"IBM PowerPC 970 class")	\
168 	__PMC_CLASS(SOFT,	0x0F,	"Software events")		\
169 	__PMC_CLASS(ARMV7,	0x10,	"ARMv7")			\
170 	__PMC_CLASS(ARMV8,	0x11,	"ARMv8")			\
171 	__PMC_CLASS(MIPS74K,	0x12,	"MIPS 74K")			\
172 	__PMC_CLASS(E500,	0x13,	"Freescale e500 class")		\
173 	__PMC_CLASS(BERI,	0x14,	"MIPS BERI")			\
174 	__PMC_CLASS(POWER8,	0x15,	"IBM POWER8 class")		\
175 	__PMC_CLASS(DMC620_PMU_CD2, 0x16, "ARM DMC620 Memory Controller PMU CLKDIV2") \
176 	__PMC_CLASS(DMC620_PMU_C, 0x17, "ARM DMC620 Memory Controller PMU CLK") \
177 	__PMC_CLASS(CMN600_PMU, 0x18,	"Arm CoreLink CMN600 Coherent Mesh Network PMU")
178 
179 enum pmc_class {
180 #undef  __PMC_CLASS
181 #define	__PMC_CLASS(S,V,D)	PMC_CLASS_##S = V,
182 	__PMC_CLASSES()
183 };
184 
185 #define	PMC_CLASS_FIRST	PMC_CLASS_TSC
186 #define	PMC_CLASS_LAST	PMC_CLASS_CMN600_PMU
187 
188 /*
189  * A PMC can be in the following states:
190  *
191  * Hardware states:
192  *   DISABLED   -- administratively prohibited from being used.
193  *   FREE       -- HW available for use
194  * Software states:
195  *   ALLOCATED  -- allocated
196  *   STOPPED    -- allocated, but not counting events
197  *   RUNNING    -- allocated, and in operation; 'pm_runcount'
198  *                 holds the number of CPUs using this PMC at
199  *                 a given instant
200  *   DELETED    -- being destroyed
201  */
202 
203 #define	__PMC_HWSTATES()			\
204 	__PMC_STATE(DISABLED)			\
205 	__PMC_STATE(FREE)
206 
207 #define	__PMC_SWSTATES()			\
208 	__PMC_STATE(ALLOCATED)			\
209 	__PMC_STATE(STOPPED)			\
210 	__PMC_STATE(RUNNING)			\
211 	__PMC_STATE(DELETED)
212 
213 #define	__PMC_STATES()				\
214 	__PMC_HWSTATES()			\
215 	__PMC_SWSTATES()
216 
217 enum pmc_state {
218 #undef	__PMC_STATE
219 #define	__PMC_STATE(S)	PMC_STATE_##S,
220 	__PMC_STATES()
221 	__PMC_STATE(MAX)
222 };
223 
224 #define	PMC_STATE_FIRST	PMC_STATE_DISABLED
225 #define	PMC_STATE_LAST	PMC_STATE_DELETED
226 
227 /*
228  * An allocated PMC may be used as a 'global' counter or as a
229  * 'thread-private' one.  Each such mode of use can be in either
230  * statistical sampling mode or in counting mode.  A PMC in use is
231  * thus in one of the following four modes:
232  * SS i.e., SYSTEM STATISTICAL  -- system-wide statistical profiling
233  * SC i.e., SYSTEM COUNTER      -- system-wide counting mode
234  * TS i.e., THREAD STATISTICAL  -- thread virtual, statistical profiling
235  * TC i.e., THREAD COUNTER      -- thread virtual, counting mode
236  *
237  * Statistical profiling modes rely on the PMC periodically delivering
238  * an interrupt to the CPU (when the configured number of events has
239  * been measured), so the PMC must have the ability to generate
240  * interrupts.
241  *
242  * In counting modes, the PMC counts its configured events, with the
243  * value of the PMC being read whenever needed by its owner process.
244  *
245  * The thread specific modes "virtualize" the PMCs -- the PMCs appear
246  * to be thread private and count events only when the profiled thread
247  * actually executes on the CPU.
248  *
249  * The system-wide "global" modes keep the PMCs running all the time
250  * and are used to measure the behaviour of the whole system.
251  */
252 
253 #define	__PMC_MODES()				\
254 	__PMC_MODE(SS,	0)			\
255 	__PMC_MODE(SC,	1)			\
256 	__PMC_MODE(TS,	2)			\
257 	__PMC_MODE(TC,	3)
258 
259 enum pmc_mode {
260 #undef	__PMC_MODE
261 #define	__PMC_MODE(M,N)	PMC_MODE_##M = N,
262 	__PMC_MODES()
263 };
264 
265 #define	PMC_MODE_FIRST	PMC_MODE_SS
266 #define	PMC_MODE_LAST	PMC_MODE_TC
267 
268 #define	PMC_IS_COUNTING_MODE(mode)				\
269 	((mode) == PMC_MODE_SC || (mode) == PMC_MODE_TC)
270 #define	PMC_IS_SYSTEM_MODE(mode)				\
271 	((mode) == PMC_MODE_SS || (mode) == PMC_MODE_SC)
272 #define	PMC_IS_SAMPLING_MODE(mode)				\
273 	((mode) == PMC_MODE_SS || (mode) == PMC_MODE_TS)
274 #define	PMC_IS_VIRTUAL_MODE(mode)				\
275 	((mode) == PMC_MODE_TS || (mode) == PMC_MODE_TC)
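/*
 * For example (illustrative), PMC_MODE_TS is both a sampling mode and a
 * virtual (thread-private) mode:
 *
 *	PMC_IS_SAMPLING_MODE(PMC_MODE_TS)	-> true
 *	PMC_IS_VIRTUAL_MODE(PMC_MODE_TS)	-> true
 *	PMC_IS_SYSTEM_MODE(PMC_MODE_TS)		-> false
 *	PMC_IS_COUNTING_MODE(PMC_MODE_TS)	-> false
 */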
276 
277 /*
278  * PMC row disposition
279  */
280 
281 #define	__PMC_DISPOSITIONS()					\
282 	__PMC_DISP(STANDALONE)	/* global/disabled counters */	\
283 	__PMC_DISP(FREE)	/* free/available */		\
284 	__PMC_DISP(THREAD)	/* thread-virtual PMCs */	\
285 	__PMC_DISP(UNKNOWN)	/* sentinel */
286 
287 enum pmc_disp {
288 #undef	__PMC_DISP
289 #define	__PMC_DISP(D)	PMC_DISP_##D ,
290 	__PMC_DISPOSITIONS()
291 };
292 
293 #define	PMC_DISP_FIRST	PMC_DISP_STANDALONE
294 #define	PMC_DISP_LAST	PMC_DISP_THREAD
295 
296 /*
297  * Counter capabilities
298  *
299  * __PMC_CAPS(NAME, VALUE, DESCRIPTION)
300  */
301 
302 #define	__PMC_CAPS()							\
303 	__PMC_CAP(INTERRUPT,	0, "generate interrupts")		\
304 	__PMC_CAP(USER,		1, "count user-mode events")		\
305 	__PMC_CAP(SYSTEM,	2, "count system-mode events")		\
306 	__PMC_CAP(EDGE,		3, "do edge detection of events")	\
307 	__PMC_CAP(THRESHOLD,	4, "ignore events below a threshold")	\
308 	__PMC_CAP(READ,		5, "read PMC counter")			\
309 	__PMC_CAP(WRITE,	6, "reprogram PMC counter")		\
310 	__PMC_CAP(INVERT,	7, "invert comparison sense")		\
311 	__PMC_CAP(QUALIFIER,	8, "further qualify monitored events")	\
312 	__PMC_CAP(PRECISE,	9, "perform precise sampling")		\
313 	__PMC_CAP(TAGGING,	10, "tag upstream events")		\
314 	__PMC_CAP(CASCADE,	11, "cascade counters")			\
315 	__PMC_CAP(SYSWIDE,	12, "system wide counter")		\
316 	__PMC_CAP(DOMWIDE,	13, "NUMA domain wide counter")
317 
318 enum pmc_caps
319 {
320 #undef	__PMC_CAP
321 #define	__PMC_CAP(NAME, VALUE, DESCR)	PMC_CAP_##NAME = (1 << VALUE) ,
322 	__PMC_CAPS()
323 };
324 
325 #define	PMC_CAP_FIRST		PMC_CAP_INTERRUPT
326 #define	PMC_CAP_LAST		PMC_CAP_DOMWIDE
327 
328 /*
329  * PMC Event Numbers
330  *
331  * These are generated from the definitions in "dev/hwpmc/pmc_events.h".
332  */
333 
334 enum pmc_event {
335 #undef	__PMC_EV
336 #undef	__PMC_EV_BLOCK
337 #define	__PMC_EV_BLOCK(C,V)	PMC_EV_ ## C ## __BLOCK_START = (V) - 1 ,
338 #define	__PMC_EV(C,N)		PMC_EV_ ## C ## _ ## N ,
339 	__PMC_EVENTS()
340 };
341 
342 /*
343  * PMC SYSCALL INTERFACE
344  */
345 
346 /*
347  * "PMC_OPS" -- these are the commands recognized by the kernel
348  * module, and are used when performing a system call from userland.
349  */
350 #define	__PMC_OPS()							\
351 	__PMC_OP(CONFIGURELOG, "Set log file")				\
352 	__PMC_OP(FLUSHLOG, "Flush log file")				\
353 	__PMC_OP(GETCPUINFO, "Get system CPU information")		\
354 	__PMC_OP(GETDRIVERSTATS, "Get driver statistics")		\
355 	__PMC_OP(GETMODULEVERSION, "Get module version")		\
356 	__PMC_OP(GETPMCINFO, "Get per-cpu PMC information")		\
357 	__PMC_OP(PMCADMIN, "Set PMC state")				\
358 	__PMC_OP(PMCALLOCATE, "Allocate and configure a PMC")		\
359 	__PMC_OP(PMCATTACH, "Attach a PMC to a process")		\
360 	__PMC_OP(PMCDETACH, "Detach a PMC from a process")		\
361 	__PMC_OP(PMCGETMSR, "Get a PMC's hardware address")		\
362 	__PMC_OP(PMCRELEASE, "Release a PMC")				\
363 	__PMC_OP(PMCRW, "Read/Set a PMC")				\
364 	__PMC_OP(PMCSETCOUNT, "Set initial count/sampling rate")	\
365 	__PMC_OP(PMCSTART, "Start a PMC")				\
366 	__PMC_OP(PMCSTOP, "Stop a PMC")					\
367 	__PMC_OP(WRITELOG, "Write a cookie to the log file")		\
368 	__PMC_OP(CLOSELOG, "Close log file")				\
369 	__PMC_OP(GETDYNEVENTINFO, "Get dynamic events list")
370 
371 enum pmc_ops {
372 #undef	__PMC_OP
373 #define	__PMC_OP(N, D)	PMC_OP_##N,
374 	__PMC_OPS()
375 };
376 
377 /*
378  * Flags used in operations on PMCs.
379  */
380 
381 #define	PMC_F_UNUSED1		0x00000001 /* unused */
382 #define	PMC_F_DESCENDANTS	0x00000002 /*OP ALLOCATE track descendants */
383 #define	PMC_F_LOG_PROCCSW	0x00000004 /*OP ALLOCATE track ctx switches */
384 #define	PMC_F_LOG_PROCEXIT	0x00000008 /*OP ALLOCATE log proc exits */
385 #define	PMC_F_NEWVALUE		0x00000010 /*OP RW write new value */
386 #define	PMC_F_OLDVALUE		0x00000020 /*OP RW get old value */
387 
388 /* V2 API */
389 #define	PMC_F_CALLCHAIN		0x00000080 /*OP ALLOCATE capture callchains */
390 #define	PMC_F_USERCALLCHAIN	0x00000100 /*OP ALLOCATE use userspace stack */
391 
392 /* internal flags */
393 #define	PMC_F_ATTACHED_TO_OWNER	0x00010000 /*attached to owner*/
394 #define	PMC_F_NEEDS_LOGFILE	0x00020000 /*needs log file */
395 #define	PMC_F_ATTACH_DONE	0x00040000 /*attached at least once */
396 
397 #define	PMC_CALLCHAIN_DEPTH_MAX	512
398 
399 #define	PMC_CC_F_USERSPACE	0x01	   /*userspace callchain*/
400 
401 /*
402  * Cookies used to denote allocated PMCs, and the values of PMCs.
403  */
404 
405 typedef uint32_t	pmc_id_t;
406 typedef uint64_t	pmc_value_t;
407 
408 #define	PMC_ID_INVALID		(~ (pmc_id_t) 0)
409 
410 /*
411  * PMC IDs have the following format:
412  *
413  * +------------+----------+-------+-----------+
414  * |    CPU     | PMC MODE | CLASS | ROW INDEX |
415  * +------------+----------+-------+-----------+
416  *
417  * where CPU is 12 bits, MODE 4, CLASS 8, and ROW INDEX 8.  Field 'CPU'
418  * is set to the requested CPU for system-wide PMCs or PMC_CPU_ANY for
419  * process-mode PMCs.  Field 'PMC MODE' is the allocated PMC mode.
420  * Field 'PMC CLASS' is the class of the PMC.  Field 'ROW INDEX' is the
421  * row index for the PMC.
422  *
423  * The 'ROW INDEX' ranges over 0..NHWPMCS where NHWPMCS is the total
424  * number of hardware PMCs on this CPU.
425  */
426 
427 #define	PMC_ID_TO_ROWINDEX(ID)	((ID) & 0xFF)
428 #define	PMC_ID_TO_CLASS(ID)	(((ID) & 0xFF00) >> 8)
429 #define	PMC_ID_TO_MODE(ID)	(((ID) & 0xF0000) >> 16)
430 #define	PMC_ID_TO_CPU(ID)	(((ID) & 0xFFF00000) >> 20)
431 #define	PMC_ID_MAKE_ID(CPU,MODE,CLASS,ROWINDEX)			\
432 	((((CPU) & 0xFFF) << 20) | (((MODE) & 0xF) << 16) |	\
433 	(((CLASS) & 0xFF) << 8) | ((ROWINDEX) & 0xFF))
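/*
 * Illustrative sketch: composing and decomposing an id for a system-wide
 * sampling PMC on CPU 2, class IAP, row index 3 (values chosen only for
 * the example):
 *
 *	pmc_id_t id = PMC_ID_MAKE_ID(2, PMC_MODE_SS, PMC_CLASS_IAP, 3);
 *	PMC_ID_TO_CPU(id)	== 2
 *	PMC_ID_TO_MODE(id)	== PMC_MODE_SS
 *	PMC_ID_TO_CLASS(id)	== PMC_CLASS_IAP
 *	PMC_ID_TO_ROWINDEX(id)	== 3
 */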
434 
435 /*
436  * Data structures for system calls supported by the pmc driver.
437  */
438 
439 /*
440  * OP PMCALLOCATE
441  *
442  * Allocate a PMC on the named CPU.
443  */
444 
445 #define	PMC_CPU_ANY	~0
446 
447 struct pmc_op_pmcallocate {
448 	uint32_t	pm_caps;	/* PMC_CAP_* */
449 	uint32_t	pm_cpu;		/* CPU number or PMC_CPU_ANY */
450 	enum pmc_class	pm_class;	/* class of PMC desired */
451 	enum pmc_event	pm_ev;		/* [enum pmc_event] desired */
452 	uint32_t	pm_flags;	/* additional modifiers PMC_F_* */
453 	enum pmc_mode	pm_mode;	/* desired mode */
454 	pmc_id_t	pm_pmcid;	/* [return] process pmc id */
455 	pmc_value_t	pm_count;	/* initial/sample count */
456 
457 	union pmc_md_op_pmcallocate pm_md; /* MD layer extensions */
458 };
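/*
 * Illustrative sketch (userland normally goes through pmc_allocate(3) in
 * libpmc; the event value below is a placeholder from pmc_events.h):
 *
 *	struct pmc_op_pmcallocate a = { 0 };
 *	a.pm_class = PMC_CLASS_SOFT;
 *	a.pm_mode  = PMC_MODE_SC;	(system-wide, counting)
 *	a.pm_cpu   = 0;
 *	a.pm_ev    = ...;		(desired event)
 *	a.pm_caps  = PMC_CAP_READ;
 *	(issue PMC_OP_PMCALLOCATE; on success 'a.pm_pmcid' holds the new id)
 */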
459 
460 /*
461  * OP PMCADMIN
462  *
463  * Set the administrative state (i.e., whether enabled or disabled) of
464  * a PMC 'pm_pmc' on CPU 'pm_cpu'.  Note that 'pm_pmc' specifies an
465  * absolute PMC number and need not have been first allocated by the
466  * calling process.
467  */
468 
469 struct pmc_op_pmcadmin {
470 	int		pm_cpu;		/* CPU# */
471 	uint32_t	pm_flags;	/* flags */
472 	int		pm_pmc;         /* PMC# */
473 	enum pmc_state  pm_state;	/* desired state */
474 };
475 
476 /*
477  * OP PMCATTACH / OP PMCDETACH
478  *
479  * Attach/detach a PMC and a process.
480  */
481 
482 struct pmc_op_pmcattach {
483 	pmc_id_t	pm_pmc;		/* PMC to attach to */
484 	pid_t		pm_pid;		/* target process */
485 };
486 
487 /*
488  * OP PMCSETCOUNT
489  *
490  * Set the sampling rate (i.e., the reload count) for statistical counters.
491  * 'pm_pmcid' needs to have been previously allocated using PMCALLOCATE.
492  */
493 
494 struct pmc_op_pmcsetcount {
495 	pmc_value_t	pm_count;	/* initial/sample count */
496 	pmc_id_t	pm_pmcid;	/* PMC id to set */
497 };
498 
499 /*
500  * OP PMCRW
501  *
502  * Read or write the value of the PMC named by 'pm_pmcid'.  'pm_pmcid'
503  * needs to have been previously allocated using PMCALLOCATE.
504  */
505 
506 struct pmc_op_pmcrw {
507 	uint32_t	pm_flags;	/* PMC_F_{OLD,NEW}VALUE*/
508 	pmc_id_t	pm_pmcid;	/* pmc id */
509 	pmc_value_t	pm_value;	/* new&returned value */
510 };
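/*
 * Illustrative sketch: to read the current value set PMC_F_OLDVALUE; to
 * write a new value set PMC_F_NEWVALUE and fill in 'pm_value':
 *
 *	struct pmc_op_pmcrw rw = { 0 };
 *	rw.pm_pmcid = pmcid;
 *	rw.pm_flags = PMC_F_OLDVALUE;
 *	(after PMC_OP_PMCRW returns, 'rw.pm_value' holds the counter value)
 */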
511 
512 /*
513  * OP GETPMCINFO
514  *
515  * Retrieve PMC state for a named CPU.  The caller is expected to
516  * allocate 'npmc' * sizeof(struct pmc_info) bytes of space for the
517  * return values.
518  */
519 
520 struct pmc_info {
521 	char		pm_name[PMC_NAME_MAX]; /* pmc name */
522 	enum pmc_class	pm_class;	/* enum pmc_class */
523 	int		pm_enabled;	/* whether enabled */
524 	enum pmc_disp	pm_rowdisp;	/* FREE, THREAD or STANDALONE */
525 	pid_t		pm_ownerpid;	/* owner, or -1 */
526 	enum pmc_mode	pm_mode;	/* current mode [enum pmc_mode] */
527 	enum pmc_event	pm_event;	/* current event */
528 	uint32_t	pm_flags;	/* current flags */
529 	pmc_value_t	pm_reloadcount;	/* sampling counters only */
530 };
531 
532 struct pmc_op_getpmcinfo {
533 	int32_t		pm_cpu;		/* 0 <= cpu < mp_maxid */
534 	struct pmc_info	pm_pmcs[];	/* space for 'npmc' structures */
535 };
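/*
 * Illustrative sketch: the caller sizes the request from the 'pm_npmc'
 * count returned by the GETCPUINFO operation:
 *
 *	size_t sz = sizeof(struct pmc_op_getpmcinfo) +
 *	    npmc * sizeof(struct pmc_info);
 *	struct pmc_op_getpmcinfo *gpi = calloc(1, sz);
 *	gpi->pm_cpu = cpu;
 *	(issue PMC_OP_GETPMCINFO; 'gpi->pm_pmcs[]' is then filled in)
 */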
536 
537 /*
538  * OP GETCPUINFO
539  *
540  * Retrieve system CPU information.
541  */
542 
543 struct pmc_classinfo {
544 	enum pmc_class	pm_class;	/* class id */
545 	uint32_t	pm_caps;	/* counter capabilities */
546 	uint32_t	pm_width;	/* width of the PMC */
547 	uint32_t	pm_num;		/* number of PMCs in class */
548 };
549 
550 struct pmc_op_getcpuinfo {
551 	enum pmc_cputype pm_cputype; /* what kind of CPU */
552 	uint32_t	pm_ncpu;    /* max CPU number */
553 	uint32_t	pm_npmc;    /* #PMCs per CPU */
554 	uint32_t	pm_nclass;  /* #classes of PMCs */
555 	struct pmc_classinfo  pm_classes[PMC_CLASS_MAX];
556 };
557 
558 /*
559  * OP CONFIGURELOG
560  *
561  * Configure a log file into which system-wide statistics will be written.
562  */
563 
564 struct pmc_op_configurelog {
565 	int		pm_flags;
566 	int		pm_logfd;   /* logfile fd (or -1) */
567 };
568 
569 /*
570  * OP GETDRIVERSTATS
571  *
572  * Retrieve pmc(4) driver-wide statistics.
573  */
574 #ifdef _KERNEL
575 struct pmc_driverstats {
576 	counter_u64_t	pm_intr_ignored;	/* #interrupts ignored */
577 	counter_u64_t	pm_intr_processed;	/* #interrupts processed */
578 	counter_u64_t	pm_intr_bufferfull;	/* #interrupts with ENOSPC */
579 	counter_u64_t	pm_syscalls;		/* #syscalls */
580 	counter_u64_t	pm_syscall_errors;	/* #syscalls with errors */
581 	counter_u64_t	pm_buffer_requests;	/* #buffer requests */
582 	counter_u64_t	pm_buffer_requests_failed; /* #failed buffer requests */
583 	counter_u64_t	pm_log_sweeps;		/* #sample buffer processing
584 						   passes */
585 	counter_u64_t	pm_merges;		/* merged k+u */
586 	counter_u64_t	pm_overwrites;		/* UR overwrites */
587 };
588 #endif
589 
590 struct pmc_op_getdriverstats {
591 	unsigned int	pm_intr_ignored;	/* #interrupts ignored */
592 	unsigned int	pm_intr_processed;	/* #interrupts processed */
593 	unsigned int	pm_intr_bufferfull;	/* #interrupts with ENOSPC */
594 	unsigned int	pm_syscalls;		/* #syscalls */
595 	unsigned int	pm_syscall_errors;	/* #syscalls with errors */
596 	unsigned int	pm_buffer_requests;	/* #buffer requests */
597 	unsigned int	pm_buffer_requests_failed; /* #failed buffer requests */
598 	unsigned int	pm_log_sweeps;		/* #sample buffer processing
599 						   passes */
600 };
601 
602 /*
603  * OP RELEASE / OP START / OP STOP
604  *
605  * Simple operations on a PMC id.
606  */
607 
608 struct pmc_op_simple {
609 	pmc_id_t	pm_pmcid;
610 };
611 
612 /*
613  * OP WRITELOG
614  *
615  * Flush the current log buffer and write 4 bytes of user data to it.
616  */
617 
618 struct pmc_op_writelog {
619 	uint32_t	pm_userdata;
620 };
621 
622 /*
623  * OP GETMSR
624  *
625  * Retrieve the machine specific address associated with the allocated
626  * PMC.  This number can be used subsequently with a read-performance-counter
627  * instruction.
628  */
629 
630 struct pmc_op_getmsr {
631 	uint32_t	pm_msr;		/* machine specific address */
632 	pmc_id_t	pm_pmcid;	/* allocated pmc id */
633 };
634 
635 /*
636  * OP GETDYNEVENTINFO
637  *
638  * Retrieve a PMC dynamic class events list.
639  */
640 
641 struct pmc_dyn_event_descr {
642 	char		pm_ev_name[PMC_NAME_MAX];
643 	enum pmc_event	pm_ev_code;
644 };
645 
646 struct pmc_op_getdyneventinfo {
647 	enum pmc_class			pm_class;
648 	unsigned int			pm_nevent;
649 	struct pmc_dyn_event_descr	pm_events[PMC_EV_DYN_COUNT];
650 };
651 
652 #ifdef _KERNEL
653 
654 #include <sys/malloc.h>
655 #include <sys/sysctl.h>
656 #include <sys/_cpuset.h>
657 
658 #include <machine/frame.h>
659 
660 #define	PMC_HASH_SIZE				1024
661 #define	PMC_MTXPOOL_SIZE			2048
662 #define	PMC_LOG_BUFFER_SIZE			256
663 #define	PMC_NLOGBUFFERS_PCPU			32
664 #define	PMC_NSAMPLES				256
665 #define	PMC_CALLCHAIN_DEPTH			128
666 #define	PMC_THREADLIST_MAX			128
667 
668 #define PMC_SYSCTL_NAME_PREFIX "kern." PMC_MODULE_NAME "."
669 
670 /*
671  * Locking keys
672  *
673  * (b) - pmc_bufferlist_mtx (spin lock)
674  * (k) - pmc_kthread_mtx (sleep lock)
675  * (o) - po->po_mtx (spin lock)
676  * (g) - global_epoch_preempt (epoch)
677  * (p) - pmc_sx (sx)
678  */
679 
680 /*
681  * PMC commands
682  */
683 
684 struct pmc_syscall_args {
685 	register_t	pmop_code;	/* one of PMC_OP_* */
686 	void		*pmop_data;	/* syscall parameter */
687 };
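/*
 * Illustrative sketch: every hwpmc operation arrives as an (op, data)
 * pair; e.g. a request to start a PMC pairs PMC_OP_PMCSTART with a
 * 'struct pmc_op_simple':
 *
 *	struct pmc_op_simple start = { .pm_pmcid = pmcid };
 *	struct pmc_syscall_args args = {
 *		.pmop_code = PMC_OP_PMCSTART,
 *		.pmop_data = &start,
 *	};
 */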
688 
689 /*
690  * Interface to processor-specific stuff
691  */
692 
693 /*
694  * struct pmc_descr
695  *
696  * Machine independent (i.e., the common parts) of a human readable
697  * PMC description.
698  */
699 
700 struct pmc_descr {
701 	char		pd_name[PMC_NAME_MAX]; /* name */
702 	uint32_t	pd_caps;	/* capabilities */
703 	enum pmc_class	pd_class;	/* class of the PMC */
704 	uint32_t	pd_width;	/* width in bits */
705 };
706 
707 /*
708  * struct pmc_target
709  *
710  * This structure records all the target processes associated with a
711  * PMC.
712  */
713 
714 struct pmc_target {
715 	LIST_ENTRY(pmc_target)	pt_next;
716 	struct pmc_process	*pt_process; /* target descriptor */
717 };
718 
719 /*
720  * struct pmc
721  *
722  * Describes each allocated PMC.
723  *
724  * Each PMC has precisely one owner, namely the process that allocated
725  * the PMC.
726  *
727  * A PMC may be attached to multiple target processes.  The
728  * 'pm_targets' field links all the target processes being monitored
729  * by this PMC.
730  *
731  * The 'pm_savedvalue' field is protected by a mutex.
732  *
733  * On a multi-cpu machine, multiple target threads associated with a
734  * process-virtual PMC could be concurrently executing on different
735  * CPUs.  The 'pm_runcount' field is atomically incremented every time
736  * the PMC gets scheduled on a CPU and atomically decremented when it
737  * gets descheduled.  Deletion of a PMC is only permitted when this
738  * field is '0'.
739  *
740  */
741 struct pmc_pcpu_state {
742 	uint32_t pps_overflowcnt;	/* count overflow interrupts */
743 	uint8_t pps_stalled;
744 	uint8_t pps_cpustate;
745 } __aligned(CACHE_LINE_SIZE);
746 struct pmc {
747 	LIST_HEAD(,pmc_target)	pm_targets;	/* list of target processes */
748 	LIST_ENTRY(pmc)		pm_next;	/* owner's list */
749 
750 	/*
751 	 * System-wide PMCs are allocated on a CPU and are not moved
752 	 * around.  For system-wide PMCs we record the CPU the PMC was
753 	 * allocated on in the 'CPU' field of the pmc ID.
754 	 *
755 	 * Virtual PMCs run on whichever CPU is currently executing
756 	 * their targets' threads.  For these PMCs we need to save
757 	 * their current PMC counter values when they are taken off
758 	 * CPU.
759 	 */
760 
761 	union {
762 		pmc_value_t	pm_savedvalue;	/* Virtual PMCS */
763 	} pm_gv;
764 
765 	/*
766 	 * For sampling mode PMCs, we keep track of the PMC's "reload
767 	 * count", which is the counter value to be loaded in when
768 	 * arming the PMC for the next counting session.  For counting
769 	 * modes on PMCs that are read-only (e.g., the x86 TSC), we
770 	 * keep track of the initial value at the start of
771 	 * counting-mode operation.
772 	 */
773 
774 	union {
775 		pmc_value_t	pm_reloadcount;	/* sampling PMC modes */
776 		pmc_value_t	pm_initial;	/* counting PMC modes */
777 	} pm_sc;
778 
779 	struct pmc_pcpu_state *pm_pcpu_state;
780 	volatile cpuset_t pm_cpustate;	/* CPUs where PMC should be active */
781 	uint32_t	pm_caps;	/* PMC capabilities */
782 	enum pmc_event	pm_event;	/* event being measured */
783 	uint32_t	pm_flags;	/* additional flags PMC_F_... */
784 	struct pmc_owner *pm_owner;	/* owner thread state */
785 	counter_u64_t		pm_runcount;	/* #cpus currently on */
786 	enum pmc_state	pm_state;	/* current PMC state */
787 
788 	/*
789 	 * The PMC ID field encodes the row-index for the PMC, its
790 	 * mode, class and the CPU# associated with the PMC.
791 	 */
792 
793 	pmc_id_t	pm_id;		/* allocated PMC id */
794 	enum pmc_class pm_class;
795 
796 	/* md extensions */
797 	union pmc_md_pmc	pm_md;
798 };
799 
800 /*
801  * Accessor macros for 'struct pmc'
802  */
803 
804 #define	PMC_TO_MODE(P)		PMC_ID_TO_MODE((P)->pm_id)
805 #define	PMC_TO_CLASS(P)		PMC_ID_TO_CLASS((P)->pm_id)
806 #define	PMC_TO_ROWINDEX(P)	PMC_ID_TO_ROWINDEX((P)->pm_id)
807 #define	PMC_TO_CPU(P)		PMC_ID_TO_CPU((P)->pm_id)
808 
809 /*
810  * struct pmc_threadpmcstate
811  *
812  * Record per-PMC, per-thread state.
813  */
814 struct pmc_threadpmcstate {
815 	pmc_value_t	pt_pmcval;	/* per-thread reload count */
816 };
817 
818 /*
819  * struct pmc_thread
820  *
821  * Record a 'target' thread being profiled.
822  */
823 struct pmc_thread {
824 	LIST_ENTRY(pmc_thread) pt_next;		/* linked list */
825 	struct thread	*pt_td;			/* target thread */
826 	struct pmc_threadpmcstate pt_pmcs[];	/* per-PMC state */
827 };
828 
829 /*
830  * struct pmc_process
831  *
832  * Record a 'target' process being profiled.
833  *
834  * The target process being profiled could be different from the owner
835  * process which allocated the PMCs.  Each target process descriptor
836  * is associated with NHWPMC 'struct pmc *' pointers.  Each PMC at a
837  * given hardware row-index 'n' will use slot 'n' of the 'pp_pmcs[]'
838  * array.  The size of this structure is thus PMC architecture
839  * dependent.
840  *
841  */
842 
843 struct pmc_targetstate {
844 	struct pmc	*pp_pmc;   /* target PMC */
845 	pmc_value_t	pp_pmcval; /* per-process value */
846 };
847 
848 struct pmc_process {
849 	LIST_ENTRY(pmc_process) pp_next;	/* hash chain */
850 	LIST_HEAD(,pmc_thread) pp_tds;		/* list of threads */
851 	struct mtx	*pp_tdslock;		/* lock on pp_tds thread list */
852 	int		pp_refcnt;		/* reference count */
853 	uint32_t	pp_flags;		/* flags PMC_PP_* */
854 	struct proc	*pp_proc;		/* target process */
855 	struct pmc_targetstate pp_pmcs[];       /* NHWPMCs */
856 };
857 
858 #define	PMC_PP_ENABLE_MSR_ACCESS	0x00000001
859 
860 /*
861  * struct pmc_owner
862  *
863  * We associate a PMC with an 'owner' process.
864  *
865  * A process can be associated with 0..NCPUS*NHWPMC PMCs during its
866  * lifetime, where NCPUS is the number of CPUs in the system and
867  * NHWPMC is the number of hardware PMCs per CPU.  These are
868  * maintained in the list headed by 'po_pmcs' to save on space.
869  *
870  */
871 
872 struct pmc_owner  {
873 	LIST_ENTRY(pmc_owner)	po_next;	/* hash chain */
874 	CK_LIST_ENTRY(pmc_owner)	po_ssnext;	/* (g/p) list of SS PMC owners */
875 	LIST_HEAD(, pmc)	po_pmcs;	/* owned PMC list */
876 	TAILQ_HEAD(, pmclog_buffer) po_logbuffers; /* (o) logbuffer list */
877 	struct mtx		po_mtx;		/* spin lock for (o) */
878 	struct proc		*po_owner;	/* owner proc */
879 	uint32_t		po_flags;	/* (k) flags PMC_PO_* */
880 	struct proc		*po_kthread;	/* (k) helper kthread */
881 	struct file		*po_file;	/* file reference */
882 	int			po_error;	/* recorded error */
883 	short			po_sscount;	/* # SS PMCs owned */
884 	short			po_logprocmaps;	/* global mappings done */
885 	struct pmclog_buffer	*po_curbuf[MAXCPU];	/* current log buffer */
886 };
887 
888 #define	PMC_PO_OWNS_LOGFILE		0x00000001 /* has a log file */
889 #define	PMC_PO_SHUTDOWN			0x00000010 /* in the process of shutdown */
890 #define	PMC_PO_INITIAL_MAPPINGS_DONE	0x00000020
891 
892 /*
893  * struct pmc_hw -- describe the state of the PMC hardware
894  *
895  * When in use, a HW PMC is associated with one allocated 'struct pmc'
896  * pointed to by field 'phw_pmc'.  When inactive, this field is NULL.
897  *
898  * On an SMP box, one or more HW PMCs in process virtual mode with
899  * the same 'phw_pmc' could be executing on different CPUs.  In order
900  * to handle this case correctly, we need to ensure that only
901  * incremental counts get added to the saved value in the associated
902  * 'struct pmc'.  The 'phw_save' field is used to keep the saved PMC
903  * value at the time the hardware is started during this context
904  * switch (i.e., the difference between the new (hardware) count and
905  * the saved count is atomically added to the count field in 'struct
906  * pmc' at context switch time).
907  *
908  */
909 
910 struct pmc_hw {
911 	uint32_t	phw_state;	/* see PHW_* macros below */
912 	struct pmc	*phw_pmc;	/* current thread PMC */
913 };
914 
915 #define	PMC_PHW_RI_MASK		0x000000FF
916 #define	PMC_PHW_CPU_SHIFT	8
917 #define	PMC_PHW_CPU_MASK	0x0000FF00
918 #define	PMC_PHW_FLAGS_SHIFT	16
919 #define	PMC_PHW_FLAGS_MASK	0xFFFF0000
920 
921 #define	PMC_PHW_INDEX_TO_STATE(ri)	((ri) & PMC_PHW_RI_MASK)
922 #define	PMC_PHW_STATE_TO_INDEX(state)	((state) & PMC_PHW_RI_MASK)
923 #define	PMC_PHW_CPU_TO_STATE(cpu)	(((cpu) << PMC_PHW_CPU_SHIFT) & \
924 	PMC_PHW_CPU_MASK)
925 #define	PMC_PHW_STATE_TO_CPU(state)	(((state) & PMC_PHW_CPU_MASK) >> \
926 	PMC_PHW_CPU_SHIFT)
927 #define	PMC_PHW_FLAGS_TO_STATE(flags)	(((flags) << PMC_PHW_FLAGS_SHIFT) & \
928 	PMC_PHW_FLAGS_MASK)
929 #define	PMC_PHW_STATE_TO_FLAGS(state)	(((state) & PMC_PHW_FLAGS_MASK) >> \
930 	PMC_PHW_FLAGS_SHIFT)
931 #define	PMC_PHW_FLAG_IS_ENABLED		(PMC_PHW_FLAGS_TO_STATE(0x01))
932 #define	PMC_PHW_FLAG_IS_SHAREABLE	(PMC_PHW_FLAGS_TO_STATE(0x02))
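/*
 * Illustrative sketch: the 'phw_state' word for row index 3 on CPU 1,
 * marked enabled, would be built and taken apart as:
 *
 *	phw->phw_state = PMC_PHW_INDEX_TO_STATE(3) |
 *	    PMC_PHW_CPU_TO_STATE(1) | PMC_PHW_FLAG_IS_ENABLED;
 *	PMC_PHW_STATE_TO_CPU(phw->phw_state)	== 1
 *	PMC_PHW_STATE_TO_INDEX(phw->phw_state)	== 3
 */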
933 
934 /*
935  * struct pmc_sample
936  *
937  * Space for N (tunable) PC samples and associated control data.
938  */
939 
940 struct pmc_sample {
941 	uint16_t		ps_nsamples;	/* callchain depth */
942 	uint16_t		ps_nsamples_actual;
943 	uint16_t		ps_cpu;		/* cpu number */
944 	uint16_t		ps_flags;	/* other flags */
945 	lwpid_t			ps_tid;		/* thread id */
946 	pid_t			ps_pid;		/* process PID or -1 */
947 	int		ps_ticks; /* ticks at sample time */
948 	/* pad */
949 	struct thread		*ps_td;		/* which thread */
950 	struct pmc		*ps_pmc;	/* interrupting PMC */
951 	uintptr_t		*ps_pc;		/* (const) callchain start */
952 	uint64_t		ps_tsc;		/* tsc value */
953 };
954 
955 #define 	PMC_SAMPLE_FREE		((uint16_t) 0)
956 #define 	PMC_USER_CALLCHAIN_PENDING	((uint16_t) 0xFFFF)
957 
958 struct pmc_samplebuffer {
959 	volatile uint64_t		ps_prodidx; /* producer index */
960 	volatile uint64_t		ps_considx; /* consumer index */
961 	uintptr_t		*ps_callchains;	/* all saved call chains */
962 	struct pmc_sample	ps_samples[];	/* array of sample entries */
963 };
964 
965 #define PMC_CONS_SAMPLE(psb)					\
966 	(&(psb)->ps_samples[(psb)->ps_considx & pmc_sample_mask])
967 
968 #define PMC_CONS_SAMPLE_OFF(psb, off)							\
969 	(&(psb)->ps_samples[(off) & pmc_sample_mask])
970 
971 #define PMC_PROD_SAMPLE(psb)					\
972 	(&(psb)->ps_samples[(psb)->ps_prodidx & pmc_sample_mask])
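/*
 * Illustrative sketch (assumes 'pmc_sample_mask' is one less than the
 * power-of-two ring size): the producer fills the next free slot and
 * advances 'ps_prodidx'; the consumer drains until it catches up:
 *
 *	struct pmc_sample *ps = PMC_PROD_SAMPLE(psb);
 *	(fill in *ps, then) psb->ps_prodidx++;
 *
 *	while (psb->ps_considx < psb->ps_prodidx) {
 *		ps = PMC_CONS_SAMPLE(psb);
 *		(process *ps, then) psb->ps_considx++;
 *	}
 */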
973 
974 /*
975  * struct pmc_cpustate
976  *
977  * A CPU is modelled as a collection of HW PMCs with space for additional
978  * flags.
979  */
980 
981 struct pmc_cpu {
982 	uint32_t	pc_state;	/* physical cpu number + flags */
983 	struct pmc_samplebuffer *pc_sb[3]; /* space for samples */
984 	struct pmc_hw	*pc_hwpmcs[];	/* 'npmc' pointers */
985 };
986 
987 #define	PMC_PCPU_CPU_MASK		0x000000FF
988 #define	PMC_PCPU_FLAGS_MASK		0xFFFFFF00
989 #define	PMC_PCPU_FLAGS_SHIFT		8
990 #define	PMC_PCPU_STATE_TO_CPU(S)	((S) & PMC_PCPU_CPU_MASK)
991 #define	PMC_PCPU_STATE_TO_FLAGS(S)	(((S) & PMC_PCPU_FLAGS_MASK) >> PMC_PCPU_FLAGS_SHIFT)
992 #define	PMC_PCPU_FLAGS_TO_STATE(F)	(((F) << PMC_PCPU_FLAGS_SHIFT) & PMC_PCPU_FLAGS_MASK)
993 #define	PMC_PCPU_CPU_TO_STATE(C)	((C) & PMC_PCPU_CPU_MASK)
994 #define	PMC_PCPU_FLAG_HTT		(PMC_PCPU_FLAGS_TO_STATE(0x1))
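/*
 * Illustrative sketch: 'pc_state' packs the physical CPU number in the
 * low byte with flags above it:
 *
 *	pc->pc_state = PMC_PCPU_CPU_TO_STATE(cpu) | PMC_PCPU_FLAG_HTT;
 *	PMC_PCPU_STATE_TO_CPU(pc->pc_state)	== cpu
 */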
995 
996 /*
997  * struct pmc_binding
998  *
999  * CPU binding information.
1000  */
1001 
1002 struct pmc_binding {
1003 	int	pb_bound;	/* is bound? */
1004 	int	pb_cpu;		/* if so, to which CPU */
1005 	u_char	pb_priority;	/* Thread active priority. */
1006 };
1007 
1008 struct pmc_mdep;
1009 
1010 /*
1011  * struct pmc_classdep
1012  *
1013  * PMC class-dependent operations.
1014  */
1015 struct pmc_classdep {
1016 	uint32_t	pcd_caps;	/* class capabilities */
1017 	enum pmc_class	pcd_class;	/* class id */
1018 	int		pcd_num;	/* number of PMCs */
1019 	int		pcd_ri;		/* row index of the first PMC in class */
1020 	int		pcd_width;	/* width of the PMC */
1021 
1022 	/* configuring/reading/writing the hardware PMCs */
1023 	int (*pcd_config_pmc)(int _cpu, int _ri, struct pmc *_pm);
1024 	int (*pcd_get_config)(int _cpu, int _ri, struct pmc **_ppm);
1025 	int (*pcd_read_pmc)(int _cpu, int _ri, pmc_value_t *_value);
1026 	int (*pcd_write_pmc)(int _cpu, int _ri, pmc_value_t _value);
1027 
1028 	/* pmc allocation/release */
1029 	int (*pcd_allocate_pmc)(int _cpu, int _ri, struct pmc *_t,
1030 		const struct pmc_op_pmcallocate *_a);
1031 	int (*pcd_release_pmc)(int _cpu, int _ri, struct pmc *_pm);
1032 
1033 	/* starting and stopping PMCs */
1034 	int (*pcd_start_pmc)(int _cpu, int _ri);
1035 	int (*pcd_stop_pmc)(int _cpu, int _ri);
1036 
1037 	/* description */
1038 	int (*pcd_describe)(int _cpu, int _ri, struct pmc_info *_pi,
1039 		struct pmc **_ppmc);
1040 
1041 	/* class-dependent initialization & finalization */
1042 	int (*pcd_pcpu_init)(struct pmc_mdep *_md, int _cpu);
1043 	int (*pcd_pcpu_fini)(struct pmc_mdep *_md, int _cpu);
1044 
1045 	/* machine-specific interface */
1046 	int (*pcd_get_msr)(int _ri, uint32_t *_msr);
1047 };
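/*
 * Illustrative sketch: callers index the per-class operations table and
 * dispatch through it, e.g. reading row 'ri' on 'cpu':
 *
 *	struct pmc_classdep *pcd = &md->pmd_classdep[n];
 *	pmc_value_t v;
 *	(void) pcd->pcd_read_pmc(cpu, ri, &v);
 */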
1048 
1049 /*
1050  * struct pmc_mdep
1051  *
1052  * Machine dependent bits needed per CPU type.
1053  */
1054 
1055 struct pmc_mdep  {
1056 	uint32_t	pmd_cputype;    /* from enum pmc_cputype */
1057 	uint32_t	pmd_npmc;	/* number of PMCs per CPU */
1058 	uint32_t	pmd_nclass;	/* number of PMC classes present */
1059 
1060 	/*
1061 	 * Machine dependent methods.
1062 	 */
1063 
1064 	/* per-cpu initialization and finalization */
1065 	int (*pmd_pcpu_init)(struct pmc_mdep *_md, int _cpu);
1066 	int (*pmd_pcpu_fini)(struct pmc_mdep *_md, int _cpu);
1067 
1068 	/* thread context switch in/out */
1069 	int (*pmd_switch_in)(struct pmc_cpu *_p, struct pmc_process *_pp);
1070 	int (*pmd_switch_out)(struct pmc_cpu *_p, struct pmc_process *_pp);
1071 
1072 	/* handle a PMC interrupt */
1073 	int (*pmd_intr)(struct trapframe *_tf);
1074 
1075 	/*
1076 	 * PMC class dependent information.
1077 	 */
1078 	struct pmc_classdep pmd_classdep[];
1079 };
1080 
1081 /*
1082  * Per-CPU state.  This is an array of 'mp_ncpu' pointers
1083  * to struct pmc_cpu descriptors.
1084  */
1085 
1086 extern struct pmc_cpu **pmc_pcpu;
1087 
1088 /* driver statistics */
1089 extern struct pmc_driverstats pmc_stats;
1090 
1091 #if	defined(HWPMC_DEBUG)
1092 #include <sys/ktr.h>
1093 
1094 /* debug flags, major flag groups */
1095 struct pmc_debugflags {
1096 	int	pdb_CPU;
1097 	int	pdb_CSW;
1098 	int	pdb_LOG;
1099 	int	pdb_MDP;
1100 	int	pdb_MOD;
1101 	int	pdb_OWN;
1102 	int	pdb_PMC;
1103 	int	pdb_PRC;
1104 	int	pdb_SAM;
1105 };
1106 
1107 extern struct pmc_debugflags pmc_debugflags;
1108 
1109 #define	KTR_PMC			KTR_SUBSYS
1110 
1111 #define	PMC_DEBUG_STRSIZE		128
1112 #define	PMC_DEBUG_DEFAULT_FLAGS		{ 0, 0, 0, 0, 0, 0, 0, 0, 0 }
1113 
1114 #define	PMCDBG0(M, N, L, F) do {					\
1115 	if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))	\
1116 		CTR0(KTR_PMC, #M ":" #N ":" #L  ": " F);		\
1117 } while (0)
1118 #define	PMCDBG1(M, N, L, F, p1) do {					\
1119 	if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))	\
1120 		CTR1(KTR_PMC, #M ":" #N ":" #L  ": " F, p1);		\
1121 } while (0)
1122 #define	PMCDBG2(M, N, L, F, p1, p2) do {				\
1123 	if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))	\
1124 		CTR2(KTR_PMC, #M ":" #N ":" #L  ": " F, p1, p2);	\
1125 } while (0)
1126 #define	PMCDBG3(M, N, L, F, p1, p2, p3) do {				\
1127 	if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))	\
1128 		CTR3(KTR_PMC, #M ":" #N ":" #L  ": " F, p1, p2, p3);	\
1129 } while (0)
1130 #define	PMCDBG4(M, N, L, F, p1, p2, p3, p4) do {			\
1131 	if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))	\
1132 		CTR4(KTR_PMC, #M ":" #N ":" #L  ": " F, p1, p2, p3, p4);\
1133 } while (0)
1134 #define	PMCDBG5(M, N, L, F, p1, p2, p3, p4, p5) do {			\
1135 	if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))	\
1136 		CTR5(KTR_PMC, #M ":" #N ":" #L  ": " F, p1, p2, p3, p4,	\
1137 		    p5);						\
1138 } while (0)
1139 #define	PMCDBG6(M, N, L, F, p1, p2, p3, p4, p5, p6) do {		\
1140 	if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))	\
1141 		CTR6(KTR_PMC, #M ":" #N ":" #L  ": " F, p1, p2, p3, p4,	\
1142 		    p5, p6);						\
1143 } while (0)
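/*
 * Illustrative sketch: a trace from the PMC management code, emitted only
 * when the OPS minor bit is set in the PMC major debug flag group:
 *
 *	PMCDBG2(PMC, OPS, 1, "start pmc=%p cpu=%d", pm, cpu);
 */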
1144 
1145 /* Major numbers */
1146 #define	PMC_DEBUG_MAJ_CPU		0 /* cpu switches */
1147 #define	PMC_DEBUG_MAJ_CSW		1 /* context switches */
1148 #define	PMC_DEBUG_MAJ_LOG		2 /* logging */
1149 #define	PMC_DEBUG_MAJ_MDP		3 /* machine dependent */
1150 #define	PMC_DEBUG_MAJ_MOD		4 /* misc module infrastructure */
1151 #define	PMC_DEBUG_MAJ_OWN		5 /* owner */
1152 #define	PMC_DEBUG_MAJ_PMC		6 /* pmc management */
1153 #define	PMC_DEBUG_MAJ_PRC		7 /* processes */
1154 #define	PMC_DEBUG_MAJ_SAM		8 /* sampling */
1155 
1156 /* Minor numbers */
1157 
1158 /* Common (8 bits) */
1159 #define	PMC_DEBUG_MIN_ALL		0 /* allocation */
1160 #define	PMC_DEBUG_MIN_REL		1 /* release */
1161 #define	PMC_DEBUG_MIN_OPS		2 /* ops: start, stop, ... */
1162 #define	PMC_DEBUG_MIN_INI		3 /* init */
1163 #define	PMC_DEBUG_MIN_FND		4 /* find */
1164 
1165 /* MODULE */
1166 #define	PMC_DEBUG_MIN_PMH	       14 /* pmc_hook */
1167 #define	PMC_DEBUG_MIN_PMS	       15 /* pmc_syscall */
1168 
1169 /* OWN */
1170 #define	PMC_DEBUG_MIN_ORM		8 /* owner remove */
1171 #define	PMC_DEBUG_MIN_OMR		9 /* owner maybe remove */
1172 
1173 /* PROCESSES */
1174 #define	PMC_DEBUG_MIN_TLK		8 /* link target */
1175 #define	PMC_DEBUG_MIN_TUL		9 /* unlink target */
1176 #define	PMC_DEBUG_MIN_EXT	       10 /* process exit */
1177 #define	PMC_DEBUG_MIN_EXC	       11 /* process exec */
1178 #define	PMC_DEBUG_MIN_FRK	       12 /* process fork */
1179 #define	PMC_DEBUG_MIN_ATT	       13 /* attach/detach */
1180 #define	PMC_DEBUG_MIN_SIG	       14 /* signalling */
1181 
1182 /* CONTEXT SWITCHES */
1183 #define	PMC_DEBUG_MIN_SWI		8 /* switch in */
1184 #define	PMC_DEBUG_MIN_SWO		9 /* switch out */
1185 
1186 /* PMC */
1187 #define	PMC_DEBUG_MIN_REG		8 /* pmc register */
1188 #define	PMC_DEBUG_MIN_ALR		9 /* allocate row */
1189 
1190 /* MACHINE DEPENDENT LAYER */
1191 #define	PMC_DEBUG_MIN_REA		8 /* read */
1192 #define	PMC_DEBUG_MIN_WRI		9 /* write */
1193 #define	PMC_DEBUG_MIN_CFG	       10 /* config */
1194 #define	PMC_DEBUG_MIN_STA	       11 /* start */
1195 #define	PMC_DEBUG_MIN_STO	       12 /* stop */
1196 #define	PMC_DEBUG_MIN_INT	       13 /* interrupts */
1197 
1198 /* CPU */
1199 #define	PMC_DEBUG_MIN_BND		8 /* bind */
1200 #define	PMC_DEBUG_MIN_SEL		9 /* select */
1201 
1202 /* LOG */
1203 #define	PMC_DEBUG_MIN_GTB		8 /* get buf */
1204 #define	PMC_DEBUG_MIN_SIO		9 /* schedule i/o */
1205 #define	PMC_DEBUG_MIN_FLS	       10 /* flush */
1206 #define	PMC_DEBUG_MIN_SAM	       11 /* sample */
1207 #define	PMC_DEBUG_MIN_CLO	       12 /* close */
1208 
1209 #else
1210 #define	PMCDBG0(M, N, L, F)		/* nothing */
1211 #define	PMCDBG1(M, N, L, F, p1)
1212 #define	PMCDBG2(M, N, L, F, p1, p2)
1213 #define	PMCDBG3(M, N, L, F, p1, p2, p3)
1214 #define	PMCDBG4(M, N, L, F, p1, p2, p3, p4)
1215 #define	PMCDBG5(M, N, L, F, p1, p2, p3, p4, p5)
1216 #define	PMCDBG6(M, N, L, F, p1, p2, p3, p4, p5, p6)
1217 #endif
1218 
1219 /* declare a dedicated memory pool */
1220 MALLOC_DECLARE(M_PMC);
1221 
1222 /*
1223  * Functions
1224  */
1225 
1226 struct pmc_mdep *pmc_md_initialize(void);	/* MD init function */
1227 void	pmc_md_finalize(struct pmc_mdep *_md);	/* MD fini function */
1228 int	pmc_getrowdisp(int _ri);
1229 int	pmc_process_interrupt(int _ring, struct pmc *_pm, struct trapframe *_tf);
1230 int	pmc_save_kernel_callchain(uintptr_t *_cc, int _maxsamples,
1231     struct trapframe *_tf);
1232 int	pmc_save_user_callchain(uintptr_t *_cc, int _maxsamples,
1233     struct trapframe *_tf);
1234 void	pmc_restore_cpu_binding(struct pmc_binding *pb);
1235 void	pmc_save_cpu_binding(struct pmc_binding *pb);
1236 void	pmc_select_cpu(int cpu);
1237 struct pmc_mdep *pmc_mdep_alloc(int nclasses);
1238 void pmc_mdep_free(struct pmc_mdep *md);
1239 uint64_t pmc_rdtsc(void);
1240 #endif /* _KERNEL */
1241 #endif /* _SYS_PMC_H_ */
1242