xref: /freebsd/sys/sys/pmc.h (revision 190cef3d)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2003-2008, Joseph Koshy
5  * Copyright (c) 2007 The FreeBSD Foundation
6  * All rights reserved.
7  *
8  * Portions of this software were developed by A. Joseph Koshy under
9  * sponsorship from the FreeBSD Foundation and Google, Inc.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  * $FreeBSD$
33  */
34 
35 #ifndef _SYS_PMC_H_
36 #define	_SYS_PMC_H_
37 
38 #include <dev/hwpmc/pmc_events.h>
39 #include <sys/proc.h>
40 #include <sys/counter.h>
41 #include <machine/pmc_mdep.h>
42 #include <machine/profile.h>
43 #ifdef _KERNEL
44 #include <sys/epoch.h>
45 #include <ck_queue.h>
46 #endif
47 
48 #define	PMC_MODULE_NAME		"hwpmc"
49 #define	PMC_NAME_MAX		64 /* HW counter name size */
50 #define	PMC_CLASS_MAX		8  /* max #classes of PMCs per-system */
51 
52 /*
53  * Kernel<->userland API version number [MMmmpppp]
54  *
55  * Major numbers are to be incremented when an incompatible change to
56  * the ABI occurs that older clients will not be able to handle.
57  *
58  * Minor numbers are incremented when a backwards compatible change
59  * occurs that allows older correct programs to run unchanged.  For
60  * example, when support for a new PMC type is added.
61  *
62  * The patch version is incremented for every bug fix.
63  */
64 #define	PMC_VERSION_MAJOR	0x09
65 #define	PMC_VERSION_MINOR	0x03
66 #define	PMC_VERSION_PATCH	0x0000
67 
68 #define	PMC_VERSION		(PMC_VERSION_MAJOR << 24 |		\
69 	PMC_VERSION_MINOR << 16 | PMC_VERSION_PATCH)
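
/*
 * Example (illustrative sketch, not part of the ABI): a userland client
 * that has obtained the running module's version (e.g., via the
 * PMC_OP_GETMODULEVERSION request below) could reject an incompatible
 * module by comparing major numbers:
 *
 *	uint32_t kernver;		-- as reported by the module
 *
 *	if ((kernver >> 24) != PMC_VERSION_MAJOR)
 *		return (EPROGMISMATCH);
 */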
70 
71 #define PMC_CPUID_LEN 64
72 /* cpu model name for pmu lookup */
73 extern char pmc_cpuid[PMC_CPUID_LEN];
74 
75 /*
76  * Kinds of CPUs known.
77  *
78  * We keep track of CPU variants that need to be distinguished in
79  * some way for PMC operations.  CPU names are grouped by manufacturer
80  * and numbered sparsely in order to minimize changes to the ABI involved
81  * when new CPUs are added.
82  */
83 
84 #define	__PMC_CPUS()						\
85 	__PMC_CPU(AMD_K7,	0x00,	"AMD K7")		\
86 	__PMC_CPU(AMD_K8,	0x01,	"AMD K8")		\
87 	__PMC_CPU(INTEL_P5,	0x80,	"Intel Pentium")	\
88 	__PMC_CPU(INTEL_P6,	0x81,	"Intel Pentium Pro")	\
89 	__PMC_CPU(INTEL_CL,	0x82,	"Intel Celeron")	\
90 	__PMC_CPU(INTEL_PII,	0x83,	"Intel Pentium II")	\
91 	__PMC_CPU(INTEL_PIII,	0x84,	"Intel Pentium III")	\
92 	__PMC_CPU(INTEL_PM,	0x85,	"Intel Pentium M")	\
93 	__PMC_CPU(INTEL_PIV,	0x86,	"Intel Pentium IV")	\
94 	__PMC_CPU(INTEL_CORE,	0x87,	"Intel Core Solo/Duo")	\
95 	__PMC_CPU(INTEL_CORE2,	0x88,	"Intel Core2")		\
96 	__PMC_CPU(INTEL_CORE2EXTREME,	0x89,	"Intel Core2 Extreme")	\
97 	__PMC_CPU(INTEL_ATOM,	0x8A,	"Intel Atom")		\
98 	__PMC_CPU(INTEL_COREI7, 0x8B,   "Intel Core i7")	\
99 	__PMC_CPU(INTEL_WESTMERE, 0x8C,   "Intel Westmere")	\
100 	__PMC_CPU(INTEL_SANDYBRIDGE, 0x8D,   "Intel Sandy Bridge")	\
101 	__PMC_CPU(INTEL_IVYBRIDGE, 0x8E,   "Intel Ivy Bridge")	\
102 	__PMC_CPU(INTEL_SANDYBRIDGE_XEON, 0x8F,   "Intel Sandy Bridge Xeon")	\
103 	__PMC_CPU(INTEL_IVYBRIDGE_XEON, 0x90,   "Intel Ivy Bridge Xeon")	\
104 	__PMC_CPU(INTEL_HASWELL, 0x91,   "Intel Haswell")	\
105 	__PMC_CPU(INTEL_ATOM_SILVERMONT, 0x92,	"Intel Atom Silvermont")    \
106 	__PMC_CPU(INTEL_NEHALEM_EX, 0x93,   "Intel Nehalem Xeon 7500")	\
107 	__PMC_CPU(INTEL_WESTMERE_EX, 0x94,   "Intel Westmere Xeon E7")	\
108 	__PMC_CPU(INTEL_HASWELL_XEON, 0x95,   "Intel Haswell Xeon E5 v3") \
109 	__PMC_CPU(INTEL_BROADWELL, 0x96,   "Intel Broadwell") \
110 	__PMC_CPU(INTEL_BROADWELL_XEON, 0x97,   "Intel Broadwell Xeon") \
111 	__PMC_CPU(INTEL_SKYLAKE, 0x98,   "Intel Skylake")		\
112 	__PMC_CPU(INTEL_SKYLAKE_XEON, 0x99,   "Intel Skylake Xeon")	\
113 	__PMC_CPU(INTEL_XSCALE,	0x100,	"Intel XScale")		\
114 	__PMC_CPU(MIPS_24K,     0x200,  "MIPS 24K")		\
115 	__PMC_CPU(MIPS_OCTEON,  0x201,  "Cavium Octeon")	\
116 	__PMC_CPU(MIPS_74K,     0x202,  "MIPS 74K")		\
117 	__PMC_CPU(PPC_7450,     0x300,  "PowerPC MPC7450")	\
118 	__PMC_CPU(PPC_E500,     0x340,  "PowerPC e500 Core")	\
119 	__PMC_CPU(PPC_970,      0x380,  "IBM PowerPC 970")	\
120 	__PMC_CPU(GENERIC, 	0x400,  "Generic")		\
121 	__PMC_CPU(ARMV7_CORTEX_A5,	0x500,	"ARMv7 Cortex A5")	\
122 	__PMC_CPU(ARMV7_CORTEX_A7,	0x501,	"ARMv7 Cortex A7")	\
123 	__PMC_CPU(ARMV7_CORTEX_A8,	0x502,	"ARMv7 Cortex A8")	\
124 	__PMC_CPU(ARMV7_CORTEX_A9,	0x503,	"ARMv7 Cortex A9")	\
125 	__PMC_CPU(ARMV7_CORTEX_A15,	0x504,	"ARMv7 Cortex A15")	\
126 	__PMC_CPU(ARMV7_CORTEX_A17,	0x505,	"ARMv7 Cortex A17")	\
127 	__PMC_CPU(ARMV8_CORTEX_A53,	0x600,	"ARMv8 Cortex A53")	\
128 	__PMC_CPU(ARMV8_CORTEX_A57,	0x601,	"ARMv8 Cortex A57")
129 
130 enum pmc_cputype {
131 #undef	__PMC_CPU
132 #define	__PMC_CPU(S,V,D)	PMC_CPU_##S = V,
133 	__PMC_CPUS()
134 };
135 
136 #define	PMC_CPU_FIRST	PMC_CPU_AMD_K7
137 #define	PMC_CPU_LAST	PMC_CPU_GENERIC
138 
139 /*
140  * Classes of PMCs
141  */
142 
143 #define	__PMC_CLASSES()							\
144 	__PMC_CLASS(TSC,	0x00,	"CPU Timestamp counter")	\
145 	__PMC_CLASS(K7,		0x01,	"AMD K7 performance counters")	\
146 	__PMC_CLASS(K8,		0x02,	"AMD K8 performance counters")	\
147 	__PMC_CLASS(P5,		0x03,	"Intel Pentium counters")	\
148 	__PMC_CLASS(P6,		0x04,	"Intel Pentium Pro counters")	\
149 	__PMC_CLASS(P4,		0x05,	"Intel Pentium-IV counters")	\
150 	__PMC_CLASS(IAF,	0x06,	"Intel Core2/Atom, fixed function") \
151 	__PMC_CLASS(IAP,	0x07,	"Intel Core...Atom, programmable") \
152 	__PMC_CLASS(UCF,	0x08,	"Intel Uncore fixed function")	\
153 	__PMC_CLASS(UCP,	0x09,	"Intel Uncore programmable")	\
154 	__PMC_CLASS(XSCALE,	0x0A,	"Intel XScale counters")	\
155 	__PMC_CLASS(MIPS24K,	0x0B,	"MIPS 24K")			\
156 	__PMC_CLASS(OCTEON,	0x0C,	"Cavium Octeon")		\
157 	__PMC_CLASS(PPC7450,	0x0D,	"Motorola MPC7450 class")	\
158 	__PMC_CLASS(PPC970,	0x0E,	"IBM PowerPC 970 class")	\
159 	__PMC_CLASS(SOFT,	0x0F,	"Software events")		\
160 	__PMC_CLASS(ARMV7,	0x10,	"ARMv7")			\
161 	__PMC_CLASS(ARMV8,	0x11,	"ARMv8")			\
162 	__PMC_CLASS(MIPS74K,	0x12,	"MIPS 74K")			\
163 	__PMC_CLASS(E500,	0x13,	"Freescale e500 class")
164 
165 enum pmc_class {
166 #undef  __PMC_CLASS
167 #define	__PMC_CLASS(S,V,D)	PMC_CLASS_##S = V,
168 	__PMC_CLASSES()
169 };
170 
171 #define	PMC_CLASS_FIRST	PMC_CLASS_TSC
172 #define	PMC_CLASS_LAST	PMC_CLASS_E500
173 
174 /*
175  * A PMC can be in the following states:
176  *
177  * Hardware states:
178  *   DISABLED   -- administratively prohibited from being used.
179  *   FREE       -- HW available for use
180  * Software states:
181  *   ALLOCATED  -- allocated
182  *   STOPPED    -- allocated, but not counting events
183  *   RUNNING    -- allocated, and in operation; 'pm_runcount'
184  *                 holds the number of CPUs using this PMC at
185  *                 a given instant
186  *   DELETED    -- being destroyed
187  */
188 
189 #define	__PMC_HWSTATES()			\
190 	__PMC_STATE(DISABLED)			\
191 	__PMC_STATE(FREE)
192 
193 #define	__PMC_SWSTATES()			\
194 	__PMC_STATE(ALLOCATED)			\
195 	__PMC_STATE(STOPPED)			\
196 	__PMC_STATE(RUNNING)			\
197 	__PMC_STATE(DELETED)
198 
199 #define	__PMC_STATES()				\
200 	__PMC_HWSTATES()			\
201 	__PMC_SWSTATES()
202 
203 enum pmc_state {
204 #undef	__PMC_STATE
205 #define	__PMC_STATE(S)	PMC_STATE_##S,
206 	__PMC_STATES()
207 	__PMC_STATE(MAX)
208 };
209 
210 #define	PMC_STATE_FIRST	PMC_STATE_DISABLED
211 #define	PMC_STATE_LAST	PMC_STATE_DELETED
212 
213 /*
214  * An allocated PMC may be used as a 'global' counter or as a
215  * 'thread-private' one.  Each mode of use can further be either
216  * statistical sampling or counting.  A PMC in use is thus in one of:
217  *
218  * SS i.e., SYSTEM STATISTICAL  -- system-wide statistical profiling
219  * SC i.e., SYSTEM COUNTER      -- system-wide counting mode
220  * TS i.e., THREAD STATISTICAL  -- thread virtual, statistical profiling
221  * TC i.e., THREAD COUNTER      -- thread virtual, counting mode
222  *
223  * Statistical profiling modes rely on the PMC periodically delivering
224  * an interrupt to the CPU (when the configured number of events has
225  * been measured), so the PMC must have the ability to generate
226  * interrupts.
227  *
228  * In counting modes, the PMC counts its configured events, with the
229  * value of the PMC being read whenever needed by its owner process.
230  *
231  * The thread specific modes "virtualize" the PMCs -- the PMCs appear
232  * to be thread private and count events only when the profiled thread
233  * actually executes on the CPU.
234  *
235  * The system-wide "global" modes keep the PMCs running all the time
236  * and are used to measure the behaviour of the whole system.
237  */
238 
239 #define	__PMC_MODES()				\
240 	__PMC_MODE(SS,	0)			\
241 	__PMC_MODE(SC,	1)			\
242 	__PMC_MODE(TS,	2)			\
243 	__PMC_MODE(TC,	3)
244 
245 enum pmc_mode {
246 #undef	__PMC_MODE
247 #define	__PMC_MODE(M,N)	PMC_MODE_##M = N,
248 	__PMC_MODES()
249 };
250 
251 #define	PMC_MODE_FIRST	PMC_MODE_SS
252 #define	PMC_MODE_LAST	PMC_MODE_TC
253 
254 #define	PMC_IS_COUNTING_MODE(mode)				\
255 	((mode) == PMC_MODE_SC || (mode) == PMC_MODE_TC)
256 #define	PMC_IS_SYSTEM_MODE(mode)				\
257 	((mode) == PMC_MODE_SS || (mode) == PMC_MODE_SC)
258 #define	PMC_IS_SAMPLING_MODE(mode)				\
259 	((mode) == PMC_MODE_SS || (mode) == PMC_MODE_TS)
260 #define	PMC_IS_VIRTUAL_MODE(mode)				\
261 	((mode) == PMC_MODE_TS || (mode) == PMC_MODE_TC)
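
/*
 * For example, PMC_MODE_TS (thread-virtual, statistical sampling)
 * satisfies:
 *
 *	PMC_IS_SAMPLING_MODE(PMC_MODE_TS) != 0
 *	PMC_IS_VIRTUAL_MODE(PMC_MODE_TS)  != 0
 *	PMC_IS_COUNTING_MODE(PMC_MODE_TS) == 0
 *	PMC_IS_SYSTEM_MODE(PMC_MODE_TS)   == 0
 */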
262 
263 /*
264  * PMC row disposition
265  */
266 
267 #define	__PMC_DISPOSITIONS()					\
268 	__PMC_DISP(STANDALONE)	/* global/disabled counters */	\
269 	__PMC_DISP(FREE)	/* free/available */		\
270 	__PMC_DISP(THREAD)	/* thread-virtual PMCs */	\
271 	__PMC_DISP(UNKNOWN)	/* sentinel */
272 
273 enum pmc_disp {
274 #undef	__PMC_DISP
275 #define	__PMC_DISP(D)	PMC_DISP_##D ,
276 	__PMC_DISPOSITIONS()
277 };
278 
279 #define	PMC_DISP_FIRST	PMC_DISP_STANDALONE
280 #define	PMC_DISP_LAST	PMC_DISP_THREAD
281 
282 /*
283  * Counter capabilities
284  *
285  * __PMC_CAPS(NAME, VALUE, DESCRIPTION)
286  */
287 
288 #define	__PMC_CAPS()							\
289 	__PMC_CAP(INTERRUPT,	0, "generate interrupts")		\
290 	__PMC_CAP(USER,		1, "count user-mode events")		\
291 	__PMC_CAP(SYSTEM,	2, "count system-mode events")		\
292 	__PMC_CAP(EDGE,		3, "do edge detection of events")	\
293 	__PMC_CAP(THRESHOLD,	4, "ignore events below a threshold")	\
294 	__PMC_CAP(READ,		5, "read PMC counter")			\
295 	__PMC_CAP(WRITE,	6, "reprogram PMC counter")		\
296 	__PMC_CAP(INVERT,	7, "invert comparison sense")		\
297 	__PMC_CAP(QUALIFIER,	8, "further qualify monitored events")	\
298 	__PMC_CAP(PRECISE,	9, "perform precise sampling")		\
299 	__PMC_CAP(TAGGING,	10, "tag upstream events")		\
300 	__PMC_CAP(CASCADE,	11, "cascade counters")
301 
302 enum pmc_caps
303 {
304 #undef	__PMC_CAP
305 #define	__PMC_CAP(NAME, VALUE, DESCR)	PMC_CAP_##NAME = (1 << VALUE) ,
306 	__PMC_CAPS()
307 };
308 
309 #define	PMC_CAP_FIRST		PMC_CAP_INTERRUPT
310 #define	PMC_CAP_LAST		PMC_CAP_CASCADE
311 
312 /*
313  * PMC Event Numbers
314  *
315  * These are generated from the definitions in "dev/hwpmc/pmc_events.h".
316  */
317 
318 enum pmc_event {
319 #undef	__PMC_EV
320 #undef	__PMC_EV_BLOCK
321 #define	__PMC_EV_BLOCK(C,V)	PMC_EV_ ## C ## __BLOCK_START = (V) - 1 ,
322 #define	__PMC_EV(C,N)		PMC_EV_ ## C ## _ ## N ,
323 	__PMC_EVENTS()
324 };
325 
326 /*
327  * PMC SYSCALL INTERFACE
328  */
329 
330 /*
331  * "PMC_OPS" -- these are the commands recognized by the kernel
332  * module, and are used when performing a system call from userland.
333  */
334 #define	__PMC_OPS()							\
335 	__PMC_OP(CONFIGURELOG, "Set log file")				\
336 	__PMC_OP(FLUSHLOG, "Flush log file")				\
337 	__PMC_OP(GETCPUINFO, "Get system CPU information")		\
338 	__PMC_OP(GETDRIVERSTATS, "Get driver statistics")		\
339 	__PMC_OP(GETMODULEVERSION, "Get module version")		\
340 	__PMC_OP(GETPMCINFO, "Get per-cpu PMC information")		\
341 	__PMC_OP(PMCADMIN, "Set PMC state")				\
342 	__PMC_OP(PMCALLOCATE, "Allocate and configure a PMC")		\
343 	__PMC_OP(PMCATTACH, "Attach a PMC to a process")		\
344 	__PMC_OP(PMCDETACH, "Detach a PMC from a process")		\
345 	__PMC_OP(PMCGETMSR, "Get a PMC's hardware address")		\
346 	__PMC_OP(PMCRELEASE, "Release a PMC")				\
347 	__PMC_OP(PMCRW, "Read/Set a PMC")				\
348 	__PMC_OP(PMCSETCOUNT, "Set initial count/sampling rate")	\
349 	__PMC_OP(PMCSTART, "Start a PMC")				\
350 	__PMC_OP(PMCSTOP, "Stop a PMC")					\
351 	__PMC_OP(WRITELOG, "Write a cookie to the log file")		\
352 	__PMC_OP(CLOSELOG, "Close log file")				\
353 	__PMC_OP(GETDYNEVENTINFO, "Get dynamic events list")
354 
355 
356 enum pmc_ops {
357 #undef	__PMC_OP
358 #define	__PMC_OP(N, D)	PMC_OP_##N,
359 	__PMC_OPS()
360 };
361 
362 
363 /*
364  * Flags used in operations on PMCs.
365  */
366 
367 #define	PMC_F_UNUSED1		0x00000001 /* unused */
368 #define	PMC_F_DESCENDANTS	0x00000002 /*OP ALLOCATE track descendants */
369 #define	PMC_F_LOG_PROCCSW	0x00000004 /*OP ALLOCATE track ctx switches */
370 #define	PMC_F_LOG_PROCEXIT	0x00000008 /*OP ALLOCATE log proc exits */
371 #define	PMC_F_NEWVALUE		0x00000010 /*OP RW write new value */
372 #define	PMC_F_OLDVALUE		0x00000020 /*OP RW get old value */
373 
374 /* V2 API */
375 #define	PMC_F_CALLCHAIN		0x00000080 /*OP ALLOCATE capture callchains */
376 #define	PMC_F_USERCALLCHAIN	0x00000100 /*OP ALLOCATE use userspace stack */
377 
378 /* internal flags */
379 #define	PMC_F_ATTACHED_TO_OWNER	0x00010000 /*attached to owner*/
380 #define	PMC_F_NEEDS_LOGFILE	0x00020000 /*needs log file */
381 #define	PMC_F_ATTACH_DONE	0x00040000 /*attached at least once */
382 
383 #define	PMC_CALLCHAIN_DEPTH_MAX	512
384 
385 #define	PMC_CC_F_USERSPACE	0x01	   /*userspace callchain*/
386 
387 /*
388  * Cookies used to denote allocated PMCs, and the values of PMCs.
389  */
390 
391 typedef uint32_t	pmc_id_t;
392 typedef uint64_t	pmc_value_t;
393 
394 #define	PMC_ID_INVALID		(~ (pmc_id_t) 0)
395 
396 /*
397  * PMC IDs have the following format:
398  *
399  * +------------+----------+-------+-----------+
400  * |    CPU     | PMC MODE | CLASS | ROW INDEX |
401  * +------------+----------+-------+-----------+
402  *
403  * where CPU is 12 bits, MODE 8, CLASS 4, and ROW INDEX 8.  Field 'CPU'
404  * is set to the requested CPU for system-wide PMCs or PMC_CPU_ANY for
405  * process-mode PMCs.  Field 'PMC MODE' is the allocated PMC mode.
406  * Field 'PMC CLASS' is the class of the PMC.  Field 'ROW INDEX' is the
407  * row index for the PMC.
408  *
409  * The 'ROW INDEX' ranges over 0..(NHWPMCS-1), where NHWPMCS is the total
410  * number of hardware PMCs on this cpu.
411  */
412 
413 
414 #define	PMC_ID_TO_ROWINDEX(ID)	((ID) & 0xFF)
415 #define	PMC_ID_TO_CLASS(ID)	(((ID) & 0xF00) >> 8)
416 #define	PMC_ID_TO_MODE(ID)	(((ID) & 0xFF000) >> 12)
417 #define	PMC_ID_TO_CPU(ID)	(((ID) & 0xFFF00000) >> 20)
418 #define	PMC_ID_MAKE_ID(CPU,MODE,CLASS,ROWINDEX)			\
419 	((((CPU) & 0xFFF) << 20) | (((MODE) & 0xFF) << 12) |	\
420 	(((CLASS) & 0xF) << 8) | ((ROWINDEX) & 0xFF))
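
/*
 * Example (illustrative sketch): composing and decomposing a PMC id
 * with the macros above, for a thread-virtual counting-mode PMC of
 * class PMC_CLASS_IAP at row index 2.  Process-scope PMCs carry
 * PMC_CPU_ANY (defined below) in the CPU field:
 *
 *	pmc_id_t id;
 *
 *	id = PMC_ID_MAKE_ID(PMC_CPU_ANY, PMC_MODE_TC, PMC_CLASS_IAP, 2);
 *	-- PMC_ID_TO_MODE(id) == PMC_MODE_TC
 *	-- PMC_ID_TO_CLASS(id) == PMC_CLASS_IAP
 *	-- PMC_ID_TO_ROWINDEX(id) == 2
 */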
421 
422 /*
423  * Data structures for system calls supported by the pmc driver.
424  */
425 
426 /*
427  * OP PMCALLOCATE
428  *
429  * Allocate a PMC on the named CPU.
430  */
431 
432 #define	PMC_CPU_ANY	~0
433 
434 struct pmc_op_pmcallocate {
435 	uint32_t	pm_caps;	/* PMC_CAP_* */
436 	uint32_t	pm_cpu;		/* CPU number or PMC_CPU_ANY */
437 	enum pmc_class	pm_class;	/* class of PMC desired */
438 	enum pmc_event	pm_ev;		/* [enum pmc_event] desired */
439 	uint32_t	pm_flags;	/* additional modifiers PMC_F_* */
440 	enum pmc_mode	pm_mode;	/* desired mode */
441 	pmc_id_t	pm_pmcid;	/* [return] process pmc id */
442 	pmc_value_t	pm_count;	/* initial/sample count */
443 
444 	union pmc_md_op_pmcallocate pm_md; /* MD layer extensions */
445 };
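
/*
 * Example (illustrative sketch; userland normally uses the pmc(3)
 * library instead of filling this structure by hand).  Requesting a
 * thread-virtual counting PMC; the event named here is only for
 * illustration and must be one supported by the chosen class:
 *
 *	struct pmc_op_pmcallocate pa;
 *
 *	memset(&pa, 0, sizeof(pa));
 *	pa.pm_class = PMC_CLASS_IAP;
 *	pa.pm_ev    = PMC_EV_IAP_ARCH_UNH_COR_CYC;
 *	pa.pm_mode  = PMC_MODE_TC;
 *	pa.pm_cpu   = PMC_CPU_ANY;
 *	pa.pm_caps  = PMC_CAP_USER | PMC_CAP_SYSTEM;
 *
 * On success the kernel returns the allocated id in 'pa.pm_pmcid'.
 */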
446 
447 /*
448  * OP PMCADMIN
449  *
450  * Set the administrative state (i.e., whether enabled or disabled) of
451  * a PMC 'pm_pmc' on CPU 'pm_cpu'.  Note that 'pm_pmc' specifies an
452  * absolute PMC number and need not have been first allocated by the
453  * calling process.
454  */
455 
456 struct pmc_op_pmcadmin {
457 	int		pm_cpu;		/* CPU# */
458 	uint32_t	pm_flags;	/* flags */
459 	int		pm_pmc;         /* PMC# */
460 	enum pmc_state  pm_state;	/* desired state */
461 };
462 
463 /*
464  * OP PMCATTACH / OP PMCDETACH
465  *
466  * Attach/detach a PMC and a process.
467  */
468 
469 struct pmc_op_pmcattach {
470 	pmc_id_t	pm_pmc;		/* PMC to attach to */
471 	pid_t		pm_pid;		/* target process */
472 };
473 
474 /*
475  * OP PMCSETCOUNT
476  *
477  * Set the sampling rate (i.e., the reload count) for statistical counters.
478  * 'pm_pmcid' needs to have been previously allocated using PMCALLOCATE.
479  */
480 
481 struct pmc_op_pmcsetcount {
482 	pmc_value_t	pm_count;	/* initial/sample count */
483 	pmc_id_t	pm_pmcid;	/* PMC id to set */
484 };
485 
486 
487 /*
488  * OP PMCRW
489  *
490  * Read the value of a PMC named by 'pm_pmcid'.  'pm_pmcid' needs
491  * to have been previously allocated using PMCALLOCATE.
492  */
493 
494 
495 struct pmc_op_pmcrw {
496 	uint32_t	pm_flags;	/* PMC_F_{OLD,NEW}VALUE*/
497 	pmc_id_t	pm_pmcid;	/* pmc id */
498 	pmc_value_t	pm_value;	/* new&returned value */
499 };
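
/*
 * Example (sketch): reading the current value of a PMC without
 * modifying it:
 *
 *	struct pmc_op_pmcrw prw;
 *
 *	prw.pm_pmcid = pmcid;		-- id from OP PMCALLOCATE
 *	prw.pm_flags = PMC_F_OLDVALUE;	-- read only; OR in PMC_F_NEWVALUE
 *					   and set pm_value to also write
 *	prw.pm_value = 0;
 *
 * After the operation completes the PMC's value is found in 'prw.pm_value'.
 */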
500 
501 
502 /*
503  * OP GETPMCINFO
504  *
505  * Retrieve PMC state for a named CPU.  The caller is expected to
506  * allocate space for 'npmc' 'struct pmc_info' structures to hold the
507  * return values.
508  */
509 
510 struct pmc_info {
511 	char		pm_name[PMC_NAME_MAX]; /* pmc name */
512 	enum pmc_class	pm_class;	/* enum pmc_class */
513 	int		pm_enabled;	/* whether enabled */
514 	enum pmc_disp	pm_rowdisp;	/* FREE, THREAD or STANDALONE */
515 	pid_t		pm_ownerpid;	/* owner, or -1 */
516 	enum pmc_mode	pm_mode;	/* current mode [enum pmc_mode] */
517 	enum pmc_event	pm_event;	/* current event */
518 	uint32_t	pm_flags;	/* current flags */
519 	pmc_value_t	pm_reloadcount;	/* sampling counters only */
520 };
521 
522 struct pmc_op_getpmcinfo {
523 	int32_t		pm_cpu;		/* 0 <= cpu <= mp_maxid */
524 	struct pmc_info	pm_pmcs[];	/* space for 'npmc' structures */
525 };
526 
527 
528 /*
529  * OP GETCPUINFO
530  *
531  * Retrieve system CPU information.
532  */
533 
534 
535 struct pmc_classinfo {
536 	enum pmc_class	pm_class;	/* class id */
537 	uint32_t	pm_caps;	/* counter capabilities */
538 	uint32_t	pm_width;	/* width of the PMC */
539 	uint32_t	pm_num;		/* number of PMCs in class */
540 };
541 
542 struct pmc_op_getcpuinfo {
543 	enum pmc_cputype pm_cputype; /* what kind of CPU */
544 	uint32_t	pm_ncpu;    /* max CPU number */
545 	uint32_t	pm_npmc;    /* #PMCs per CPU */
546 	uint32_t	pm_nclass;  /* #classes of PMCs */
547 	struct pmc_classinfo  pm_classes[PMC_CLASS_MAX];
548 };
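
/*
 * Example (sketch): after a successful OP GETCPUINFO only the first
 * 'pm_nclass' entries of 'pm_classes[]' are meaningful:
 *
 *	struct pmc_op_getcpuinfo ci;
 *	uint32_t i;
 *
 *	-- issue OP GETCPUINFO, then:
 *	for (i = 0; i < ci.pm_nclass; i++)
 *		printf("class %d: %u pmcs, %u bits wide\n",
 *		    (int)ci.pm_classes[i].pm_class,
 *		    ci.pm_classes[i].pm_num, ci.pm_classes[i].pm_width);
 */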
549 
550 /*
551  * OP CONFIGURELOG
552  *
553  * Configure a log file to which system-wide statistics are written.
554  */
555 
556 struct pmc_op_configurelog {
557 	int		pm_flags;
558 	int		pm_logfd;   /* logfile fd (or -1) */
559 };
560 
561 /*
562  * OP GETDRIVERSTATS
563  *
564  * Retrieve pmc(4) driver-wide statistics.
565  */
566 #ifdef _KERNEL
567 struct pmc_driverstats {
568 	counter_u64_t	pm_intr_ignored;	/* #interrupts ignored */
569 	counter_u64_t	pm_intr_processed;	/* #interrupts processed */
570 	counter_u64_t	pm_intr_bufferfull;	/* #interrupts with ENOSPC */
571 	counter_u64_t	pm_syscalls;		/* #syscalls */
572 	counter_u64_t	pm_syscall_errors;	/* #syscalls with errors */
573 	counter_u64_t	pm_buffer_requests;	/* #buffer requests */
574 	counter_u64_t	pm_buffer_requests_failed; /* #failed buffer requests */
575 	counter_u64_t	pm_log_sweeps;		/* #sample buffer processing
576 						   passes */
577 	counter_u64_t	pm_merges;		/* merged k+u */
578 	counter_u64_t	pm_overwrites;		/* UR overwrites */
579 };
580 #endif
581 
582 struct pmc_op_getdriverstats {
583 	unsigned int	pm_intr_ignored;	/* #interrupts ignored */
584 	unsigned int	pm_intr_processed;	/* #interrupts processed */
585 	unsigned int	pm_intr_bufferfull;	/* #interrupts with ENOSPC */
586 	unsigned int	pm_syscalls;		/* #syscalls */
587 	unsigned int	pm_syscall_errors;	/* #syscalls with errors */
588 	unsigned int	pm_buffer_requests;	/* #buffer requests */
589 	unsigned int	pm_buffer_requests_failed; /* #failed buffer requests */
590 	unsigned int	pm_log_sweeps;		/* #sample buffer processing
591 						   passes */
592 };
593 
594 /*
595  * OP RELEASE / OP START / OP STOP
596  *
597  * Simple operations on a PMC id.
598  */
599 
600 struct pmc_op_simple {
601 	pmc_id_t	pm_pmcid;
602 };
603 
604 /*
605  * OP WRITELOG
606  *
607  * Flush the current log buffer and write 4 bytes of user data to it.
608  */
609 
610 struct pmc_op_writelog {
611 	uint32_t	pm_userdata;
612 };
613 
614 /*
615  * OP GETMSR
616  *
617  * Retrieve the machine specific address associated with the allocated
618  * PMC.  This number can be used subsequently with a read-performance-counter
619  * instruction.
620  */
621 
622 struct pmc_op_getmsr {
623 	uint32_t	pm_msr;		/* machine specific address */
624 	pmc_id_t	pm_pmcid;	/* allocated pmc id */
625 };
626 
627 /*
628  * OP GETDYNEVENTINFO
629  *
630  * Retrieve a PMC dynamic class events list.
631  */
632 
633 struct pmc_dyn_event_descr {
634 	char		pm_ev_name[PMC_NAME_MAX];
635 	enum pmc_event	pm_ev_code;
636 };
637 
638 struct pmc_op_getdyneventinfo {
639 	enum pmc_class			pm_class;
640 	unsigned int			pm_nevent;
641 	struct pmc_dyn_event_descr	pm_events[PMC_EV_DYN_COUNT];
642 };
643 
644 #ifdef _KERNEL
645 
646 #include <sys/malloc.h>
647 #include <sys/sysctl.h>
648 #include <sys/_cpuset.h>
649 
650 #include <machine/frame.h>
651 
652 #define	PMC_HASH_SIZE				1024
653 #define	PMC_MTXPOOL_SIZE			2048
654 #define	PMC_LOG_BUFFER_SIZE			256
655 #define	PMC_NLOGBUFFERS_PCPU			32
656 #define	PMC_NSAMPLES				256
657 #define	PMC_CALLCHAIN_DEPTH			128
658 #define	PMC_THREADLIST_MAX			128
659 
660 #define PMC_SYSCTL_NAME_PREFIX "kern." PMC_MODULE_NAME "."
661 
662 /*
663  * Locking keys
664  *
665  * (b) - pmc_bufferlist_mtx (spin lock)
666  * (k) - pmc_kthread_mtx (sleep lock)
667  * (o) - po->po_mtx (spin lock)
668  * (g) - global_epoch_preempt (epoch)
669  * (p) - pmc_sx (sx)
670  */
671 
672 /*
673  * PMC commands
674  */
675 
676 struct pmc_syscall_args {
677 	register_t	pmop_code;	/* one of PMC_OP_* */
678 	void		*pmop_data;	/* syscall parameter */
679 };
680 
681 /*
682  * Interface to processor-specific stuff
683  */
684 
685 /*
686  * struct pmc_descr
687  *
688  * Machine independent (i.e., the common parts) of a human readable
689  * PMC description.
690  */
691 
692 struct pmc_descr {
693 	char		pd_name[PMC_NAME_MAX]; /* name */
694 	uint32_t	pd_caps;	/* capabilities */
695 	enum pmc_class	pd_class;	/* class of the PMC */
696 	uint32_t	pd_width;	/* width in bits */
697 };
698 
699 /*
700  * struct pmc_target
701  *
702  * This structure records all the target processes associated with a
703  * PMC.
704  */
705 
706 struct pmc_target {
707 	LIST_ENTRY(pmc_target)	pt_next;
708 	struct pmc_process	*pt_process; /* target descriptor */
709 };
710 
711 /*
712  * struct pmc
713  *
714  * Describes each allocated PMC.
715  *
716  * Each PMC has precisely one owner, namely the process that allocated
717  * the PMC.
718  *
719  * A PMC may be attached to multiple target processes.  The
720  * 'pm_targets' field links all the target processes being monitored
721  * by this PMC.
722  *
723  * The 'pm_savedvalue' field is protected by a mutex.
724  *
725  * On a multi-cpu machine, multiple target threads associated with a
726  * process-virtual PMC could be concurrently executing on different
727  * CPUs.  The 'pm_runcount' field is atomically incremented every time
728  * the PMC gets scheduled on a CPU and atomically decremented when it
729  * gets descheduled.  Deletion of a PMC is only permitted when this
730  * field is '0'.
731  *
732  */
733 struct pmc_pcpu_state {
734 	uint8_t pps_stalled;
735 	uint8_t pps_cpustate;
736 } __aligned(CACHE_LINE_SIZE);
737 struct pmc {
738 	LIST_HEAD(,pmc_target)	pm_targets;	/* list of target processes */
739 	LIST_ENTRY(pmc)		pm_next;	/* owner's list */
740 
741 	/*
742 	 * System-wide PMCs are allocated on a CPU and are not moved
743 	 * around.  For system-wide PMCs we record the CPU the PMC was
744 	 * allocated on in the 'CPU' field of the pmc ID.
745 	 *
746 	 * Virtual PMCs run on whichever CPU is currently executing
747 	 * their targets' threads.  For these PMCs we need to save
748 	 * their current PMC counter values when they are taken off
749 	 * CPU.
750 	 */
751 
752 	union {
753 		pmc_value_t	pm_savedvalue;	/* Virtual PMCS */
754 	} pm_gv;
755 
756 	/*
757 	 * For sampling mode PMCs, we keep track of the PMC's "reload
758 	 * count", which is the counter value to be loaded in when
759 	 * arming the PMC for the next counting session.  For counting
760 	 * modes on PMCs that are read-only (e.g., the x86 TSC), we
761 	 * keep track of the initial value at the start of
762 	 * counting-mode operation.
763 	 */
764 
765 	union {
766 		pmc_value_t	pm_reloadcount;	/* sampling PMC modes */
767 		pmc_value_t	pm_initial;	/* counting PMC modes */
768 	} pm_sc;
769 
770 	struct pmc_pcpu_state *pm_pcpu_state;
771 	volatile cpuset_t pm_cpustate;	/* CPUs where PMC should be active */
772 	uint32_t	pm_caps;	/* PMC capabilities */
773 	enum pmc_event	pm_event;	/* event being measured */
774 	uint32_t	pm_flags;	/* additional flags PMC_F_... */
775 	struct pmc_owner *pm_owner;	/* owner thread state */
776 	counter_u64_t		pm_runcount;	/* #cpus currently on */
777 	enum pmc_state	pm_state;	/* current PMC state */
778 	uint32_t	pm_overflowcnt;	/* count overflow interrupts */
779 
780 	/*
781 	 * The PMC ID field encodes the row-index for the PMC, its
782 	 * mode, class and the CPU# associated with the PMC.
783 	 */
784 
785 	pmc_id_t	pm_id;		/* allocated PMC id */
786 	enum pmc_class pm_class;
787 
788 	/* md extensions */
789 	union pmc_md_pmc	pm_md;
790 };
791 
792 /*
793  * Accessor macros for 'struct pmc'
794  */
795 
796 #define	PMC_TO_MODE(P)		PMC_ID_TO_MODE((P)->pm_id)
797 #define	PMC_TO_CLASS(P)		PMC_ID_TO_CLASS((P)->pm_id)
798 #define	PMC_TO_ROWINDEX(P)	PMC_ID_TO_ROWINDEX((P)->pm_id)
799 #define	PMC_TO_CPU(P)		PMC_ID_TO_CPU((P)->pm_id)
800 
801 /*
802  * struct pmc_threadpmcstate
803  *
804  * Record per-PMC, per-thread state.
805  */
806 struct pmc_threadpmcstate {
807 	pmc_value_t	pt_pmcval;	/* per-thread reload count */
808 };
809 
810 /*
811  * struct pmc_thread
812  *
813  * Record a 'target' thread being profiled.
814  */
815 struct pmc_thread {
816 	LIST_ENTRY(pmc_thread) pt_next;		/* linked list */
817 	struct thread	*pt_td;			/* target thread */
818 	struct pmc_threadpmcstate pt_pmcs[];	/* per-PMC state */
819 };
820 
821 /*
822  * struct pmc_process
823  *
824  * Record a 'target' process being profiled.
825  *
826  * The target process being profiled could be different from the owner
827  * process which allocated the PMCs.  Each target process descriptor
828  * is associated with NHWPMC 'struct pmc *' pointers.  Each PMC at a
829  * given hardware row-index 'n' will use slot 'n' of the 'pp_pmcs[]'
830  * array.  The size of this structure is thus PMC architecture
831  * dependent.
832  *
833  */
834 
835 struct pmc_targetstate {
836 	struct pmc	*pp_pmc;   /* target PMC */
837 	pmc_value_t	pp_pmcval; /* per-process value */
838 };
839 
840 struct pmc_process {
841 	LIST_ENTRY(pmc_process) pp_next;	/* hash chain */
842 	LIST_HEAD(,pmc_thread) pp_tds;		/* list of threads */
843 	struct mtx	*pp_tdslock;		/* lock on pp_tds thread list */
844 	int		pp_refcnt;		/* reference count */
845 	uint32_t	pp_flags;		/* flags PMC_PP_* */
846 	struct proc	*pp_proc;		/* target process */
847 	struct pmc_targetstate pp_pmcs[];       /* NHWPMCs */
848 };
849 
850 #define	PMC_PP_ENABLE_MSR_ACCESS	0x00000001
851 
852 /*
853  * struct pmc_owner
854  *
855  * We associate a PMC with an 'owner' process.
856  *
857  * A process can be associated with 0..NCPUS*NHWPMC PMCs during its
858  * lifetime, where NCPUS is the number of CPUs in the system and
859  * NHWPMC is the number of hardware PMCs per CPU.  These are
860  * maintained in the list headed by the 'po_pmcs' field to save on space.
861  *
862  */
863 
864 struct pmc_owner  {
865 	LIST_ENTRY(pmc_owner)	po_next;	/* hash chain */
866 	CK_LIST_ENTRY(pmc_owner)	po_ssnext;	/* (g/p) list of SS PMC owners */
867 	LIST_HEAD(, pmc)	po_pmcs;	/* owned PMC list */
868 	TAILQ_HEAD(, pmclog_buffer) po_logbuffers; /* (o) logbuffer list */
869 	struct mtx		po_mtx;		/* spin lock for (o) */
870 	struct proc		*po_owner;	/* owner proc */
871 	uint32_t		po_flags;	/* (k) flags PMC_PO_* */
872 	struct proc		*po_kthread;	/* (k) helper kthread */
873 	struct file		*po_file;	/* file reference */
874 	int			po_error;	/* recorded error */
875 	short			po_sscount;	/* # SS PMCs owned */
876 	short			po_logprocmaps;	/* global mappings done */
877 	struct pmclog_buffer	*po_curbuf[MAXCPU];	/* current log buffer */
878 };
879 
880 #define	PMC_PO_OWNS_LOGFILE		0x00000001 /* has a log file */
881 #define	PMC_PO_SHUTDOWN			0x00000010 /* in the process of shutdown */
882 #define	PMC_PO_INITIAL_MAPPINGS_DONE	0x00000020
883 
884 /*
885  * struct pmc_hw -- describe the state of the PMC hardware
886  *
887  * When in use, a HW PMC is associated with one allocated 'struct pmc'
888  * pointed to by field 'phw_pmc'.  When inactive, this field is NULL.
889  *
890  * On an SMP box, one or more HW PMC's in process virtual mode with
891  * the same 'phw_pmc' could be executing on different CPUs.  In order
892  * to handle this case correctly, we need to ensure that only
893  * incremental counts get added to the saved value in the associated
894  * 'struct pmc'.  The 'phw_save' field is used to keep the saved PMC
895  * value at the time the hardware is started during this context
896  * switch (i.e., the difference between the new (hardware) count and
897  * the saved count is atomically added to the count field in 'struct
898  * pmc' at context switch time).
899  *
900  */
901 
902 struct pmc_hw {
903 	uint32_t	phw_state;	/* see PHW_* macros below */
904 	struct pmc	*phw_pmc;	/* current thread PMC */
905 };
906 
907 #define	PMC_PHW_RI_MASK		0x000000FF
908 #define	PMC_PHW_CPU_SHIFT	8
909 #define	PMC_PHW_CPU_MASK	0x0000FF00
910 #define	PMC_PHW_FLAGS_SHIFT	16
911 #define	PMC_PHW_FLAGS_MASK	0xFFFF0000
912 
913 #define	PMC_PHW_INDEX_TO_STATE(ri)	((ri) & PMC_PHW_RI_MASK)
914 #define	PMC_PHW_STATE_TO_INDEX(state)	((state) & PMC_PHW_RI_MASK)
915 #define	PMC_PHW_CPU_TO_STATE(cpu)	(((cpu) << PMC_PHW_CPU_SHIFT) & \
916 	PMC_PHW_CPU_MASK)
917 #define	PMC_PHW_STATE_TO_CPU(state)	(((state) & PMC_PHW_CPU_MASK) >> \
918 	PMC_PHW_CPU_SHIFT)
919 #define	PMC_PHW_FLAGS_TO_STATE(flags)	(((flags) << PMC_PHW_FLAGS_SHIFT) & \
920 	PMC_PHW_FLAGS_MASK)
921 #define	PMC_PHW_STATE_TO_FLAGS(state)	(((state) & PMC_PHW_FLAGS_MASK) >> \
922 	PMC_PHW_FLAGS_SHIFT)
923 #define	PMC_PHW_FLAG_IS_ENABLED		(PMC_PHW_FLAGS_TO_STATE(0x01))
924 #define	PMC_PHW_FLAG_IS_SHAREABLE	(PMC_PHW_FLAGS_TO_STATE(0x02))
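
/*
 * For example, machine dependent code would typically initialize the
 * state for row index 'ri' on CPU 'cpu' as
 *
 *	phw->phw_state = PMC_PHW_INDEX_TO_STATE(ri) |
 *	    PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_FLAG_IS_ENABLED;
 *
 * and recover the pieces later with the PMC_PHW_STATE_TO_*() macros.
 */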
925 
926 /*
927  * struct pmc_sample
928  *
929  * Space for N (tunable) PC samples and associated control data.
930  */
931 
932 struct pmc_sample {
933 	uint16_t		ps_nsamples;	/* callchain depth */
934 	uint16_t		ps_nsamples_actual;
935 	uint16_t		ps_cpu;		/* cpu number */
936 	uint16_t		ps_flags;	/* other flags */
937 	lwpid_t			ps_tid;		/* thread id */
938 	pid_t			ps_pid;		/* process PID or -1 */
939 	struct thread		*ps_td;		/* which thread */
940 	struct pmc		*ps_pmc;	/* interrupting PMC */
941 	uintptr_t		*ps_pc;		/* (const) callchain start */
942 	uint64_t		ps_tsc;		/* tsc value */
943 };
944 
945 #define 	PMC_SAMPLE_FREE		((uint16_t) 0)
946 #define 	PMC_SAMPLE_INUSE	((uint16_t) 0xFFFF)
947 
948 struct pmc_samplebuffer {
949 	struct pmc_sample * volatile ps_read;	/* read pointer */
950 	struct pmc_sample * volatile ps_write;	/* write pointer */
951 	uintptr_t		*ps_callchains;	/* all saved call chains */
952 	struct pmc_sample	*ps_fence;	/* one beyond ps_samples[] */
953 	struct pmc_sample	ps_samples[];	/* array of sample entries */
954 };
955 
956 
957 /*
958  * struct pmc_cpustate
959  *
960  * A CPU is modelled as a collection of HW PMCs with space for additional
961  * flags.
962  */
963 
964 struct pmc_cpu {
965 	uint32_t	pc_state;	/* physical cpu number + flags */
966 	struct pmc_samplebuffer *pc_sb[3]; /* space for samples */
967 	struct pmc_hw	*pc_hwpmcs[];	/* 'npmc' pointers */
968 };
969 
970 #define	PMC_PCPU_CPU_MASK		0x000000FF
971 #define	PMC_PCPU_FLAGS_MASK		0xFFFFFF00
972 #define	PMC_PCPU_FLAGS_SHIFT		8
973 #define	PMC_PCPU_STATE_TO_CPU(S)	((S) & PMC_PCPU_CPU_MASK)
974 #define	PMC_PCPU_STATE_TO_FLAGS(S)	(((S) & PMC_PCPU_FLAGS_MASK) >> PMC_PCPU_FLAGS_SHIFT)
975 #define	PMC_PCPU_FLAGS_TO_STATE(F)	(((F) << PMC_PCPU_FLAGS_SHIFT) & PMC_PCPU_FLAGS_MASK)
976 #define	PMC_PCPU_CPU_TO_STATE(C)	((C) & PMC_PCPU_CPU_MASK)
977 #define	PMC_PCPU_FLAG_HTT		(PMC_PCPU_FLAGS_TO_STATE(0x1))
978 
979 /*
980  * struct pmc_binding
981  *
982  * CPU binding information.
983  */
984 
985 struct pmc_binding {
986 	int	pb_bound;	/* is bound? */
987 	int	pb_cpu;		/* if so, to which CPU */
988 };
989 
990 
991 struct pmc_mdep;
992 
993 /*
994  * struct pmc_classdep
995  *
996  * PMC class-dependent operations.
997  */
998 struct pmc_classdep {
999 	uint32_t	pcd_caps;	/* class capabilities */
1000 	enum pmc_class	pcd_class;	/* class id */
1001 	int		pcd_num;	/* number of PMCs */
1002 	int		pcd_ri;		/* row index of the first PMC in class */
1003 	int		pcd_width;	/* width of the PMC */
1004 
1005 	/* configuring/reading/writing the hardware PMCs */
1006 	int (*pcd_config_pmc)(int _cpu, int _ri, struct pmc *_pm);
1007 	int (*pcd_get_config)(int _cpu, int _ri, struct pmc **_ppm);
1008 	int (*pcd_read_pmc)(int _cpu, int _ri, pmc_value_t *_value);
1009 	int (*pcd_write_pmc)(int _cpu, int _ri, pmc_value_t _value);
1010 
1011 	/* pmc allocation/release */
1012 	int (*pcd_allocate_pmc)(int _cpu, int _ri, struct pmc *_t,
1013 		const struct pmc_op_pmcallocate *_a);
1014 	int (*pcd_release_pmc)(int _cpu, int _ri, struct pmc *_pm);
1015 
1016 	/* starting and stopping PMCs */
1017 	int (*pcd_start_pmc)(int _cpu, int _ri);
1018 	int (*pcd_stop_pmc)(int _cpu, int _ri);
1019 
1020 	/* description */
1021 	int (*pcd_describe)(int _cpu, int _ri, struct pmc_info *_pi,
1022 		struct pmc **_ppmc);
1023 
1024 	/* class-dependent initialization & finalization */
1025 	int (*pcd_pcpu_init)(struct pmc_mdep *_md, int _cpu);
1026 	int (*pcd_pcpu_fini)(struct pmc_mdep *_md, int _cpu);
1027 
1028 	/* machine-specific interface */
1029 	int (*pcd_get_msr)(int _ri, uint32_t *_msr);
1030 };
1031 
1032 /*
1033  * struct pmc_mdep
1034  *
1035  * Machine dependent bits needed per CPU type.
1036  */
1037 
1038 struct pmc_mdep  {
1039 	uint32_t	pmd_cputype;    /* from enum pmc_cputype */
1040 	uint32_t	pmd_npmc;	/* number of PMCs per CPU */
1041 	uint32_t	pmd_nclass;	/* number of PMC classes present */
1042 
1043 	/*
1044 	 * Machine dependent methods.
1045 	 */
1046 
1047 	/* per-cpu initialization and finalization */
1048 	int (*pmd_pcpu_init)(struct pmc_mdep *_md, int _cpu);
1049 	int (*pmd_pcpu_fini)(struct pmc_mdep *_md, int _cpu);
1050 
1051 	/* thread context switch in/out */
1052 	int (*pmd_switch_in)(struct pmc_cpu *_p, struct pmc_process *_pp);
1053 	int (*pmd_switch_out)(struct pmc_cpu *_p, struct pmc_process *_pp);
1054 
1055 	/* handle a PMC interrupt */
1056 	int (*pmd_intr)(struct trapframe *_tf);
1057 
1058 	/*
1059 	 * PMC class dependent information.
1060 	 */
1061 	struct pmc_classdep pmd_classdep[];
1062 };
1063 
1064 /*
1065  * Per-CPU state.  This is an array of 'mp_ncpus' pointers
1066  * to struct pmc_cpu descriptors.
1067  */
1068 
1069 extern struct pmc_cpu **pmc_pcpu;
1070 
1071 /* driver statistics */
1072 extern struct pmc_driverstats pmc_stats;
1073 
1074 #if	defined(HWPMC_DEBUG)
1075 #include <sys/ktr.h>
1076 
1077 /* debug flags, major flag groups */
1078 struct pmc_debugflags {
1079 	int	pdb_CPU;
1080 	int	pdb_CSW;
1081 	int	pdb_LOG;
1082 	int	pdb_MDP;
1083 	int	pdb_MOD;
1084 	int	pdb_OWN;
1085 	int	pdb_PMC;
1086 	int	pdb_PRC;
1087 	int	pdb_SAM;
1088 };
1089 
1090 extern struct pmc_debugflags pmc_debugflags;
1091 
1092 #define	KTR_PMC			KTR_SUBSYS
1093 
1094 #define	PMC_DEBUG_STRSIZE		128
1095 #define	PMC_DEBUG_DEFAULT_FLAGS		{ 0, 0, 0, 0, 0, 0, 0, 0, 0 }
1096 
1097 #define	PMCDBG0(M, N, L, F) do {					\
1098 	if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))	\
1099 		CTR0(KTR_PMC, #M ":" #N ":" #L  ": " F);		\
1100 } while (0)
1101 #define	PMCDBG1(M, N, L, F, p1) do {					\
1102 	if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))	\
1103 		CTR1(KTR_PMC, #M ":" #N ":" #L  ": " F, p1);		\
1104 } while (0)
1105 #define	PMCDBG2(M, N, L, F, p1, p2) do {				\
1106 	if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))	\
1107 		CTR2(KTR_PMC, #M ":" #N ":" #L  ": " F, p1, p2);	\
1108 } while (0)
1109 #define	PMCDBG3(M, N, L, F, p1, p2, p3) do {				\
1110 	if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))	\
1111 		CTR3(KTR_PMC, #M ":" #N ":" #L  ": " F, p1, p2, p3);	\
1112 } while (0)
1113 #define	PMCDBG4(M, N, L, F, p1, p2, p3, p4) do {			\
1114 	if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))	\
1115 		CTR4(KTR_PMC, #M ":" #N ":" #L  ": " F, p1, p2, p3, p4);\
1116 } while (0)
1117 #define	PMCDBG5(M, N, L, F, p1, p2, p3, p4, p5) do {			\
1118 	if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))	\
1119 		CTR5(KTR_PMC, #M ":" #N ":" #L  ": " F, p1, p2, p3, p4,	\
1120 		    p5);						\
1121 } while (0)
1122 #define	PMCDBG6(M, N, L, F, p1, p2, p3, p4, p5, p6) do {		\
1123 	if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))	\
1124 		CTR6(KTR_PMC, #M ":" #N ":" #L  ": " F, p1, p2, p3, p4,	\
1125 		    p5, p6);						\
1126 } while (0)
1127 
1128 /* Major numbers */
1129 #define	PMC_DEBUG_MAJ_CPU		0 /* cpu switches */
1130 #define	PMC_DEBUG_MAJ_CSW		1 /* context switches */
1131 #define	PMC_DEBUG_MAJ_LOG		2 /* logging */
1132 #define	PMC_DEBUG_MAJ_MDP		3 /* machine dependent */
1133 #define	PMC_DEBUG_MAJ_MOD		4 /* misc module infrastructure */
1134 #define	PMC_DEBUG_MAJ_OWN		5 /* owner */
1135 #define	PMC_DEBUG_MAJ_PMC		6 /* pmc management */
1136 #define	PMC_DEBUG_MAJ_PRC		7 /* processes */
1137 #define	PMC_DEBUG_MAJ_SAM		8 /* sampling */
1138 
1139 /* Minor numbers */
1140 
1141 /* Common (8 bits) */
1142 #define	PMC_DEBUG_MIN_ALL		0 /* allocation */
1143 #define	PMC_DEBUG_MIN_REL		1 /* release */
1144 #define	PMC_DEBUG_MIN_OPS		2 /* ops: start, stop, ... */
1145 #define	PMC_DEBUG_MIN_INI		3 /* init */
1146 #define	PMC_DEBUG_MIN_FND		4 /* find */
1147 
1148 /* MODULE */
1149 #define	PMC_DEBUG_MIN_PMH	       14 /* pmc_hook */
1150 #define	PMC_DEBUG_MIN_PMS	       15 /* pmc_syscall */
1151 
1152 /* OWN */
1153 #define	PMC_DEBUG_MIN_ORM		8 /* owner remove */
1154 #define	PMC_DEBUG_MIN_OMR		9 /* owner maybe remove */
1155 
1156 /* PROCESSES */
1157 #define	PMC_DEBUG_MIN_TLK		8 /* link target */
1158 #define	PMC_DEBUG_MIN_TUL		9 /* unlink target */
1159 #define	PMC_DEBUG_MIN_EXT	       10 /* process exit */
1160 #define	PMC_DEBUG_MIN_EXC	       11 /* process exec */
1161 #define	PMC_DEBUG_MIN_FRK	       12 /* process fork */
1162 #define	PMC_DEBUG_MIN_ATT	       13 /* attach/detach */
1163 #define	PMC_DEBUG_MIN_SIG	       14 /* signalling */
1164 
1165 /* CONTEXT SWITCHES */
1166 #define	PMC_DEBUG_MIN_SWI		8 /* switch in */
1167 #define	PMC_DEBUG_MIN_SWO		9 /* switch out */
1168 
1169 /* PMC */
1170 #define	PMC_DEBUG_MIN_REG		8 /* pmc register */
1171 #define	PMC_DEBUG_MIN_ALR		9 /* allocate row */
1172 
1173 /* MACHINE DEPENDENT LAYER */
1174 #define	PMC_DEBUG_MIN_REA		8 /* read */
1175 #define	PMC_DEBUG_MIN_WRI		9 /* write */
1176 #define	PMC_DEBUG_MIN_CFG	       10 /* config */
1177 #define	PMC_DEBUG_MIN_STA	       11 /* start */
1178 #define	PMC_DEBUG_MIN_STO	       12 /* stop */
1179 #define	PMC_DEBUG_MIN_INT	       13 /* interrupts */
1180 
1181 /* CPU */
1182 #define	PMC_DEBUG_MIN_BND		8 /* bind */
1183 #define	PMC_DEBUG_MIN_SEL		9 /* select */
1184 
1185 /* LOG */
1186 #define	PMC_DEBUG_MIN_GTB		8 /* get buf */
1187 #define	PMC_DEBUG_MIN_SIO		9 /* schedule i/o */
1188 #define	PMC_DEBUG_MIN_FLS	       10 /* flush */
1189 #define	PMC_DEBUG_MIN_SAM	       11 /* sample */
1190 #define	PMC_DEBUG_MIN_CLO	       12 /* close */
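
/*
 * Example usage (illustrative): emit a trace record for the PMC major
 * group, minor event ALL, tagged with level 1.  The record is logged
 * only if the corresponding bit is set in pmc_debugflags.pdb_PMC:
 *
 *	PMCDBG2(PMC,ALL,1, "pmc=%p mode=%d", pm, mode);
 */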
1191 
1192 #else
1193 #define	PMCDBG0(M, N, L, F)		/* nothing */
1194 #define	PMCDBG1(M, N, L, F, p1)
1195 #define	PMCDBG2(M, N, L, F, p1, p2)
1196 #define	PMCDBG3(M, N, L, F, p1, p2, p3)
1197 #define	PMCDBG4(M, N, L, F, p1, p2, p3, p4)
1198 #define	PMCDBG5(M, N, L, F, p1, p2, p3, p4, p5)
1199 #define	PMCDBG6(M, N, L, F, p1, p2, p3, p4, p5, p6)
1200 #endif
1201 
1202 /* declare a dedicated memory pool */
1203 MALLOC_DECLARE(M_PMC);
1204 
1205 /*
1206  * Functions
1207  */
1208 
1209 struct pmc_mdep *pmc_md_initialize(void);	/* MD init function */
1210 void	pmc_md_finalize(struct pmc_mdep *_md);	/* MD fini function */
1211 int	pmc_getrowdisp(int _ri);
1212 int	pmc_process_interrupt(int _ring, struct pmc *_pm, struct trapframe *_tf);
1213 int	pmc_save_kernel_callchain(uintptr_t *_cc, int _maxsamples,
1214     struct trapframe *_tf);
1215 int	pmc_save_user_callchain(uintptr_t *_cc, int _maxsamples,
1216     struct trapframe *_tf);
1217 struct pmc_mdep *pmc_mdep_alloc(int nclasses);
1218 void pmc_mdep_free(struct pmc_mdep *md);
1219 void pmc_flush_samples(int cpu);
1220 uint64_t pmc_rdtsc(void);
1221 #endif /* _KERNEL */
1222 #endif /* _SYS_PMC_H_ */
1223