1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2003-2008, Joseph Koshy
5  * Copyright (c) 2007 The FreeBSD Foundation
6  * All rights reserved.
7  *
8  * Portions of this software were developed by A. Joseph Koshy under
9  * sponsorship from the FreeBSD Foundation and Google, Inc.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32 
33 #ifndef _SYS_PMC_H_
34 #define	_SYS_PMC_H_
35 
36 #include <dev/hwpmc/pmc_events.h>
37 #include <sys/proc.h>
38 #include <sys/counter.h>
39 #include <machine/pmc_mdep.h>
40 #include <machine/profile.h>
41 #ifdef _KERNEL
42 #include <sys/epoch.h>
43 #include <ck_queue.h>
44 #endif
45 
46 #define	PMC_MODULE_NAME		"hwpmc"
47 #define	PMC_NAME_MAX		64 /* HW counter name size */
48 #define	PMC_CLASS_MAX		8  /* max #classes of PMCs per-system */
49 
50 /*
51  * Kernel<->userland API version number [MMmmpppp]
52  *
53  * Major numbers are to be incremented when an incompatible change to
54  * the ABI occurs that older clients will not be able to handle.
55  *
56  * Minor numbers are incremented when a backwards compatible change
57  * occurs that allows older correct programs to run unchanged.  For
58  * example, when support for a new PMC type is added.
59  *
60  * The patch version is incremented for every bug fix.
61  */
62 #define	PMC_VERSION_MAJOR	0x0A
63 #define	PMC_VERSION_MINOR	0x00
64 #define	PMC_VERSION_PATCH	0x0000
65 
66 #define	PMC_VERSION		(PMC_VERSION_MAJOR << 24 |		\
67 	PMC_VERSION_MINOR << 16 | PMC_VERSION_PATCH)
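/*
 * Example (illustrative sketch, not part of the ABI): a userland client
 * that has obtained the kernel module's version word (e.g. through the
 * GETMODULEVERSION operation) can split it back into its fields and
 * refuse to run across an incompatible major version:
 *
 *	uint32_t v;				(version word from the kernel)
 *	uint32_t major = (v >> 24) & 0xFF;
 *	uint32_t minor = (v >> 16) & 0xFF;
 *	uint32_t patch = v & 0xFFFF;
 *
 *	if (major != PMC_VERSION_MAJOR)
 *		return (EPROGMISMATCH);
 */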
68 
69 #define PMC_CPUID_LEN 64
70 /* cpu model name for pmu lookup */
71 extern char pmc_cpuid[PMC_CPUID_LEN];
72 
73 /*
74  * Kinds of CPUs known.
75  *
76  * We keep track of CPU variants that need to be distinguished in
77  * some way for PMC operations.  CPU names are grouped by manufacturer
78  * and numbered sparsely in order to minimize changes to the ABI involved
79  * when new CPUs are added.
80  *
81  * Please keep the pmc(3) manual page in sync with this list.
82  */
83 #define	__PMC_CPUS()								\
84     __PMC_CPU(AMD_K7,			0x00,	"AMD K7")			\
85     __PMC_CPU(AMD_K8,			0x01,	"AMD K8")			\
86     __PMC_CPU(INTEL_CORE,		0x87,	"Intel Core Solo/Duo")		\
87     __PMC_CPU(INTEL_CORE2,		0x88,	"Intel Core2")			\
88     __PMC_CPU(INTEL_CORE2EXTREME,	0x89,	"Intel Core2 Extreme")		\
89     __PMC_CPU(INTEL_ATOM,		0x8A,	"Intel Atom")			\
90     __PMC_CPU(INTEL_COREI7,		0x8B,	"Intel Core i7")		\
91     __PMC_CPU(INTEL_WESTMERE,		0x8C,	"Intel Westmere")		\
92     __PMC_CPU(INTEL_SANDYBRIDGE,	0x8D,	"Intel Sandy Bridge")		\
93     __PMC_CPU(INTEL_IVYBRIDGE,		0x8E,	"Intel Ivy Bridge")		\
94     __PMC_CPU(INTEL_SANDYBRIDGE_XEON,	0x8F,	"Intel Sandy Bridge Xeon")	\
95     __PMC_CPU(INTEL_IVYBRIDGE_XEON,	0x90,	"Intel Ivy Bridge Xeon")	\
96     __PMC_CPU(INTEL_HASWELL,		0x91,	"Intel Haswell")		\
97     __PMC_CPU(INTEL_ATOM_SILVERMONT,	0x92,	"Intel Atom Silvermont")	\
98     __PMC_CPU(INTEL_NEHALEM_EX,		0x93,	"Intel Nehalem Xeon 7500")	\
99     __PMC_CPU(INTEL_WESTMERE_EX,	0x94,	"Intel Westmere Xeon E7")	\
100     __PMC_CPU(INTEL_HASWELL_XEON,	0x95,	"Intel Haswell Xeon E5 v3")	\
101     __PMC_CPU(INTEL_BROADWELL,		0x96,	"Intel Broadwell")		\
102     __PMC_CPU(INTEL_BROADWELL_XEON,	0x97,	"Intel Broadwell Xeon")		\
103     __PMC_CPU(INTEL_SKYLAKE,		0x98,	"Intel Skylake")		\
104     __PMC_CPU(INTEL_SKYLAKE_XEON,	0x99,	"Intel Skylake Xeon")		\
105     __PMC_CPU(INTEL_ATOM_GOLDMONT,	0x9A,	"Intel Atom Goldmont")		\
106     __PMC_CPU(INTEL_ICELAKE,		0x9B,	"Intel Icelake")		\
107     __PMC_CPU(INTEL_ICELAKE_XEON,	0x9C,	"Intel Icelake Xeon")		\
108     __PMC_CPU(INTEL_ALDERLAKE,		0x9D,	"Intel Alderlake")		\
109     __PMC_CPU(INTEL_ATOM_GOLDMONT_P,	0x9E,	"Intel Atom Goldmont Plus")	\
110     __PMC_CPU(INTEL_ATOM_TREMONT,	0x9F,	"Intel Atom Tremont")		\
111     __PMC_CPU(INTEL_XSCALE,		0x100,	"Intel XScale")			\
112     __PMC_CPU(PPC_7450,			0x300,	"PowerPC MPC7450")		\
113     __PMC_CPU(PPC_E500,			0x340,	"PowerPC e500 Core")		\
114     __PMC_CPU(PPC_970,			0x380,	"IBM PowerPC 970")		\
115     __PMC_CPU(PPC_POWER8,		0x390,	"IBM POWER8")			\
116     __PMC_CPU(GENERIC,			0x400,	"Generic")			\
117     __PMC_CPU(ARMV7_CORTEX_A5,		0x500,	"ARMv7 Cortex A5")		\
118     __PMC_CPU(ARMV7_CORTEX_A7,		0x501,	"ARMv7 Cortex A7")		\
119     __PMC_CPU(ARMV7_CORTEX_A8,		0x502,	"ARMv7 Cortex A8")		\
120     __PMC_CPU(ARMV7_CORTEX_A9,		0x503,	"ARMv7 Cortex A9")		\
121     __PMC_CPU(ARMV7_CORTEX_A15,		0x504,	"ARMv7 Cortex A15")		\
122     __PMC_CPU(ARMV7_CORTEX_A17,		0x505,	"ARMv7 Cortex A17")		\
123     __PMC_CPU(ARMV8_CORTEX_A53,		0x600,	"ARMv8 Cortex A53")		\
124     __PMC_CPU(ARMV8_CORTEX_A57,		0x601,	"ARMv8 Cortex A57")		\
125     __PMC_CPU(ARMV8_CORTEX_A76,		0x602,	"ARMv8 Cortex A76")
126 
127 enum pmc_cputype {
128 #undef	__PMC_CPU
129 #define	__PMC_CPU(S,V,D)	PMC_CPU_##S = V,
130 	__PMC_CPUS()
131 };
132 
133 #define	PMC_CPU_FIRST	PMC_CPU_AMD_K7
134 #define	PMC_CPU_LAST	PMC_CPU_ARMV8_CORTEX_A76
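/*
 * Example (illustrative sketch): because the CPU list above is an
 * X-macro, re-defining __PMC_CPU() lets other tables be generated from
 * the same master list; a hypothetical name-lookup helper could expand
 * it into a switch statement:
 *
 *	static const char *
 *	pmc_cputype_to_name(enum pmc_cputype ct)
 *	{
 *		switch (ct) {
 *	#undef	__PMC_CPU
 *	#define	__PMC_CPU(S, V, D)	case PMC_CPU_##S: return (D);
 *		__PMC_CPUS()
 *		default:
 *			return ("unknown");
 *		}
 *	}
 */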
135 
136 /*
137  * Classes of PMCs
138  */
139 #define	__PMC_CLASSES()								\
140     __PMC_CLASS(TSC,		0x00,	"CPU Timestamp counter")		\
141     __PMC_CLASS(K7,		0x01,	"AMD K7 performance counters")		\
142     __PMC_CLASS(K8,		0x02,	"AMD K8 performance counters")		\
143     __PMC_CLASS(IAF,		0x06,	"Intel Core2/Atom, fixed function")	\
144     __PMC_CLASS(IAP,		0x07,	"Intel Core...Atom, programmable")	\
145     __PMC_CLASS(UCF,		0x08,	"Intel Uncore fixed function")		\
146     __PMC_CLASS(UCP,		0x09,	"Intel Uncore programmable")		\
147     __PMC_CLASS(XSCALE,		0x0A,	"Intel XScale counters")		\
148     __PMC_CLASS(PPC7450,	0x0D,	"Motorola MPC7450 class")		\
149     __PMC_CLASS(PPC970,		0x0E,	"IBM PowerPC 970 class")		\
150     __PMC_CLASS(SOFT,		0x0F,	"Software events")			\
151     __PMC_CLASS(ARMV7,		0x10,	"ARMv7")				\
152     __PMC_CLASS(ARMV8,		0x11,	"ARMv8")				\
153     __PMC_CLASS(E500,		0x13,	"Freescale e500 class")			\
154     __PMC_CLASS(POWER8,		0x15,	"IBM POWER8 class")			\
155     __PMC_CLASS(DMC620_PMU_CD2,	0x16,	"ARM DMC620 Memory Controller PMU CLKDIV2") \
156     __PMC_CLASS(DMC620_PMU_C,	0x17,	"ARM DMC620 Memory Controller PMU CLK")	\
157     __PMC_CLASS(CMN600_PMU,	0x18,	"Arm CoreLink CMN600 Coherent Mesh Network PMU")
158 
159 enum pmc_class {
160 #undef  __PMC_CLASS
161 #define	__PMC_CLASS(S,V,D)	PMC_CLASS_##S = V,
162 	__PMC_CLASSES()
163 };
164 
165 #define	PMC_CLASS_FIRST	PMC_CLASS_TSC
166 #define	PMC_CLASS_LAST	PMC_CLASS_CMN600_PMU
167 
168 /*
169  * A PMC can be in the following states:
170  *
171  * Hardware states:
172  *   DISABLED   -- administratively prohibited from being used.
173  *   FREE       -- HW available for use
174  * Software states:
175  *   ALLOCATED  -- allocated
176  *   STOPPED    -- allocated, but not counting events
177  *   RUNNING    -- allocated, and in operation; 'pm_runcount'
178  *                 holds the number of CPUs using this PMC at
179  *                 a given instant
180  *   DELETED    -- being destroyed
181  */
182 
183 #define	__PMC_HWSTATES()			\
184 	__PMC_STATE(DISABLED)			\
185 	__PMC_STATE(FREE)
186 
187 #define	__PMC_SWSTATES()			\
188 	__PMC_STATE(ALLOCATED)			\
189 	__PMC_STATE(STOPPED)			\
190 	__PMC_STATE(RUNNING)			\
191 	__PMC_STATE(DELETED)
192 
193 #define	__PMC_STATES()				\
194 	__PMC_HWSTATES()			\
195 	__PMC_SWSTATES()
196 
197 enum pmc_state {
198 #undef	__PMC_STATE
199 #define	__PMC_STATE(S)	PMC_STATE_##S,
200 	__PMC_STATES()
201 	__PMC_STATE(MAX)
202 };
203 
204 #define	PMC_STATE_FIRST	PMC_STATE_DISABLED
205 #define	PMC_STATE_LAST	PMC_STATE_DELETED
206 
207 /*
208  * An allocated PMC may be used as a 'global' counter or as a
209  * 'thread-private' one.  Each such mode of use can be in either
210  * statistical sampling mode or in counting mode.  Thus a PMC in use
211  * is in one of the following four modes:
212  * SS i.e., SYSTEM STATISTICAL  -- system-wide statistical profiling
213  * SC i.e., SYSTEM COUNTER      -- system-wide counting mode
214  * TS i.e., THREAD STATISTICAL  -- thread virtual, statistical profiling
215  * TC i.e., THREAD COUNTER      -- thread virtual, counting mode
216  *
217  * Statistical profiling modes rely on the PMC periodically delivering
218  * an interrupt to the CPU (when the configured number of events has
219  * been measured), so the PMC must have the ability to generate
220  * interrupts.
221  *
222  * In counting modes, the PMC counts its configured events, with the
223  * value of the PMC being read whenever needed by its owner process.
224  *
225  * The thread specific modes "virtualize" the PMCs -- the PMCs appear
226  * to be thread private and count events only when the profiled thread
227  * actually executes on the CPU.
228  *
229  * The system-wide "global" modes keep the PMCs running all the time
230  * and are used to measure the behaviour of the whole system.
231  */
232 
233 #define	__PMC_MODES()				\
234 	__PMC_MODE(SS,	0)			\
235 	__PMC_MODE(SC,	1)			\
236 	__PMC_MODE(TS,	2)			\
237 	__PMC_MODE(TC,	3)
238 
239 enum pmc_mode {
240 #undef	__PMC_MODE
241 #define	__PMC_MODE(M,N)	PMC_MODE_##M = N,
242 	__PMC_MODES()
243 };
244 
245 #define	PMC_MODE_FIRST	PMC_MODE_SS
246 #define	PMC_MODE_LAST	PMC_MODE_TC
247 
248 #define	PMC_IS_COUNTING_MODE(mode)				\
249 	((mode) == PMC_MODE_SC || (mode) == PMC_MODE_TC)
250 #define	PMC_IS_SYSTEM_MODE(mode)				\
251 	((mode) == PMC_MODE_SS || (mode) == PMC_MODE_SC)
252 #define	PMC_IS_SAMPLING_MODE(mode)				\
253 	((mode) == PMC_MODE_SS || (mode) == PMC_MODE_TS)
254 #define	PMC_IS_VIRTUAL_MODE(mode)				\
255 	((mode) == PMC_MODE_TS || (mode) == PMC_MODE_TC)
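/*
 * Example (illustrative sketch): the predicates above factor the four
 * modes along two independent axes (system vs. thread scope, sampling
 * vs. counting), so callers can branch on each axis separately; the
 * helper names below are hypothetical:
 *
 *	if (PMC_IS_SAMPLING_MODE(mode))
 *		program_reload_count(pm, count);	(arm for interrupts)
 *	else
 *		record_initial_value(pm, count);
 *
 *	if (PMC_IS_SYSTEM_MODE(mode))
 *		bind_to_requested_cpu(pm, cpu);
 */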
256 
257 /*
258  * PMC row disposition
259  */
260 
261 #define	__PMC_DISPOSITIONS()					\
262 	__PMC_DISP(STANDALONE)	/* global/disabled counters */	\
263 	__PMC_DISP(FREE)	/* free/available */		\
264 	__PMC_DISP(THREAD)	/* thread-virtual PMCs */	\
265 	__PMC_DISP(UNKNOWN)	/* sentinel */
266 
267 enum pmc_disp {
268 #undef	__PMC_DISP
269 #define	__PMC_DISP(D)	PMC_DISP_##D ,
270 	__PMC_DISPOSITIONS()
271 };
272 
273 #define	PMC_DISP_FIRST	PMC_DISP_STANDALONE
274 #define	PMC_DISP_LAST	PMC_DISP_THREAD
275 
276 /*
277  * Counter capabilities
278  *
279  * __PMC_CAPS(NAME, VALUE, DESCRIPTION)
280  */
281 
282 #define	__PMC_CAPS()							\
283 	__PMC_CAP(INTERRUPT,	0, "generate interrupts")		\
284 	__PMC_CAP(USER,		1, "count user-mode events")		\
285 	__PMC_CAP(SYSTEM,	2, "count system-mode events")		\
286 	__PMC_CAP(EDGE,		3, "do edge detection of events")	\
287 	__PMC_CAP(THRESHOLD,	4, "ignore events below a threshold")	\
288 	__PMC_CAP(READ,		5, "read PMC counter")			\
289 	__PMC_CAP(WRITE,	6, "reprogram PMC counter")		\
290 	__PMC_CAP(INVERT,	7, "invert comparison sense")		\
291 	__PMC_CAP(QUALIFIER,	8, "further qualify monitored events")	\
292 	__PMC_CAP(PRECISE,	9, "perform precise sampling")		\
293 	__PMC_CAP(TAGGING,	10, "tag upstream events")		\
294 	__PMC_CAP(CASCADE,	11, "cascade counters")			\
295 	__PMC_CAP(SYSWIDE,	12, "system wide counter")		\
296 	__PMC_CAP(DOMWIDE,	13, "NUMA domain wide counter")
297 
298 enum pmc_caps
299 {
300 #undef	__PMC_CAP
301 #define	__PMC_CAP(NAME, VALUE, DESCR)	PMC_CAP_##NAME = (1 << VALUE) ,
302 	__PMC_CAPS()
303 };
304 
305 #define	PMC_CAP_FIRST		PMC_CAP_INTERRUPT
306 #define	PMC_CAP_LAST		PMC_CAP_DOMWIDE
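/*
 * Example (illustrative sketch): capabilities are one bit each, so a
 * caller can check that everything it asked for is supported with plain
 * mask arithmetic ('supported' would come from, e.g., the pm_caps field
 * of a struct pmc_classinfo, defined further below):
 *
 *	uint32_t wanted = PMC_CAP_USER | PMC_CAP_SYSTEM | PMC_CAP_INTERRUPT;
 *
 *	if ((wanted & supported) != wanted)
 *		return (EOPNOTSUPP);
 */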
307 
308 /*
309  * PMC Event Numbers
310  *
311  * These are generated from the definitions in "dev/hwpmc/pmc_events.h".
312  */
313 
314 enum pmc_event {
315 #undef	__PMC_EV
316 #undef	__PMC_EV_BLOCK
317 #define	__PMC_EV_BLOCK(C,V)	PMC_EV_ ## C ## __BLOCK_START = (V) - 1 ,
318 #define	__PMC_EV(C,N)		PMC_EV_ ## C ## _ ## N ,
319 	__PMC_EVENTS()
320 };
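/*
 * Example (illustrative sketch, with made-up event names): a
 * __PMC_EV_BLOCK() entry seeds the implicit enum counter at one below
 * its base value, so the events that follow it are numbered
 * consecutively from that base:
 *
 *	__PMC_EV_BLOCK(FOO, 0x1000)	->	PMC_EV_FOO__BLOCK_START = 0x0FFF
 *	__PMC_EV(FOO, BAR)		->	PMC_EV_FOO_BAR = 0x1000
 *	__PMC_EV(FOO, BAZ)		->	PMC_EV_FOO_BAZ = 0x1001
 */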
321 
322 /*
323  * PMC SYSCALL INTERFACE
324  */
325 
326 /*
327  * "PMC_OPS" -- these are the commands recognized by the kernel
328  * module, and are used when performing a system call from userland.
329  */
330 #define	__PMC_OPS()							\
331 	__PMC_OP(CONFIGURELOG, "Set log file")				\
332 	__PMC_OP(FLUSHLOG, "Flush log file")				\
333 	__PMC_OP(GETCPUINFO, "Get system CPU information")		\
334 	__PMC_OP(GETDRIVERSTATS, "Get driver statistics")		\
335 	__PMC_OP(GETMODULEVERSION, "Get module version")		\
336 	__PMC_OP(GETPMCINFO, "Get per-cpu PMC information")		\
337 	__PMC_OP(PMCADMIN, "Set PMC state")				\
338 	__PMC_OP(PMCALLOCATE, "Allocate and configure a PMC")		\
339 	__PMC_OP(PMCATTACH, "Attach a PMC to a process")		\
340 	__PMC_OP(PMCDETACH, "Detach a PMC from a process")		\
341 	__PMC_OP(PMCGETMSR, "Get a PMC's hardware address")		\
342 	__PMC_OP(PMCRELEASE, "Release a PMC")				\
343 	__PMC_OP(PMCRW, "Read/Set a PMC")				\
344 	__PMC_OP(PMCSETCOUNT, "Set initial count/sampling rate")	\
345 	__PMC_OP(PMCSTART, "Start a PMC")				\
346 	__PMC_OP(PMCSTOP, "Stop a PMC")					\
347 	__PMC_OP(WRITELOG, "Write a cookie to the log file")		\
348 	__PMC_OP(CLOSELOG, "Close log file")				\
349 	__PMC_OP(GETDYNEVENTINFO, "Get dynamic events list")
350 
351 enum pmc_ops {
352 #undef	__PMC_OP
353 #define	__PMC_OP(N, D)	PMC_OP_##N,
354 	__PMC_OPS()
355 };
356 
357 /*
358  * Flags used in operations on PMCs.
359  */
360 
361 #define	PMC_F_UNUSED1		0x00000001 /* unused */
362 #define	PMC_F_DESCENDANTS	0x00000002 /*OP ALLOCATE track descendants */
363 #define	PMC_F_LOG_PROCCSW	0x00000004 /*OP ALLOCATE track ctx switches */
364 #define	PMC_F_LOG_PROCEXIT	0x00000008 /*OP ALLOCATE log proc exits */
365 #define	PMC_F_NEWVALUE		0x00000010 /*OP RW write new value */
366 #define	PMC_F_OLDVALUE		0x00000020 /*OP RW get old value */
367 
368 /* V2 API */
369 #define	PMC_F_CALLCHAIN		0x00000080 /*OP ALLOCATE capture callchains */
370 #define	PMC_F_USERCALLCHAIN	0x00000100 /*OP ALLOCATE use userspace stack */
371 
372 /* internal flags */
373 #define	PMC_F_ATTACHED_TO_OWNER	0x00010000 /*attached to owner*/
374 #define	PMC_F_NEEDS_LOGFILE	0x00020000 /*needs log file */
375 #define	PMC_F_ATTACH_DONE	0x00040000 /*attached at least once */
376 
377 #define	PMC_CALLCHAIN_DEPTH_MAX	512
378 
379 #define	PMC_CC_F_USERSPACE	0x01	   /*userspace callchain*/
380 
381 /*
382  * Cookies used to denote allocated PMCs, and the values of PMCs.
383  */
384 
385 typedef uint32_t	pmc_id_t;
386 typedef uint64_t	pmc_value_t;
387 
388 #define	PMC_ID_INVALID		(~ (pmc_id_t) 0)
389 
390 /*
391  * PMC IDs have the following format:
392  *
393  * +------------+----------+-------+-----------+
394  * |    CPU     | PMC MODE | CLASS | ROW INDEX |
395  * +------------+----------+-------+-----------+
396  *
397  * where CPU is 12 bits, MODE 4, CLASS 8, and ROW INDEX 8.  Field 'CPU'
398  * is set to the requested CPU for system-wide PMCs or PMC_CPU_ANY for
399  * process-mode PMCs.  Field 'PMC MODE' is the allocated PMC mode.
400  * Field 'PMC CLASS' is the class of the PMC.  Field 'ROW INDEX' is the
401  * row index for the PMC.
402  *
403  * The 'ROW INDEX' ranges over 0..NHWPMCS-1 where NHWPMCS is the total
404  * number of hardware PMCs on this cpu.
405  */
406 
407 #define	PMC_ID_TO_ROWINDEX(ID)	((ID) & 0xFF)
408 #define	PMC_ID_TO_CLASS(ID)	(((ID) & 0xFF00) >> 8)
409 #define	PMC_ID_TO_MODE(ID)	(((ID) & 0xF0000) >> 16)
410 #define	PMC_ID_TO_CPU(ID)	(((ID) & 0xFFF00000) >> 20)
411 #define	PMC_ID_MAKE_ID(CPU,MODE,CLASS,ROWINDEX)			\
412 	((((CPU) & 0xFFF) << 20) | (((MODE) & 0xF) << 16) |	\
413 	(((CLASS) & 0xFF) << 8) | ((ROWINDEX) & 0xFF))
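/*
 * Example (illustrative sketch): the id macros round-trip; composing an
 * id and then decoding it recovers each field:
 *
 *	pmc_id_t id = PMC_ID_MAKE_ID(2, PMC_MODE_SS, PMC_CLASS_IAP, 3);
 *
 *	PMC_ID_TO_CPU(id)	== 2
 *	PMC_ID_TO_MODE(id)	== PMC_MODE_SS
 *	PMC_ID_TO_CLASS(id)	== PMC_CLASS_IAP
 *	PMC_ID_TO_ROWINDEX(id)	== 3
 */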
414 
415 /*
416  * Data structures for system calls supported by the pmc driver.
417  */
418 
419 /*
420  * OP PMCALLOCATE
421  *
422  * Allocate a PMC on the named CPU.
423  */
424 
425 #define	PMC_CPU_ANY	~0
426 
427 struct pmc_op_pmcallocate {
428 	uint32_t	pm_caps;	/* PMC_CAP_* */
429 	uint32_t	pm_cpu;		/* CPU number or PMC_CPU_ANY */
430 	enum pmc_class	pm_class;	/* class of PMC desired */
431 	enum pmc_event	pm_ev;		/* [enum pmc_event] desired */
432 	uint32_t	pm_flags;	/* additional modifiers PMC_F_* */
433 	enum pmc_mode	pm_mode;	/* desired mode */
434 	pmc_id_t	pm_pmcid;	/* [return] process pmc id */
435 	pmc_value_t	pm_count;	/* initial/sample count */
436 
437 	union pmc_md_op_pmcallocate pm_md; /* MD layer extensions */
438 };
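/*
 * Example (illustrative sketch): a request for a thread-virtual,
 * sampling-mode software PMC might be filled in as below before being
 * submitted with the PMCALLOCATE operation; 'pm_pmcid' is written back
 * by the kernel on success (the event chosen here is only an example):
 *
 *	struct pmc_op_pmcallocate req;
 *
 *	memset(&req, 0, sizeof(req));
 *	req.pm_class = PMC_CLASS_SOFT;
 *	req.pm_ev    = PMC_EV_SOFT_FIRST;
 *	req.pm_mode  = PMC_MODE_TS;
 *	req.pm_cpu   = PMC_CPU_ANY;
 *	req.pm_caps  = PMC_CAP_USER | PMC_CAP_SYSTEM | PMC_CAP_INTERRUPT;
 *	req.pm_flags = PMC_F_CALLCHAIN;
 *	req.pm_count = 65536;			(sampling reload count)
 */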
439 
440 /*
441  * OP PMCADMIN
442  *
443  * Set the administrative state (i.e., whether enabled or disabled) of
444  * a PMC 'pm_pmc' on CPU 'pm_cpu'.  Note that 'pm_pmc' specifies an
445  * absolute PMC number and need not have been first allocated by the
446  * calling process.
447  */
448 
449 struct pmc_op_pmcadmin {
450 	int		pm_cpu;		/* CPU# */
451 	uint32_t	pm_flags;	/* flags */
452 	int		pm_pmc;         /* PMC# */
453 	enum pmc_state  pm_state;	/* desired state */
454 };
455 
456 /*
457  * OP PMCATTACH / OP PMCDETACH
458  *
459  * Attach/detach a PMC and a process.
460  */
461 
462 struct pmc_op_pmcattach {
463 	pmc_id_t	pm_pmc;		/* PMC to attach to */
464 	pid_t		pm_pid;		/* target process */
465 };
466 
467 /*
468  * OP PMCSETCOUNT
469  *
470  * Set the sampling rate (i.e., the reload count) for statistical counters.
471  * 'pm_pmcid' needs to have been previously allocated using PMCALLOCATE.
472  */
473 
474 struct pmc_op_pmcsetcount {
475 	pmc_value_t	pm_count;	/* initial/sample count */
476 	pmc_id_t	pm_pmcid;	/* PMC id to set */
477 };
478 
479 /*
480  * OP PMCRW
481  *
482  * Read or write the value of a PMC named by 'pm_pmcid'.  'pm_pmcid' needs
483  * to have been previously allocated using PMCALLOCATE.
484  */
485 
486 struct pmc_op_pmcrw {
487 	uint32_t	pm_flags;	/* PMC_F_{OLD,NEW}VALUE*/
488 	pmc_id_t	pm_pmcid;	/* pmc id */
489 	pmc_value_t	pm_value;	/* new&returned value */
490 };
491 
492 /*
493  * OP GETPMCINFO
494  *
495  * Retrieve PMC state for a named CPU.  The caller is expected to
496  * allocate space for 'npmc' 'struct pmc_info' structures to hold the
497  * return values.
498  */
499 
500 struct pmc_info {
501 	char		pm_name[PMC_NAME_MAX]; /* pmc name */
502 	enum pmc_class	pm_class;	/* enum pmc_class */
503 	int		pm_enabled;	/* whether enabled */
504 	enum pmc_disp	pm_rowdisp;	/* FREE, THREAD or STANDALONE */
505 	pid_t		pm_ownerpid;	/* owner, or -1 */
506 	enum pmc_mode	pm_mode;	/* current mode [enum pmc_mode] */
507 	enum pmc_event	pm_event;	/* current event */
508 	uint32_t	pm_flags;	/* current flags */
509 	pmc_value_t	pm_reloadcount;	/* sampling counters only */
510 };
511 
512 struct pmc_op_getpmcinfo {
513 	int32_t		pm_cpu;		/* 0 <= cpu < mp_maxid */
514 	struct pmc_info	pm_pmcs[];	/* space for 'npmc' structures */
515 };
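/*
 * Example (illustrative sketch): 'pm_pmcs' is a flexible array member,
 * so the caller sizes the buffer using the PMC count ('pm_npmc')
 * previously returned by the GETCPUINFO operation:
 *
 *	size_t len = sizeof(struct pmc_op_getpmcinfo) +
 *	    npmc * sizeof(struct pmc_info);
 *	struct pmc_op_getpmcinfo *gpi = malloc(len);	(check for NULL)
 *
 *	gpi->pm_cpu = cpu;
 */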
516 
517 /*
518  * OP GETCPUINFO
519  *
520  * Retrieve system CPU information.
521  */
522 
523 struct pmc_classinfo {
524 	enum pmc_class	pm_class;	/* class id */
525 	uint32_t	pm_caps;	/* counter capabilities */
526 	uint32_t	pm_width;	/* width of the PMC */
527 	uint32_t	pm_num;		/* number of PMCs in class */
528 };
529 
530 struct pmc_op_getcpuinfo {
531 	enum pmc_cputype pm_cputype; /* what kind of CPU */
532 	uint32_t	pm_ncpu;    /* max CPU number */
533 	uint32_t	pm_npmc;    /* #PMCs per CPU */
534 	uint32_t	pm_nclass;  /* #classes of PMCs */
535 	struct pmc_classinfo  pm_classes[PMC_CLASS_MAX];
536 };
537 
538 /*
539  * OP CONFIGURELOG
540  *
541  * Configure a log file for writing system-wide statistics to.
542  */
543 
544 struct pmc_op_configurelog {
545 	int		pm_flags;
546 	int		pm_logfd;   /* logfile fd (or -1) */
547 };
548 
549 /*
550  * OP GETDRIVERSTATS
551  *
552  * Retrieve pmc(4) driver-wide statistics.
553  */
554 #ifdef _KERNEL
555 struct pmc_driverstats {
556 	counter_u64_t	pm_intr_ignored;	/* #interrupts ignored */
557 	counter_u64_t	pm_intr_processed;	/* #interrupts processed */
558 	counter_u64_t	pm_intr_bufferfull;	/* #interrupts with ENOSPC */
559 	counter_u64_t	pm_syscalls;		/* #syscalls */
560 	counter_u64_t	pm_syscall_errors;	/* #syscalls with errors */
561 	counter_u64_t	pm_buffer_requests;	/* #buffer requests */
562 	counter_u64_t	pm_buffer_requests_failed; /* #failed buffer requests */
563 	counter_u64_t	pm_log_sweeps;		/* #sample buffer processing
564 						   passes */
565 	counter_u64_t	pm_merges;		/* merged k+u */
566 	counter_u64_t	pm_overwrites;		/* UR overwrites */
567 };
568 #endif
569 
570 struct pmc_op_getdriverstats {
571 	unsigned int	pm_intr_ignored;	/* #interrupts ignored */
572 	unsigned int	pm_intr_processed;	/* #interrupts processed */
573 	unsigned int	pm_intr_bufferfull;	/* #interrupts with ENOSPC */
574 	unsigned int	pm_syscalls;		/* #syscalls */
575 	unsigned int	pm_syscall_errors;	/* #syscalls with errors */
576 	unsigned int	pm_buffer_requests;	/* #buffer requests */
577 	unsigned int	pm_buffer_requests_failed; /* #failed buffer requests */
578 	unsigned int	pm_log_sweeps;		/* #sample buffer processing
579 						   passes */
580 };
581 
582 /*
583  * OP RELEASE / OP START / OP STOP
584  *
585  * Simple operations on a PMC id.
586  */
587 
588 struct pmc_op_simple {
589 	pmc_id_t	pm_pmcid;
590 };
591 
592 /*
593  * OP WRITELOG
594  *
595  * Flush the current log buffer and write 4 bytes of user data to it.
596  */
597 
598 struct pmc_op_writelog {
599 	uint32_t	pm_userdata;
600 };
601 
602 /*
603  * OP GETMSR
604  *
605  * Retrieve the machine specific address associated with the allocated
606  * PMC.  This number can be used subsequently with a read-performance-counter
607  * instruction.
608  */
609 
610 struct pmc_op_getmsr {
611 	uint32_t	pm_msr;		/* machine specific address */
612 	pmc_id_t	pm_pmcid;	/* allocated pmc id */
613 };
614 
615 /*
616  * OP GETDYNEVENTINFO
617  *
618  * Retrieve a PMC dynamic class events list.
619  */
620 
621 struct pmc_dyn_event_descr {
622 	char		pm_ev_name[PMC_NAME_MAX];
623 	enum pmc_event	pm_ev_code;
624 };
625 
626 struct pmc_op_getdyneventinfo {
627 	enum pmc_class			pm_class;
628 	unsigned int			pm_nevent;
629 	struct pmc_dyn_event_descr	pm_events[PMC_EV_DYN_COUNT];
630 };
631 
632 #ifdef _KERNEL
633 
634 #include <sys/malloc.h>
635 #include <sys/sysctl.h>
636 #include <sys/_cpuset.h>
637 
638 #include <machine/frame.h>
639 
640 #define	PMC_HASH_SIZE				1024
641 #define	PMC_MTXPOOL_SIZE			2048
642 #define	PMC_LOG_BUFFER_SIZE			256
643 #define	PMC_NLOGBUFFERS_PCPU			32
644 #define	PMC_NSAMPLES				256
645 #define	PMC_CALLCHAIN_DEPTH			128
646 #define	PMC_THREADLIST_MAX			128
647 
648 #define PMC_SYSCTL_NAME_PREFIX "kern." PMC_MODULE_NAME "."
649 
650 /*
651  * Locking keys
652  *
653  * (b) - pmc_bufferlist_mtx (spin lock)
654  * (k) - pmc_kthread_mtx (sleep lock)
655  * (o) - po->po_mtx (spin lock)
656  * (g) - global_epoch_preempt (epoch)
657  * (p) - pmc_sx (sx)
658  */
659 
660 /*
661  * PMC commands
662  */
663 
664 struct pmc_syscall_args {
665 	register_t	pmop_code;	/* one of PMC_OP_* */
666 	void		*pmop_data;	/* syscall parameter */
667 };
668 
669 /*
670  * Interface to processor-specific stuff
671  */
672 
673 /*
674  * struct pmc_descr
675  *
676  * Machine independent (i.e., the common parts) of a human readable
677  * PMC description.
678  */
679 
680 struct pmc_descr {
681 	char		pd_name[PMC_NAME_MAX]; /* name */
682 	uint32_t	pd_caps;	/* capabilities */
683 	enum pmc_class	pd_class;	/* class of the PMC */
684 	uint32_t	pd_width;	/* width in bits */
685 };
686 
687 /*
688  * struct pmc_target
689  *
690  * This structure records all the target processes associated with a
691  * PMC.
692  */
693 
694 struct pmc_target {
695 	LIST_ENTRY(pmc_target)	pt_next;
696 	struct pmc_process	*pt_process; /* target descriptor */
697 };
698 
699 /*
700  * struct pmc
701  *
702  * Describes each allocated PMC.
703  *
704  * Each PMC has precisely one owner, namely the process that allocated
705  * the PMC.
706  *
707  * A PMC may be attached to multiple target processes.  The
708  * 'pm_targets' field links all the target processes being monitored
709  * by this PMC.
710  *
711  * The 'pm_savedvalue' field is protected by a mutex.
712  *
713  * On a multi-cpu machine, multiple target threads associated with a
714  * process-virtual PMC could be concurrently executing on different
715  * CPUs.  The 'pm_runcount' field is atomically incremented every time
716  * the PMC gets scheduled on a CPU and atomically decremented when it
717  * get descheduled.  Deletion of a PMC is only permitted when this
718  * gets descheduled.  Deletion of a PMC is only permitted when this
719  *
720  */
721 struct pmc_pcpu_state {
722 	uint32_t pps_overflowcnt;	/* count overflow interrupts */
723 	uint8_t pps_stalled;
724 	uint8_t pps_cpustate;
725 } __aligned(CACHE_LINE_SIZE);
726 struct pmc {
727 	LIST_HEAD(,pmc_target)	pm_targets;	/* list of target processes */
728 	LIST_ENTRY(pmc)		pm_next;	/* owner's list */
729 
730 	/*
731 	 * System-wide PMCs are allocated on a CPU and are not moved
732 	 * around.  For system-wide PMCs we record the CPU the PMC was
733 	 * allocated on in the 'CPU' field of the pmc ID.
734 	 *
735 	 * Virtual PMCs run on whichever CPU is currently executing
736 	 * their targets' threads.  For these PMCs we need to save
737 	 * their current PMC counter values when they are taken off
738 	 * CPU.
739 	 */
740 
741 	union {
742 		pmc_value_t	pm_savedvalue;	/* Virtual PMCS */
743 	} pm_gv;
744 
745 	/*
746 	 * For sampling mode PMCs, we keep track of the PMC's "reload
747 	 * count", which is the counter value to be loaded in when
748 	 * arming the PMC for the next counting session.  For counting
749 	 * modes on PMCs that are read-only (e.g., the x86 TSC), we
750 	 * keep track of the initial value at the start of
751 	 * counting-mode operation.
752 	 */
753 
754 	union {
755 		pmc_value_t	pm_reloadcount;	/* sampling PMC modes */
756 		pmc_value_t	pm_initial;	/* counting PMC modes */
757 	} pm_sc;
758 
759 	struct pmc_pcpu_state *pm_pcpu_state;
760 	volatile cpuset_t pm_cpustate;	/* CPUs where PMC should be active */
761 	uint32_t	pm_caps;	/* PMC capabilities */
762 	enum pmc_event	pm_event;	/* event being measured */
763 	uint32_t	pm_flags;	/* additional flags PMC_F_... */
764 	struct pmc_owner *pm_owner;	/* owner thread state */
765 	counter_u64_t		pm_runcount;	/* #cpus currently on */
766 	enum pmc_state	pm_state;	/* current PMC state */
767 
768 	/*
769 	 * The PMC ID field encodes the row-index for the PMC, its
770 	 * mode, class and the CPU# associated with the PMC.
771 	 */
772 
773 	pmc_id_t	pm_id;		/* allocated PMC id */
774 	enum pmc_class pm_class;
775 
776 	/* md extensions */
777 	union pmc_md_pmc	pm_md;
778 };
779 
780 /*
781  * Accessor macros for 'struct pmc'
782  */
783 
784 #define	PMC_TO_MODE(P)		PMC_ID_TO_MODE((P)->pm_id)
785 #define	PMC_TO_CLASS(P)		PMC_ID_TO_CLASS((P)->pm_id)
786 #define	PMC_TO_ROWINDEX(P)	PMC_ID_TO_ROWINDEX((P)->pm_id)
787 #define	PMC_TO_CPU(P)		PMC_ID_TO_CPU((P)->pm_id)
788 
789 /*
790  * struct pmc_threadpmcstate
791  *
792  * Record per-PMC, per-thread state.
793  */
794 struct pmc_threadpmcstate {
795 	pmc_value_t	pt_pmcval;	/* per-thread reload count */
796 };
797 
798 /*
799  * struct pmc_thread
800  *
801  * Record a 'target' thread being profiled.
802  */
803 struct pmc_thread {
804 	LIST_ENTRY(pmc_thread) pt_next;		/* linked list */
805 	struct thread	*pt_td;			/* target thread */
806 	struct pmc_threadpmcstate pt_pmcs[];	/* per-PMC state */
807 };
808 
809 /*
810  * struct pmc_process
811  *
812  * Record a 'target' process being profiled.
813  *
814  * The target process being profiled could be different from the owner
815  * process which allocated the PMCs.  Each target process descriptor
816  * is associated with NHWPMC 'struct pmc *' pointers.  Each PMC at a
817  * given hardware row-index 'n' will use slot 'n' of the 'pp_pmcs[]'
818  * array.  The size of this structure is thus PMC architecture
819  * dependent.
820  *
821  */
822 
823 struct pmc_targetstate {
824 	struct pmc	*pp_pmc;   /* target PMC */
825 	pmc_value_t	pp_pmcval; /* per-process value */
826 };
827 
828 struct pmc_process {
829 	LIST_ENTRY(pmc_process) pp_next;	/* hash chain */
830 	LIST_HEAD(,pmc_thread) pp_tds;		/* list of threads */
831 	struct mtx	*pp_tdslock;		/* lock on pp_tds thread list */
832 	int		pp_refcnt;		/* reference count */
833 	uint32_t	pp_flags;		/* flags PMC_PP_* */
834 	struct proc	*pp_proc;		/* target process */
835 	struct pmc_targetstate pp_pmcs[];       /* NHWPMCs */
836 };
837 
838 #define	PMC_PP_ENABLE_MSR_ACCESS	0x00000001
839 
840 /*
841  * struct pmc_owner
842  *
843  * We associate a PMC with an 'owner' process.
844  *
845  * A process can be associated with 0..NCPUS*NHWPMC PMCs during its
846  * lifetime, where NCPUS is the number of CPUs in the system and
847  * NHWPMC is the number of hardware PMCs per CPU.  These are
848  * maintained in the list headed by 'po_pmcs' to save on space.
849  *
850  */
851 
852 struct pmc_owner  {
853 	LIST_ENTRY(pmc_owner)	po_next;	/* hash chain */
854 	CK_LIST_ENTRY(pmc_owner)	po_ssnext;	/* (g/p) list of SS PMC owners */
855 	LIST_HEAD(, pmc)	po_pmcs;	/* owned PMC list */
856 	TAILQ_HEAD(, pmclog_buffer) po_logbuffers; /* (o) logbuffer list */
857 	struct mtx		po_mtx;		/* spin lock for (o) */
858 	struct proc		*po_owner;	/* owner proc */
859 	uint32_t		po_flags;	/* (k) flags PMC_PO_* */
860 	struct proc		*po_kthread;	/* (k) helper kthread */
861 	struct file		*po_file;	/* file reference */
862 	int			po_error;	/* recorded error */
863 	short			po_sscount;	/* # SS PMCs owned */
864 	short			po_logprocmaps;	/* global mappings done */
865 	struct pmclog_buffer	*po_curbuf[MAXCPU];	/* current log buffer */
866 };
867 
868 #define	PMC_PO_OWNS_LOGFILE		0x00000001 /* has a log file */
869 #define	PMC_PO_SHUTDOWN			0x00000010 /* in the process of shutdown */
870 #define	PMC_PO_INITIAL_MAPPINGS_DONE	0x00000020
871 
872 /*
873  * struct pmc_hw -- describe the state of the PMC hardware
874  *
875  * When in use, a HW PMC is associated with one allocated 'struct pmc'
876  * pointed to by field 'phw_pmc'.  When inactive, this field is NULL.
877  *
878  * On an SMP box, one or more HW PMC's in process virtual mode with
879  * the same 'phw_pmc' could be executing on different CPUs.  In order
880  * to handle this case correctly, we need to ensure that only
881  * incremental counts get added to the saved value in the associated
882  * 'struct pmc'.  The 'phw_save' field is used to keep the saved PMC
883  * value at the time the hardware is started during this context
884  * switch (i.e., the difference between the new (hardware) count and
885  * the saved count is atomically added to the count field in 'struct
886  * pmc' at context switch time).
887  *
888  */
889 
890 struct pmc_hw {
891 	uint32_t	phw_state;	/* see PHW_* macros below */
892 	struct pmc	*phw_pmc;	/* current thread PMC */
893 };
894 
895 #define	PMC_PHW_RI_MASK		0x000000FF
896 #define	PMC_PHW_CPU_SHIFT	8
897 #define	PMC_PHW_CPU_MASK	0x0000FF00
898 #define	PMC_PHW_FLAGS_SHIFT	16
899 #define	PMC_PHW_FLAGS_MASK	0xFFFF0000
900 
901 #define	PMC_PHW_INDEX_TO_STATE(ri)	((ri) & PMC_PHW_RI_MASK)
902 #define	PMC_PHW_STATE_TO_INDEX(state)	((state) & PMC_PHW_RI_MASK)
903 #define	PMC_PHW_CPU_TO_STATE(cpu)	(((cpu) << PMC_PHW_CPU_SHIFT) & \
904 	PMC_PHW_CPU_MASK)
905 #define	PMC_PHW_STATE_TO_CPU(state)	(((state) & PMC_PHW_CPU_MASK) >> \
906 	PMC_PHW_CPU_SHIFT)
907 #define	PMC_PHW_FLAGS_TO_STATE(flags)	(((flags) << PMC_PHW_FLAGS_SHIFT) & \
908 	PMC_PHW_FLAGS_MASK)
909 #define	PMC_PHW_STATE_TO_FLAGS(state)	(((state) & PMC_PHW_FLAGS_MASK) >> \
910 	PMC_PHW_FLAGS_SHIFT)
911 #define	PMC_PHW_FLAG_IS_ENABLED		(PMC_PHW_FLAGS_TO_STATE(0x01))
912 #define	PMC_PHW_FLAG_IS_SHAREABLE	(PMC_PHW_FLAGS_TO_STATE(0x02))
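/*
 * Example (illustrative sketch): 'phw_state' packs the row index, the
 * CPU number and the flag bits into one word; machine dependent code
 * would typically initialize it by combining the helpers above, and the
 * inverse macros recover the fields:
 *
 *	phw->phw_state = PMC_PHW_FLAG_IS_ENABLED |
 *	    PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(ri);
 *
 *	PMC_PHW_STATE_TO_CPU(phw->phw_state)	== cpu
 *	PMC_PHW_STATE_TO_INDEX(phw->phw_state)	== ri
 */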
913 
914 /*
915  * struct pmc_sample
916  *
917  * Space for N (tunable) PC samples and associated control data.
918  */
919 
920 struct pmc_sample {
921 	uint16_t		ps_nsamples;	/* callchain depth */
922 	uint16_t		ps_nsamples_actual;
923 	uint16_t		ps_cpu;		/* cpu number */
924 	uint16_t		ps_flags;	/* other flags */
925 	lwpid_t			ps_tid;		/* thread id */
926 	pid_t			ps_pid;		/* process PID or -1 */
927 	int		ps_ticks; /* ticks at sample time */
928 	/* pad */
929 	struct thread		*ps_td;		/* which thread */
930 	struct pmc		*ps_pmc;	/* interrupting PMC */
931 	uintptr_t		*ps_pc;		/* (const) callchain start */
932 	uint64_t		ps_tsc;		/* tsc value */
933 };
934 
935 #define 	PMC_SAMPLE_FREE		((uint16_t) 0)
936 #define 	PMC_USER_CALLCHAIN_PENDING	((uint16_t) 0xFFFF)
937 
938 struct pmc_samplebuffer {
939 	volatile uint64_t		ps_prodidx; /* producer index */
940 	volatile uint64_t		ps_considx; /* consumer index */
941 	uintptr_t		*ps_callchains;	/* all saved call chains */
942 	struct pmc_sample	ps_samples[];	/* array of sample entries */
943 };
944 
945 #define PMC_CONS_SAMPLE(psb)					\
946 	(&(psb)->ps_samples[(psb)->ps_considx & pmc_sample_mask])
947 
948 #define PMC_CONS_SAMPLE_OFF(psb, off)							\
949 	(&(psb)->ps_samples[(off) & pmc_sample_mask])
950 
951 #define PMC_PROD_SAMPLE(psb)					\
952 	(&(psb)->ps_samples[(psb)->ps_prodidx & pmc_sample_mask])
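/*
 * Example (illustrative sketch): 'ps_prodidx' and 'ps_considx' are free
 * running indices that the macros above reduce modulo the power-of-two
 * sample buffer size (via the global 'pmc_sample_mask'), so a consumer
 * can drain pending samples with a simple loop:
 *
 *	while (psb->ps_considx < psb->ps_prodidx) {
 *		struct pmc_sample *ps = PMC_CONS_SAMPLE(psb);
 *
 *		if (ps->ps_nsamples == PMC_SAMPLE_FREE)
 *			break;
 *		(... process the sample ...)
 *		ps->ps_nsamples = PMC_SAMPLE_FREE;
 *		psb->ps_considx++;
 *	}
 */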
953 
954 /*
955  * struct pmc_cpustate
956  *
957  * A CPU is modelled as a collection of HW PMCs with space for additional
958  * flags.
959  */
960 
961 struct pmc_cpu {
962 	uint32_t	pc_state;	/* physical cpu number + flags */
963 	struct pmc_samplebuffer *pc_sb[3]; /* space for samples */
964 	struct pmc_hw	*pc_hwpmcs[];	/* 'npmc' pointers */
965 };
966 
967 #define	PMC_PCPU_CPU_MASK		0x000000FF
968 #define	PMC_PCPU_FLAGS_MASK		0xFFFFFF00
969 #define	PMC_PCPU_FLAGS_SHIFT		8
970 #define	PMC_PCPU_STATE_TO_CPU(S)	((S) & PMC_PCPU_CPU_MASK)
971 #define	PMC_PCPU_STATE_TO_FLAGS(S)	(((S) & PMC_PCPU_FLAGS_MASK) >> PMC_PCPU_FLAGS_SHIFT)
972 #define	PMC_PCPU_FLAGS_TO_STATE(F)	(((F) << PMC_PCPU_FLAGS_SHIFT) & PMC_PCPU_FLAGS_MASK)
973 #define	PMC_PCPU_CPU_TO_STATE(C)	((C) & PMC_PCPU_CPU_MASK)
974 #define	PMC_PCPU_FLAG_HTT		(PMC_PCPU_FLAGS_TO_STATE(0x1))
975 
976 /*
977  * struct pmc_binding
978  *
979  * CPU binding information.
980  */
981 
982 struct pmc_binding {
983 	int	pb_bound;	/* is bound? */
984 	int	pb_cpu;		/* if so, to which CPU */
985 	u_char	pb_priority;	/* Thread active priority. */
986 };
987 
988 struct pmc_mdep;
989 
990 /*
991  * struct pmc_classdep
992  *
993  * PMC class-dependent operations.
994  */
995 struct pmc_classdep {
996 	uint32_t	pcd_caps;	/* class capabilities */
997 	enum pmc_class	pcd_class;	/* class id */
998 	int		pcd_num;	/* number of PMCs */
999 	int		pcd_ri;		/* row index of the first PMC in class */
1000 	int		pcd_width;	/* width of the PMC */
1001 
1002 	/* configuring/reading/writing the hardware PMCs */
1003 	int (*pcd_config_pmc)(int _cpu, int _ri, struct pmc *_pm);
1004 	int (*pcd_get_config)(int _cpu, int _ri, struct pmc **_ppm);
1005 	int (*pcd_read_pmc)(int _cpu, int _ri, struct pmc *_pm,
1006 	    pmc_value_t *_value);
1007 	int (*pcd_write_pmc)(int _cpu, int _ri, struct pmc *_pm,
1008 	    pmc_value_t _value);
1009 
1010 	/* pmc allocation/release */
1011 	int (*pcd_allocate_pmc)(int _cpu, int _ri, struct pmc *_t,
1012 		const struct pmc_op_pmcallocate *_a);
1013 	int (*pcd_release_pmc)(int _cpu, int _ri, struct pmc *_pm);
1014 
1015 	/* starting and stopping PMCs */
1016 	int (*pcd_start_pmc)(int _cpu, int _ri, struct pmc *_pm);
1017 	int (*pcd_stop_pmc)(int _cpu, int _ri, struct pmc *_pm);
1018 
1019 	/* description */
1020 	int (*pcd_describe)(int _cpu, int _ri, struct pmc_info *_pi,
1021 		struct pmc **_ppmc);
1022 
1023 	/* class-dependent initialization & finalization */
1024 	int (*pcd_pcpu_init)(struct pmc_mdep *_md, int _cpu);
1025 	int (*pcd_pcpu_fini)(struct pmc_mdep *_md, int _cpu);
1026 
1027 	/* machine-specific interface */
1028 	int (*pcd_get_msr)(int _ri, uint32_t *_msr);
1029 };
1030 
1031 /*
1032  * struct pmc_mdep
1033  *
1034  * Machine dependent bits needed per CPU type.
1035  */
1036 
1037 struct pmc_mdep  {
1038 	uint32_t	pmd_cputype;    /* from enum pmc_cputype */
1039 	uint32_t	pmd_npmc;	/* number of PMCs per CPU */
1040 	uint32_t	pmd_nclass;	/* number of PMC classes present */
1041 
1042 	/*
1043 	 * Machine dependent methods.
1044 	 */
1045 
1046 	/* thread context switch in/out */
1047 	int (*pmd_switch_in)(struct pmc_cpu *_p, struct pmc_process *_pp);
1048 	int (*pmd_switch_out)(struct pmc_cpu *_p, struct pmc_process *_pp);
1049 
1050 	/* handle a PMC interrupt */
1051 	int (*pmd_intr)(struct trapframe *_tf);
1052 
1053 	/*
1054 	 * PMC class dependent information.
1055 	 */
1056 	struct pmc_classdep pmd_classdep[];
1057 };
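/*
 * Example (illustrative sketch): 'pmd_classdep[]' is a flexible array
 * member, so a machine dependent backend obtains a descriptor sized for
 * 'n' classes in a single allocation (this is essentially what
 * pmc_mdep_alloc(), declared below, provides) and then fills in each
 * per-class slot:
 *
 *	md = malloc(sizeof(struct pmc_mdep) +
 *	    n * sizeof(struct pmc_classdep), M_PMC, M_WAITOK | M_ZERO);
 *	md->pmd_nclass = n;
 */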
1058 
1059 /*
1060  * Per-CPU state.  This is an array of 'mp_ncpu' pointers
1061  * to struct pmc_cpu descriptors.
1062  */
1063 
1064 extern struct pmc_cpu **pmc_pcpu;
1065 
1066 /* driver statistics */
1067 extern struct pmc_driverstats pmc_stats;
1068 
1069 #if	defined(HWPMC_DEBUG)
1070 
1071 /* HWPMC_DEBUG without KTR would silently be a no-op, so fail the build instead. */
1072 #if !defined(KTR) || !defined(KTR_COMPILE) || ((KTR_COMPILE & KTR_SUBSYS) == 0)
1073 #error "HWPMC_DEBUG requires KTR and KTR_COMPILE=KTR_SUBSYS -- see ktr(4)"
1074 #endif
1075 
1076 #include <sys/ktr.h>
1077 
1078 #define	__pmcdbg_used		/* unused variable annotation */
1079 
1080 /*
1081  * Debug flags, major flag groups.
1082  *
1083  * Please keep the DEBUGGING section of the hwpmc(4) man page in sync.
1084  */
1085 struct pmc_debugflags {
1086 	int	pdb_CPU;
1087 	int	pdb_CSW;
1088 	int	pdb_LOG;
1089 	int	pdb_MDP;
1090 	int	pdb_MOD;
1091 	int	pdb_OWN;
1092 	int	pdb_PMC;
1093 	int	pdb_PRC;
1094 	int	pdb_SAM;
1095 };
1096 
1097 extern struct pmc_debugflags pmc_debugflags;
1098 
1099 #define	KTR_PMC			KTR_SUBSYS
1100 
1101 #define	PMC_DEBUG_STRSIZE		128
1102 #define	PMC_DEBUG_DEFAULT_FLAGS		{ 0, 0, 0, 0, 0, 0, 0, 0, 0 }
1103 
1104 #define	PMCDBG0(M, N, L, F) do {					\
1105 	if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))	\
1106 		CTR0(KTR_PMC, #M ":" #N ":" #L  ": " F);		\
1107 } while (0)
1108 #define	PMCDBG1(M, N, L, F, p1) do {					\
1109 	if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))	\
1110 		CTR1(KTR_PMC, #M ":" #N ":" #L  ": " F, p1);		\
1111 } while (0)
1112 #define	PMCDBG2(M, N, L, F, p1, p2) do {				\
1113 	if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))	\
1114 		CTR2(KTR_PMC, #M ":" #N ":" #L  ": " F, p1, p2);	\
1115 } while (0)
1116 #define	PMCDBG3(M, N, L, F, p1, p2, p3) do {				\
1117 	if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))	\
1118 		CTR3(KTR_PMC, #M ":" #N ":" #L  ": " F, p1, p2, p3);	\
1119 } while (0)
1120 #define	PMCDBG4(M, N, L, F, p1, p2, p3, p4) do {			\
1121 	if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))	\
1122 		CTR4(KTR_PMC, #M ":" #N ":" #L  ": " F, p1, p2, p3, p4);\
1123 } while (0)
1124 #define	PMCDBG5(M, N, L, F, p1, p2, p3, p4, p5) do {			\
1125 	if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))	\
1126 		CTR5(KTR_PMC, #M ":" #N ":" #L  ": " F, p1, p2, p3, p4,	\
1127 		    p5);						\
1128 } while (0)
1129 #define	PMCDBG6(M, N, L, F, p1, p2, p3, p4, p5, p6) do {		\
1130 	if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))	\
1131 		CTR6(KTR_PMC, #M ":" #N ":" #L  ": " F, p1, p2, p3, p4,	\
1132 		    p5, p6);						\
1133 } while (0)
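/*
 * Example (illustrative sketch): a call site names a major flag group, a
 * minor flag and a level; the record is emitted only when the matching
 * bit is set in 'pmc_debugflags' (the format arguments here are made up):
 *
 *	PMCDBG3(PMC,ALL,1, "mode=%d class=%d cpu=%d", mode, class, cpu);
 */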
1134 
1135 /* Major numbers */
1136 #define	PMC_DEBUG_MAJ_CPU		0 /* cpu switches */
1137 #define	PMC_DEBUG_MAJ_CSW		1 /* context switches */
1138 #define	PMC_DEBUG_MAJ_LOG		2 /* logging */
1139 #define	PMC_DEBUG_MAJ_MDP		3 /* machine dependent */
1140 #define	PMC_DEBUG_MAJ_MOD		4 /* misc module infrastructure */
1141 #define	PMC_DEBUG_MAJ_OWN		5 /* owner */
1142 #define	PMC_DEBUG_MAJ_PMC		6 /* pmc management */
1143 #define	PMC_DEBUG_MAJ_PRC		7 /* processes */
1144 #define	PMC_DEBUG_MAJ_SAM		8 /* sampling */
1145 
1146 /* Minor numbers */
1147 
1148 /* Common (8 bits) */
1149 #define	PMC_DEBUG_MIN_ALL		0 /* allocation */
1150 #define	PMC_DEBUG_MIN_REL		1 /* release */
1151 #define	PMC_DEBUG_MIN_OPS		2 /* ops: start, stop, ... */
1152 #define	PMC_DEBUG_MIN_INI		3 /* init */
1153 #define	PMC_DEBUG_MIN_FND		4 /* find */
1154 
1155 /* MODULE */
1156 #define	PMC_DEBUG_MIN_PMH	       14 /* pmc_hook */
1157 #define	PMC_DEBUG_MIN_PMS	       15 /* pmc_syscall */
1158 
1159 /* OWN */
1160 #define	PMC_DEBUG_MIN_ORM		8 /* owner remove */
1161 #define	PMC_DEBUG_MIN_OMR		9 /* owner maybe remove */
1162 
1163 /* PROCESSES */
1164 #define	PMC_DEBUG_MIN_TLK		8 /* link target */
1165 #define	PMC_DEBUG_MIN_TUL		9 /* unlink target */
1166 #define	PMC_DEBUG_MIN_EXT	       10 /* process exit */
1167 #define	PMC_DEBUG_MIN_EXC	       11 /* process exec */
1168 #define	PMC_DEBUG_MIN_FRK	       12 /* process fork */
1169 #define	PMC_DEBUG_MIN_ATT	       13 /* attach/detach */
1170 #define	PMC_DEBUG_MIN_SIG	       14 /* signalling */
1171 
1172 /* CONTEXT SWITCHES */
1173 #define	PMC_DEBUG_MIN_SWI		8 /* switch in */
1174 #define	PMC_DEBUG_MIN_SWO		9 /* switch out */
1175 
1176 /* PMC */
1177 #define	PMC_DEBUG_MIN_REG		8 /* pmc register */
1178 #define	PMC_DEBUG_MIN_ALR		9 /* allocate row */
1179 
1180 /* MACHINE DEPENDENT LAYER */
1181 #define	PMC_DEBUG_MIN_REA		8 /* read */
1182 #define	PMC_DEBUG_MIN_WRI		9 /* write */
1183 #define	PMC_DEBUG_MIN_CFG	       10 /* config */
1184 #define	PMC_DEBUG_MIN_STA	       11 /* start */
1185 #define	PMC_DEBUG_MIN_STO	       12 /* stop */
1186 #define	PMC_DEBUG_MIN_INT	       13 /* interrupts */
1187 
1188 /* CPU */
1189 #define	PMC_DEBUG_MIN_BND		8 /* bind */
1190 #define	PMC_DEBUG_MIN_SEL		9 /* select */
1191 
1192 /* LOG */
1193 #define	PMC_DEBUG_MIN_GTB		8 /* get buf */
1194 #define	PMC_DEBUG_MIN_SIO		9 /* schedule i/o */
1195 #define	PMC_DEBUG_MIN_FLS	       10 /* flush */
1196 #define	PMC_DEBUG_MIN_SAM	       11 /* sample */
1197 #define	PMC_DEBUG_MIN_CLO	       12 /* close */
1198 
1199 #else
1200 #define	__pmcdbg_used			__unused
1201 #define	PMCDBG0(M, N, L, F)		/* nothing */
1202 #define	PMCDBG1(M, N, L, F, p1)
1203 #define	PMCDBG2(M, N, L, F, p1, p2)
1204 #define	PMCDBG3(M, N, L, F, p1, p2, p3)
1205 #define	PMCDBG4(M, N, L, F, p1, p2, p3, p4)
1206 #define	PMCDBG5(M, N, L, F, p1, p2, p3, p4, p5)
1207 #define	PMCDBG6(M, N, L, F, p1, p2, p3, p4, p5, p6)
1208 #endif
1209 
1210 /* declare a dedicated memory pool */
1211 MALLOC_DECLARE(M_PMC);
1212 
1213 /*
1214  * Functions
1215  */
1216 
1217 struct pmc_mdep *pmc_md_initialize(void);	/* MD init function */
1218 void	pmc_md_finalize(struct pmc_mdep *_md);	/* MD fini function */
1219 int	pmc_getrowdisp(int _ri);
1220 int	pmc_process_interrupt(int _ring, struct pmc *_pm, struct trapframe *_tf);
1221 int	pmc_save_kernel_callchain(uintptr_t *_cc, int _maxsamples,
1222     struct trapframe *_tf);
1223 int	pmc_save_user_callchain(uintptr_t *_cc, int _maxsamples,
1224     struct trapframe *_tf);
1225 void	pmc_restore_cpu_binding(struct pmc_binding *pb);
1226 void	pmc_save_cpu_binding(struct pmc_binding *pb);
1227 void	pmc_select_cpu(int cpu);
1228 struct pmc_mdep *pmc_mdep_alloc(int nclasses);
1229 void pmc_mdep_free(struct pmc_mdep *md);
1230 uint64_t pmc_rdtsc(void);
1231 #endif /* _KERNEL */
1232 #endif /* _SYS_PMC_H_ */
1233