1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2003-2008, Joseph Koshy 5 * Copyright (c) 2007 The FreeBSD Foundation 6 * All rights reserved. 7 * 8 * Portions of this software were developed by A. Joseph Koshy under 9 * sponsorship from the FreeBSD Foundation and Google, Inc. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 
31 * 32 * $FreeBSD$ 33 */ 34 35 #ifndef _SYS_PMC_H_ 36 #define _SYS_PMC_H_ 37 38 #include <dev/hwpmc/pmc_events.h> 39 40 #include <machine/pmc_mdep.h> 41 #include <machine/profile.h> 42 43 #define PMC_MODULE_NAME "hwpmc" 44 #define PMC_NAME_MAX 64 /* HW counter name size */ 45 #define PMC_CLASS_MAX 8 /* max #classes of PMCs per-system */ 46 47 /* 48 * Kernel<->userland API version number [MMmmpppp] 49 * 50 * Major numbers are to be incremented when an incompatible change to 51 * the ABI occurs that older clients will not be able to handle. 52 * 53 * Minor numbers are incremented when a backwards compatible change 54 * occurs that allows older correct programs to run unchanged. For 55 * example, when support for a new PMC type is added. 56 * 57 * The patch version is incremented for every bug fix. 58 */ 59 #define PMC_VERSION_MAJOR 0x03 60 #define PMC_VERSION_MINOR 0x01 61 #define PMC_VERSION_PATCH 0x0000 62 63 #define PMC_VERSION (PMC_VERSION_MAJOR << 24 | \ 64 PMC_VERSION_MINOR << 16 | PMC_VERSION_PATCH) 65 66 /* 67 * Kinds of CPUs known. 68 * 69 * We keep track of CPU variants that need to be distinguished in 70 * some way for PMC operations. CPU names are grouped by manufacturer 71 * and numbered sparsely in order to minimize changes to the ABI involved 72 * when new CPUs are added. 
73 */ 74 75 #define __PMC_CPUS() \ 76 __PMC_CPU(AMD_K7, 0x00, "AMD K7") \ 77 __PMC_CPU(AMD_K8, 0x01, "AMD K8") \ 78 __PMC_CPU(INTEL_P5, 0x80, "Intel Pentium") \ 79 __PMC_CPU(INTEL_P6, 0x81, "Intel Pentium Pro") \ 80 __PMC_CPU(INTEL_CL, 0x82, "Intel Celeron") \ 81 __PMC_CPU(INTEL_PII, 0x83, "Intel Pentium II") \ 82 __PMC_CPU(INTEL_PIII, 0x84, "Intel Pentium III") \ 83 __PMC_CPU(INTEL_PM, 0x85, "Intel Pentium M") \ 84 __PMC_CPU(INTEL_PIV, 0x86, "Intel Pentium IV") \ 85 __PMC_CPU(INTEL_CORE, 0x87, "Intel Core Solo/Duo") \ 86 __PMC_CPU(INTEL_CORE2, 0x88, "Intel Core2") \ 87 __PMC_CPU(INTEL_CORE2EXTREME, 0x89, "Intel Core2 Extreme") \ 88 __PMC_CPU(INTEL_ATOM, 0x8A, "Intel Atom") \ 89 __PMC_CPU(INTEL_COREI7, 0x8B, "Intel Core i7") \ 90 __PMC_CPU(INTEL_WESTMERE, 0x8C, "Intel Westmere") \ 91 __PMC_CPU(INTEL_SANDYBRIDGE, 0x8D, "Intel Sandy Bridge") \ 92 __PMC_CPU(INTEL_IVYBRIDGE, 0x8E, "Intel Ivy Bridge") \ 93 __PMC_CPU(INTEL_SANDYBRIDGE_XEON, 0x8F, "Intel Sandy Bridge Xeon") \ 94 __PMC_CPU(INTEL_IVYBRIDGE_XEON, 0x90, "Intel Ivy Bridge Xeon") \ 95 __PMC_CPU(INTEL_HASWELL, 0x91, "Intel Haswell") \ 96 __PMC_CPU(INTEL_ATOM_SILVERMONT, 0x92, "Intel Atom Silvermont") \ 97 __PMC_CPU(INTEL_NEHALEM_EX, 0x93, "Intel Nehalem Xeon 7500") \ 98 __PMC_CPU(INTEL_WESTMERE_EX, 0x94, "Intel Westmere Xeon E7") \ 99 __PMC_CPU(INTEL_HASWELL_XEON, 0x95, "Intel Haswell Xeon E5 v3") \ 100 __PMC_CPU(INTEL_BROADWELL, 0x96, "Intel Broadwell") \ 101 __PMC_CPU(INTEL_BROADWELL_XEON, 0x97, "Intel Broadwell Xeon") \ 102 __PMC_CPU(INTEL_SKYLAKE, 0x98, "Intel Skylake") \ 103 __PMC_CPU(INTEL_SKYLAKE_XEON, 0x99, "Intel Skylake Xeon") \ 104 __PMC_CPU(INTEL_XSCALE, 0x100, "Intel XScale") \ 105 __PMC_CPU(MIPS_24K, 0x200, "MIPS 24K") \ 106 __PMC_CPU(MIPS_OCTEON, 0x201, "Cavium Octeon") \ 107 __PMC_CPU(MIPS_74K, 0x202, "MIPS 74K") \ 108 __PMC_CPU(PPC_7450, 0x300, "PowerPC MPC7450") \ 109 __PMC_CPU(PPC_E500, 0x340, "PowerPC e500 Core") \ 110 __PMC_CPU(PPC_970, 0x380, "IBM PowerPC 970") \ 111 __PMC_CPU(GENERIC, 
0x400, "Generic") \ 112 __PMC_CPU(ARMV7_CORTEX_A5, 0x500, "ARMv7 Cortex A5") \ 113 __PMC_CPU(ARMV7_CORTEX_A7, 0x501, "ARMv7 Cortex A7") \ 114 __PMC_CPU(ARMV7_CORTEX_A8, 0x502, "ARMv7 Cortex A8") \ 115 __PMC_CPU(ARMV7_CORTEX_A9, 0x503, "ARMv7 Cortex A9") \ 116 __PMC_CPU(ARMV7_CORTEX_A15, 0x504, "ARMv7 Cortex A15") \ 117 __PMC_CPU(ARMV7_CORTEX_A17, 0x505, "ARMv7 Cortex A17") \ 118 __PMC_CPU(ARMV8_CORTEX_A53, 0x600, "ARMv8 Cortex A53") \ 119 __PMC_CPU(ARMV8_CORTEX_A57, 0x601, "ARMv8 Cortex A57") 120 121 enum pmc_cputype { 122 #undef __PMC_CPU 123 #define __PMC_CPU(S,V,D) PMC_CPU_##S = V, 124 __PMC_CPUS() 125 }; 126 127 #define PMC_CPU_FIRST PMC_CPU_AMD_K7 128 #define PMC_CPU_LAST PMC_CPU_GENERIC 129 130 /* 131 * Classes of PMCs 132 */ 133 134 #define __PMC_CLASSES() \ 135 __PMC_CLASS(TSC, 0x00, "CPU Timestamp counter") \ 136 __PMC_CLASS(K7, 0x01, "AMD K7 performance counters") \ 137 __PMC_CLASS(K8, 0x02, "AMD K8 performance counters") \ 138 __PMC_CLASS(P5, 0x03, "Intel Pentium counters") \ 139 __PMC_CLASS(P6, 0x04, "Intel Pentium Pro counters") \ 140 __PMC_CLASS(P4, 0x05, "Intel Pentium-IV counters") \ 141 __PMC_CLASS(IAF, 0x06, "Intel Core2/Atom, fixed function") \ 142 __PMC_CLASS(IAP, 0x07, "Intel Core...Atom, programmable") \ 143 __PMC_CLASS(UCF, 0x08, "Intel Uncore fixed function") \ 144 __PMC_CLASS(UCP, 0x09, "Intel Uncore programmable") \ 145 __PMC_CLASS(XSCALE, 0x0A, "Intel XScale counters") \ 146 __PMC_CLASS(MIPS24K, 0x0B, "MIPS 24K") \ 147 __PMC_CLASS(OCTEON, 0x0C, "Cavium Octeon") \ 148 __PMC_CLASS(PPC7450, 0x0D, "Motorola MPC7450 class") \ 149 __PMC_CLASS(PPC970, 0x0E, "IBM PowerPC 970 class") \ 150 __PMC_CLASS(SOFT, 0x0F, "Software events") \ 151 __PMC_CLASS(ARMV7, 0x10, "ARMv7") \ 152 __PMC_CLASS(ARMV8, 0x11, "ARMv8") \ 153 __PMC_CLASS(MIPS74K, 0x12, "MIPS 74K") \ 154 __PMC_CLASS(E500, 0x13, "Freescale e500 class") 155 156 enum pmc_class { 157 #undef __PMC_CLASS 158 #define __PMC_CLASS(S,V,D) PMC_CLASS_##S = V, 159 __PMC_CLASSES() 160 }; 161 162 #define 
PMC_CLASS_FIRST PMC_CLASS_TSC 163 #define PMC_CLASS_LAST PMC_CLASS_E500 164 165 /* 166 * A PMC can be in the following states: 167 * 168 * Hardware states: 169 * DISABLED -- administratively prohibited from being used. 170 * FREE -- HW available for use 171 * Software states: 172 * ALLOCATED -- allocated 173 * STOPPED -- allocated, but not counting events 174 * RUNNING -- allocated, and in operation; 'pm_runcount' 175 * holds the number of CPUs using this PMC at 176 * a given instant 177 * DELETED -- being destroyed 178 */ 179 180 #define __PMC_HWSTATES() \ 181 __PMC_STATE(DISABLED) \ 182 __PMC_STATE(FREE) 183 184 #define __PMC_SWSTATES() \ 185 __PMC_STATE(ALLOCATED) \ 186 __PMC_STATE(STOPPED) \ 187 __PMC_STATE(RUNNING) \ 188 __PMC_STATE(DELETED) 189 190 #define __PMC_STATES() \ 191 __PMC_HWSTATES() \ 192 __PMC_SWSTATES() 193 194 enum pmc_state { 195 #undef __PMC_STATE 196 #define __PMC_STATE(S) PMC_STATE_##S, 197 __PMC_STATES() 198 __PMC_STATE(MAX) 199 }; 200 201 #define PMC_STATE_FIRST PMC_STATE_DISABLED 202 #define PMC_STATE_LAST PMC_STATE_DELETED 203 204 /* 205 * An allocated PMC may used as a 'global' counter or as a 206 * 'thread-private' one. Each such mode of use can be in either 207 * statistical sampling mode or in counting mode. Thus a PMC in use 208 * 209 * SS i.e., SYSTEM STATISTICAL -- system-wide statistical profiling 210 * SC i.e., SYSTEM COUNTER -- system-wide counting mode 211 * TS i.e., THREAD STATISTICAL -- thread virtual, statistical profiling 212 * TC i.e., THREAD COUNTER -- thread virtual, counting mode 213 * 214 * Statistical profiling modes rely on the PMC periodically delivering 215 * a interrupt to the CPU (when the configured number of events have 216 * been measured), so the PMC must have the ability to generate 217 * interrupts. 218 * 219 * In counting modes, the PMC counts its configured events, with the 220 * value of the PMC being read whenever needed by its owner process. 
221 * 222 * The thread specific modes "virtualize" the PMCs -- the PMCs appear 223 * to be thread private and count events only when the profiled thread 224 * actually executes on the CPU. 225 * 226 * The system-wide "global" modes keep the PMCs running all the time 227 * and are used to measure the behaviour of the whole system. 228 */ 229 230 #define __PMC_MODES() \ 231 __PMC_MODE(SS, 0) \ 232 __PMC_MODE(SC, 1) \ 233 __PMC_MODE(TS, 2) \ 234 __PMC_MODE(TC, 3) 235 236 enum pmc_mode { 237 #undef __PMC_MODE 238 #define __PMC_MODE(M,N) PMC_MODE_##M = N, 239 __PMC_MODES() 240 }; 241 242 #define PMC_MODE_FIRST PMC_MODE_SS 243 #define PMC_MODE_LAST PMC_MODE_TC 244 245 #define PMC_IS_COUNTING_MODE(mode) \ 246 ((mode) == PMC_MODE_SC || (mode) == PMC_MODE_TC) 247 #define PMC_IS_SYSTEM_MODE(mode) \ 248 ((mode) == PMC_MODE_SS || (mode) == PMC_MODE_SC) 249 #define PMC_IS_SAMPLING_MODE(mode) \ 250 ((mode) == PMC_MODE_SS || (mode) == PMC_MODE_TS) 251 #define PMC_IS_VIRTUAL_MODE(mode) \ 252 ((mode) == PMC_MODE_TS || (mode) == PMC_MODE_TC) 253 254 /* 255 * PMC row disposition 256 */ 257 258 #define __PMC_DISPOSITIONS(N) \ 259 __PMC_DISP(STANDALONE) /* global/disabled counters */ \ 260 __PMC_DISP(FREE) /* free/available */ \ 261 __PMC_DISP(THREAD) /* thread-virtual PMCs */ \ 262 __PMC_DISP(UNKNOWN) /* sentinel */ 263 264 enum pmc_disp { 265 #undef __PMC_DISP 266 #define __PMC_DISP(D) PMC_DISP_##D , 267 __PMC_DISPOSITIONS() 268 }; 269 270 #define PMC_DISP_FIRST PMC_DISP_STANDALONE 271 #define PMC_DISP_LAST PMC_DISP_THREAD 272 273 /* 274 * Counter capabilities 275 * 276 * __PMC_CAPS(NAME, VALUE, DESCRIPTION) 277 */ 278 279 #define __PMC_CAPS() \ 280 __PMC_CAP(INTERRUPT, 0, "generate interrupts") \ 281 __PMC_CAP(USER, 1, "count user-mode events") \ 282 __PMC_CAP(SYSTEM, 2, "count system-mode events") \ 283 __PMC_CAP(EDGE, 3, "do edge detection of events") \ 284 __PMC_CAP(THRESHOLD, 4, "ignore events below a threshold") \ 285 __PMC_CAP(READ, 5, "read PMC counter") \ 286 
__PMC_CAP(WRITE, 6, "reprogram PMC counter") \ 287 __PMC_CAP(INVERT, 7, "invert comparison sense") \ 288 __PMC_CAP(QUALIFIER, 8, "further qualify monitored events") \ 289 __PMC_CAP(PRECISE, 9, "perform precise sampling") \ 290 __PMC_CAP(TAGGING, 10, "tag upstream events") \ 291 __PMC_CAP(CASCADE, 11, "cascade counters") 292 293 enum pmc_caps 294 { 295 #undef __PMC_CAP 296 #define __PMC_CAP(NAME, VALUE, DESCR) PMC_CAP_##NAME = (1 << VALUE) , 297 __PMC_CAPS() 298 }; 299 300 #define PMC_CAP_FIRST PMC_CAP_INTERRUPT 301 #define PMC_CAP_LAST PMC_CAP_CASCADE 302 303 /* 304 * PMC Event Numbers 305 * 306 * These are generated from the definitions in "dev/hwpmc/pmc_events.h". 307 */ 308 309 enum pmc_event { 310 #undef __PMC_EV 311 #undef __PMC_EV_BLOCK 312 #define __PMC_EV_BLOCK(C,V) PMC_EV_ ## C ## __BLOCK_START = (V) - 1 , 313 #define __PMC_EV(C,N) PMC_EV_ ## C ## _ ## N , 314 __PMC_EVENTS() 315 }; 316 317 /* 318 * PMC SYSCALL INTERFACE 319 */ 320 321 /* 322 * "PMC_OPS" -- these are the commands recognized by the kernel 323 * module, and are used when performing a system call from userland. 
324 */ 325 #define __PMC_OPS() \ 326 __PMC_OP(CONFIGURELOG, "Set log file") \ 327 __PMC_OP(FLUSHLOG, "Flush log file") \ 328 __PMC_OP(GETCPUINFO, "Get system CPU information") \ 329 __PMC_OP(GETDRIVERSTATS, "Get driver statistics") \ 330 __PMC_OP(GETMODULEVERSION, "Get module version") \ 331 __PMC_OP(GETPMCINFO, "Get per-cpu PMC information") \ 332 __PMC_OP(PMCADMIN, "Set PMC state") \ 333 __PMC_OP(PMCALLOCATE, "Allocate and configure a PMC") \ 334 __PMC_OP(PMCATTACH, "Attach a PMC to a process") \ 335 __PMC_OP(PMCDETACH, "Detach a PMC from a process") \ 336 __PMC_OP(PMCGETMSR, "Get a PMC's hardware address") \ 337 __PMC_OP(PMCRELEASE, "Release a PMC") \ 338 __PMC_OP(PMCRW, "Read/Set a PMC") \ 339 __PMC_OP(PMCSETCOUNT, "Set initial count/sampling rate") \ 340 __PMC_OP(PMCSTART, "Start a PMC") \ 341 __PMC_OP(PMCSTOP, "Stop a PMC") \ 342 __PMC_OP(WRITELOG, "Write a cookie to the log file") \ 343 __PMC_OP(CLOSELOG, "Close log file") \ 344 __PMC_OP(GETDYNEVENTINFO, "Get dynamic events list") 345 346 347 enum pmc_ops { 348 #undef __PMC_OP 349 #define __PMC_OP(N, D) PMC_OP_##N, 350 __PMC_OPS() 351 }; 352 353 354 /* 355 * Flags used in operations on PMCs. 
356 */ 357 358 #define PMC_F_FORCE 0x00000001 /*OP ADMIN force operation */ 359 #define PMC_F_DESCENDANTS 0x00000002 /*OP ALLOCATE track descendants */ 360 #define PMC_F_LOG_PROCCSW 0x00000004 /*OP ALLOCATE track ctx switches */ 361 #define PMC_F_LOG_PROCEXIT 0x00000008 /*OP ALLOCATE log proc exits */ 362 #define PMC_F_NEWVALUE 0x00000010 /*OP RW write new value */ 363 #define PMC_F_OLDVALUE 0x00000020 /*OP RW get old value */ 364 #define PMC_F_KGMON 0x00000040 /*OP ALLOCATE kgmon(8) profiling */ 365 /* V2 API */ 366 #define PMC_F_CALLCHAIN 0x00000080 /*OP ALLOCATE capture callchains */ 367 368 /* internal flags */ 369 #define PMC_F_ATTACHED_TO_OWNER 0x00010000 /*attached to owner*/ 370 #define PMC_F_NEEDS_LOGFILE 0x00020000 /*needs log file */ 371 #define PMC_F_ATTACH_DONE 0x00040000 /*attached at least once */ 372 373 #define PMC_CALLCHAIN_DEPTH_MAX 128 374 375 #define PMC_CC_F_USERSPACE 0x01 /*userspace callchain*/ 376 377 /* 378 * Cookies used to denote allocated PMCs, and the values of PMCs. 379 */ 380 381 typedef uint32_t pmc_id_t; 382 typedef uint64_t pmc_value_t; 383 384 #define PMC_ID_INVALID (~ (pmc_id_t) 0) 385 386 /* 387 * PMC IDs have the following format: 388 * 389 * +--------+----------+-----------+-----------+ 390 * | CPU | PMC MODE | PMC CLASS | ROW INDEX | 391 * +--------+----------+-----------+-----------+ 392 * 393 * where each field is 8 bits wide. Field 'CPU' is set to the 394 * requested CPU for system-wide PMCs or PMC_CPU_ANY for process-mode 395 * PMCs. Field 'PMC MODE' is the allocated PMC mode. Field 'PMC 396 * CLASS' is the class of the PMC. Field 'ROW INDEX' is the row index 397 * for the PMC. 398 * 399 * The 'ROW INDEX' ranges over 0..NWPMCS where NHWPMCS is the total 400 * number of hardware PMCs on this cpu. 
401 */ 402 403 404 #define PMC_ID_TO_ROWINDEX(ID) ((ID) & 0xFF) 405 #define PMC_ID_TO_CLASS(ID) (((ID) & 0xFF00) >> 8) 406 #define PMC_ID_TO_MODE(ID) (((ID) & 0xFF0000) >> 16) 407 #define PMC_ID_TO_CPU(ID) (((ID) & 0xFF000000) >> 24) 408 #define PMC_ID_MAKE_ID(CPU,MODE,CLASS,ROWINDEX) \ 409 ((((CPU) & 0xFF) << 24) | (((MODE) & 0xFF) << 16) | \ 410 (((CLASS) & 0xFF) << 8) | ((ROWINDEX) & 0xFF)) 411 412 /* 413 * Data structures for system calls supported by the pmc driver. 414 */ 415 416 /* 417 * OP PMCALLOCATE 418 * 419 * Allocate a PMC on the named CPU. 420 */ 421 422 #define PMC_CPU_ANY ~0 423 424 struct pmc_op_pmcallocate { 425 uint32_t pm_caps; /* PMC_CAP_* */ 426 uint32_t pm_cpu; /* CPU number or PMC_CPU_ANY */ 427 enum pmc_class pm_class; /* class of PMC desired */ 428 enum pmc_event pm_ev; /* [enum pmc_event] desired */ 429 uint32_t pm_flags; /* additional modifiers PMC_F_* */ 430 enum pmc_mode pm_mode; /* desired mode */ 431 pmc_id_t pm_pmcid; /* [return] process pmc id */ 432 433 union pmc_md_op_pmcallocate pm_md; /* MD layer extensions */ 434 }; 435 436 /* 437 * OP PMCADMIN 438 * 439 * Set the administrative state (i.e., whether enabled or disabled) of 440 * a PMC 'pm_pmc' on CPU 'pm_cpu'. Note that 'pm_pmc' specifies an 441 * absolute PMC number and need not have been first allocated by the 442 * calling process. 443 */ 444 445 struct pmc_op_pmcadmin { 446 int pm_cpu; /* CPU# */ 447 uint32_t pm_flags; /* flags */ 448 int pm_pmc; /* PMC# */ 449 enum pmc_state pm_state; /* desired state */ 450 }; 451 452 /* 453 * OP PMCATTACH / OP PMCDETACH 454 * 455 * Attach/detach a PMC and a process. 456 */ 457 458 struct pmc_op_pmcattach { 459 pmc_id_t pm_pmc; /* PMC to attach to */ 460 pid_t pm_pid; /* target process */ 461 }; 462 463 /* 464 * OP PMCSETCOUNT 465 * 466 * Set the sampling rate (i.e., the reload count) for statistical counters. 467 * 'pm_pmcid' need to have been previously allocated using PMCALLOCATE. 
468 */ 469 470 struct pmc_op_pmcsetcount { 471 pmc_value_t pm_count; /* initial/sample count */ 472 pmc_id_t pm_pmcid; /* PMC id to set */ 473 }; 474 475 476 /* 477 * OP PMCRW 478 * 479 * Read the value of a PMC named by 'pm_pmcid'. 'pm_pmcid' needs 480 * to have been previously allocated using PMCALLOCATE. 481 */ 482 483 484 struct pmc_op_pmcrw { 485 uint32_t pm_flags; /* PMC_F_{OLD,NEW}VALUE*/ 486 pmc_id_t pm_pmcid; /* pmc id */ 487 pmc_value_t pm_value; /* new&returned value */ 488 }; 489 490 491 /* 492 * OP GETPMCINFO 493 * 494 * retrieve PMC state for a named CPU. The caller is expected to 495 * allocate 'npmc' * 'struct pmc_info' bytes of space for the return 496 * values. 497 */ 498 499 struct pmc_info { 500 char pm_name[PMC_NAME_MAX]; /* pmc name */ 501 enum pmc_class pm_class; /* enum pmc_class */ 502 int pm_enabled; /* whether enabled */ 503 enum pmc_disp pm_rowdisp; /* FREE, THREAD or STANDLONE */ 504 pid_t pm_ownerpid; /* owner, or -1 */ 505 enum pmc_mode pm_mode; /* current mode [enum pmc_mode] */ 506 enum pmc_event pm_event; /* current event */ 507 uint32_t pm_flags; /* current flags */ 508 pmc_value_t pm_reloadcount; /* sampling counters only */ 509 }; 510 511 struct pmc_op_getpmcinfo { 512 int32_t pm_cpu; /* 0 <= cpu < mp_maxid */ 513 struct pmc_info pm_pmcs[]; /* space for 'npmc' structures */ 514 }; 515 516 517 /* 518 * OP GETCPUINFO 519 * 520 * Retrieve system CPU information. 
521 */ 522 523 524 struct pmc_classinfo { 525 enum pmc_class pm_class; /* class id */ 526 uint32_t pm_caps; /* counter capabilities */ 527 uint32_t pm_width; /* width of the PMC */ 528 uint32_t pm_num; /* number of PMCs in class */ 529 }; 530 531 struct pmc_op_getcpuinfo { 532 enum pmc_cputype pm_cputype; /* what kind of CPU */ 533 uint32_t pm_ncpu; /* max CPU number */ 534 uint32_t pm_npmc; /* #PMCs per CPU */ 535 uint32_t pm_nclass; /* #classes of PMCs */ 536 struct pmc_classinfo pm_classes[PMC_CLASS_MAX]; 537 }; 538 539 /* 540 * OP CONFIGURELOG 541 * 542 * Configure a log file for writing system-wide statistics to. 543 */ 544 545 struct pmc_op_configurelog { 546 int pm_flags; 547 int pm_logfd; /* logfile fd (or -1) */ 548 }; 549 550 /* 551 * OP GETDRIVERSTATS 552 * 553 * Retrieve pmc(4) driver-wide statistics. 554 */ 555 556 struct pmc_op_getdriverstats { 557 unsigned int pm_intr_ignored; /* #interrupts ignored */ 558 unsigned int pm_intr_processed; /* #interrupts processed */ 559 unsigned int pm_intr_bufferfull; /* #interrupts with ENOSPC */ 560 unsigned int pm_syscalls; /* #syscalls */ 561 unsigned int pm_syscall_errors; /* #syscalls with errors */ 562 unsigned int pm_buffer_requests; /* #buffer requests */ 563 unsigned int pm_buffer_requests_failed; /* #failed buffer requests */ 564 unsigned int pm_log_sweeps; /* #sample buffer processing 565 passes */ 566 }; 567 568 /* 569 * OP RELEASE / OP START / OP STOP 570 * 571 * Simple operations on a PMC id. 572 */ 573 574 struct pmc_op_simple { 575 pmc_id_t pm_pmcid; 576 }; 577 578 /* 579 * OP WRITELOG 580 * 581 * Flush the current log buffer and write 4 bytes of user data to it. 582 */ 583 584 struct pmc_op_writelog { 585 uint32_t pm_userdata; 586 }; 587 588 /* 589 * OP GETMSR 590 * 591 * Retrieve the machine specific address associated with the allocated 592 * PMC. This number can be used subsequently with a read-performance-counter 593 * instruction. 
594 */ 595 596 struct pmc_op_getmsr { 597 uint32_t pm_msr; /* machine specific address */ 598 pmc_id_t pm_pmcid; /* allocated pmc id */ 599 }; 600 601 /* 602 * OP GETDYNEVENTINFO 603 * 604 * Retrieve a PMC dynamic class events list. 605 */ 606 607 struct pmc_dyn_event_descr { 608 char pm_ev_name[PMC_NAME_MAX]; 609 enum pmc_event pm_ev_code; 610 }; 611 612 struct pmc_op_getdyneventinfo { 613 enum pmc_class pm_class; 614 unsigned int pm_nevent; 615 struct pmc_dyn_event_descr pm_events[PMC_EV_DYN_COUNT]; 616 }; 617 618 #ifdef _KERNEL 619 620 #include <sys/malloc.h> 621 #include <sys/sysctl.h> 622 #include <sys/_cpuset.h> 623 624 #include <machine/frame.h> 625 626 #define PMC_HASH_SIZE 1024 627 #define PMC_MTXPOOL_SIZE 2048 628 #define PMC_LOG_BUFFER_SIZE 4 629 #define PMC_NLOGBUFFERS 1024 630 #define PMC_NSAMPLES 1024 631 #define PMC_CALLCHAIN_DEPTH 32 632 633 #define PMC_SYSCTL_NAME_PREFIX "kern." PMC_MODULE_NAME "." 634 635 /* 636 * Locking keys 637 * 638 * (b) - pmc_bufferlist_mtx (spin lock) 639 * (k) - pmc_kthread_mtx (sleep lock) 640 * (o) - po->po_mtx (spin lock) 641 */ 642 643 /* 644 * PMC commands 645 */ 646 647 struct pmc_syscall_args { 648 register_t pmop_code; /* one of PMC_OP_* */ 649 void *pmop_data; /* syscall parameter */ 650 }; 651 652 /* 653 * Interface to processor specific s1tuff 654 */ 655 656 /* 657 * struct pmc_descr 658 * 659 * Machine independent (i.e., the common parts) of a human readable 660 * PMC description. 661 */ 662 663 struct pmc_descr { 664 char pd_name[PMC_NAME_MAX]; /* name */ 665 uint32_t pd_caps; /* capabilities */ 666 enum pmc_class pd_class; /* class of the PMC */ 667 uint32_t pd_width; /* width in bits */ 668 }; 669 670 /* 671 * struct pmc_target 672 * 673 * This structure records all the target processes associated with a 674 * PMC. 
675 */ 676 677 struct pmc_target { 678 LIST_ENTRY(pmc_target) pt_next; 679 struct pmc_process *pt_process; /* target descriptor */ 680 }; 681 682 /* 683 * struct pmc 684 * 685 * Describes each allocated PMC. 686 * 687 * Each PMC has precisely one owner, namely the process that allocated 688 * the PMC. 689 * 690 * A PMC may be attached to multiple target processes. The 691 * 'pm_targets' field links all the target processes being monitored 692 * by this PMC. 693 * 694 * The 'pm_savedvalue' field is protected by a mutex. 695 * 696 * On a multi-cpu machine, multiple target threads associated with a 697 * process-virtual PMC could be concurrently executing on different 698 * CPUs. The 'pm_runcount' field is atomically incremented every time 699 * the PMC gets scheduled on a CPU and atomically decremented when it 700 * get descheduled. Deletion of a PMC is only permitted when this 701 * field is '0'. 702 * 703 */ 704 705 struct pmc { 706 LIST_HEAD(,pmc_target) pm_targets; /* list of target processes */ 707 LIST_ENTRY(pmc) pm_next; /* owner's list */ 708 709 /* 710 * System-wide PMCs are allocated on a CPU and are not moved 711 * around. For system-wide PMCs we record the CPU the PMC was 712 * allocated on in the 'CPU' field of the pmc ID. 713 * 714 * Virtual PMCs run on whichever CPU is currently executing 715 * their targets' threads. For these PMCs we need to save 716 * their current PMC counter values when they are taken off 717 * CPU. 718 */ 719 720 union { 721 pmc_value_t pm_savedvalue; /* Virtual PMCS */ 722 } pm_gv; 723 724 /* 725 * For sampling mode PMCs, we keep track of the PMC's "reload 726 * count", which is the counter value to be loaded in when 727 * arming the PMC for the next counting session. For counting 728 * modes on PMCs that are read-only (e.g., the x86 TSC), we 729 * keep track of the initial value at the start of 730 * counting-mode operation. 
731 */ 732 733 union { 734 pmc_value_t pm_reloadcount; /* sampling PMC modes */ 735 pmc_value_t pm_initial; /* counting PMC modes */ 736 } pm_sc; 737 738 volatile cpuset_t pm_stalled; /* marks stalled sampling PMCs */ 739 volatile cpuset_t pm_cpustate; /* CPUs where PMC should be active */ 740 uint32_t pm_caps; /* PMC capabilities */ 741 enum pmc_event pm_event; /* event being measured */ 742 uint32_t pm_flags; /* additional flags PMC_F_... */ 743 struct pmc_owner *pm_owner; /* owner thread state */ 744 int pm_runcount; /* #cpus currently on */ 745 enum pmc_state pm_state; /* current PMC state */ 746 uint32_t pm_overflowcnt; /* count overflow interrupts */ 747 748 /* 749 * The PMC ID field encodes the row-index for the PMC, its 750 * mode, class and the CPU# associated with the PMC. 751 */ 752 753 pmc_id_t pm_id; /* allocated PMC id */ 754 755 /* md extensions */ 756 union pmc_md_pmc pm_md; 757 }; 758 759 /* 760 * Accessor macros for 'struct pmc' 761 */ 762 763 #define PMC_TO_MODE(P) PMC_ID_TO_MODE((P)->pm_id) 764 #define PMC_TO_CLASS(P) PMC_ID_TO_CLASS((P)->pm_id) 765 #define PMC_TO_ROWINDEX(P) PMC_ID_TO_ROWINDEX((P)->pm_id) 766 #define PMC_TO_CPU(P) PMC_ID_TO_CPU((P)->pm_id) 767 768 769 /* 770 * struct pmc_process 771 * 772 * Record a 'target' process being profiled. 773 * 774 * The target process being profiled could be different from the owner 775 * process which allocated the PMCs. Each target process descriptor 776 * is associated with NHWPMC 'struct pmc *' pointers. Each PMC at a 777 * given hardware row-index 'n' will use slot 'n' of the 'pp_pmcs[]' 778 * array. The size of this structure is thus PMC architecture 779 * dependent. 
780 * 781 */ 782 783 struct pmc_targetstate { 784 struct pmc *pp_pmc; /* target PMC */ 785 pmc_value_t pp_pmcval; /* per-process value */ 786 }; 787 788 struct pmc_process { 789 LIST_ENTRY(pmc_process) pp_next; /* hash chain */ 790 int pp_refcnt; /* reference count */ 791 uint32_t pp_flags; /* flags PMC_PP_* */ 792 struct proc *pp_proc; /* target thread */ 793 struct pmc_targetstate pp_pmcs[]; /* NHWPMCs */ 794 }; 795 796 #define PMC_PP_ENABLE_MSR_ACCESS 0x00000001 797 798 /* 799 * struct pmc_owner 800 * 801 * We associate a PMC with an 'owner' process. 802 * 803 * A process can be associated with 0..NCPUS*NHWPMC PMCs during its 804 * lifetime, where NCPUS is the numbers of CPUS in the system and 805 * NHWPMC is the number of hardware PMCs per CPU. These are 806 * maintained in the list headed by the 'po_pmcs' to save on space. 807 * 808 */ 809 810 struct pmc_owner { 811 LIST_ENTRY(pmc_owner) po_next; /* hash chain */ 812 LIST_ENTRY(pmc_owner) po_ssnext; /* list of SS PMC owners */ 813 LIST_HEAD(, pmc) po_pmcs; /* owned PMC list */ 814 TAILQ_HEAD(, pmclog_buffer) po_logbuffers; /* (o) logbuffer list */ 815 struct mtx po_mtx; /* spin lock for (o) */ 816 struct proc *po_owner; /* owner proc */ 817 uint32_t po_flags; /* (k) flags PMC_PO_* */ 818 struct proc *po_kthread; /* (k) helper kthread */ 819 struct pmclog_buffer *po_curbuf; /* current log buffer */ 820 struct file *po_file; /* file reference */ 821 int po_error; /* recorded error */ 822 short po_sscount; /* # SS PMCs owned */ 823 short po_logprocmaps; /* global mappings done */ 824 }; 825 826 #define PMC_PO_OWNS_LOGFILE 0x00000001 /* has a log file */ 827 #define PMC_PO_SHUTDOWN 0x00000010 /* in the process of shutdown */ 828 #define PMC_PO_INITIAL_MAPPINGS_DONE 0x00000020 829 830 /* 831 * struct pmc_hw -- describe the state of the PMC hardware 832 * 833 * When in use, a HW PMC is associated with one allocated 'struct pmc' 834 * pointed to by field 'phw_pmc'. When inactive, this field is NULL. 
835 * 836 * On an SMP box, one or more HW PMC's in process virtual mode with 837 * the same 'phw_pmc' could be executing on different CPUs. In order 838 * to handle this case correctly, we need to ensure that only 839 * incremental counts get added to the saved value in the associated 840 * 'struct pmc'. The 'phw_save' field is used to keep the saved PMC 841 * value at the time the hardware is started during this context 842 * switch (i.e., the difference between the new (hardware) count and 843 * the saved count is atomically added to the count field in 'struct 844 * pmc' at context switch time). 845 * 846 */ 847 848 struct pmc_hw { 849 uint32_t phw_state; /* see PHW_* macros below */ 850 struct pmc *phw_pmc; /* current thread PMC */ 851 }; 852 853 #define PMC_PHW_RI_MASK 0x000000FF 854 #define PMC_PHW_CPU_SHIFT 8 855 #define PMC_PHW_CPU_MASK 0x0000FF00 856 #define PMC_PHW_FLAGS_SHIFT 16 857 #define PMC_PHW_FLAGS_MASK 0xFFFF0000 858 859 #define PMC_PHW_INDEX_TO_STATE(ri) ((ri) & PMC_PHW_RI_MASK) 860 #define PMC_PHW_STATE_TO_INDEX(state) ((state) & PMC_PHW_RI_MASK) 861 #define PMC_PHW_CPU_TO_STATE(cpu) (((cpu) << PMC_PHW_CPU_SHIFT) & \ 862 PMC_PHW_CPU_MASK) 863 #define PMC_PHW_STATE_TO_CPU(state) (((state) & PMC_PHW_CPU_MASK) >> \ 864 PMC_PHW_CPU_SHIFT) 865 #define PMC_PHW_FLAGS_TO_STATE(flags) (((flags) << PMC_PHW_FLAGS_SHIFT) & \ 866 PMC_PHW_FLAGS_MASK) 867 #define PMC_PHW_STATE_TO_FLAGS(state) (((state) & PMC_PHW_FLAGS_MASK) >> \ 868 PMC_PHW_FLAGS_SHIFT) 869 #define PMC_PHW_FLAG_IS_ENABLED (PMC_PHW_FLAGS_TO_STATE(0x01)) 870 #define PMC_PHW_FLAG_IS_SHAREABLE (PMC_PHW_FLAGS_TO_STATE(0x02)) 871 872 /* 873 * struct pmc_sample 874 * 875 * Space for N (tunable) PC samples and associated control data. 
 */

/*
 * struct pmc_sample
 *
 * A single sample captured at PMC interrupt time, holding the
 * interrupted context and the start of its saved call chain.
 */
struct pmc_sample {
	uint16_t		ps_nsamples;	/* callchain depth */
	uint8_t			ps_cpu;		/* cpu number */
	uint8_t			ps_flags;	/* other flags */
	pid_t			ps_pid;		/* process PID or -1 */
	struct thread		*ps_td;		/* which thread */
	struct pmc		*ps_pmc;	/* interrupting PMC */
	uintptr_t		*ps_pc;		/* (const) callchain start */
};

/*
 * Distinguished uint16_t values matching the type of 'ps_nsamples'.
 * NOTE(review): these appear to mark a sample slot as free vs. being
 * filled -- confirm against the sampling code that consumes them.
 */
#define	PMC_SAMPLE_FREE		((uint16_t) 0)
#define	PMC_SAMPLE_INUSE	((uint16_t) 0xFFFF)

/*
 * struct pmc_samplebuffer
 *
 * A buffer of pmc_sample entries ('ps_samples[]', a C99 flexible
 * array member bounded by 'ps_fence') with separate read and write
 * cursors.  The cursors are 'volatile' qualified pointers, presumably
 * because the writer side runs at interrupt time -- verify in callers.
 */
struct pmc_samplebuffer {
	struct pmc_sample * volatile ps_read;	/* read pointer */
	struct pmc_sample * volatile ps_write;	/* write pointer */
	uintptr_t		*ps_callchains;	/* all saved call chains */
	struct pmc_sample	*ps_fence;	/* one beyond ps_samples[] */
	struct pmc_sample	ps_samples[];	/* array of sample entries */
};


/*
 * struct pmc_cpustate
 *
 * A CPU is modelled as a collection of HW PMCs with space for additional
 * flags.
 */

struct pmc_cpu {
	uint32_t		pc_state;	/* physical cpu number + flags */
	struct pmc_samplebuffer	*pc_sb[2];	/* space for samples */
	struct pmc_hw		*pc_hwpmcs[];	/* 'npmc' pointers */
};

/*
 * Accessors for 'pc_state': the physical CPU number lives in the low
 * 8 bits and per-CPU flags occupy the upper 24 bits.
 */
#define	PMC_PCPU_CPU_MASK		0x000000FF
#define	PMC_PCPU_FLAGS_MASK		0xFFFFFF00
#define	PMC_PCPU_FLAGS_SHIFT		8
#define	PMC_PCPU_STATE_TO_CPU(S)	((S) & PMC_PCPU_CPU_MASK)
#define	PMC_PCPU_STATE_TO_FLAGS(S)	(((S) & PMC_PCPU_FLAGS_MASK) >> PMC_PCPU_FLAGS_SHIFT)
#define	PMC_PCPU_FLAGS_TO_STATE(F)	(((F) << PMC_PCPU_FLAGS_SHIFT) & PMC_PCPU_FLAGS_MASK)
#define	PMC_PCPU_CPU_TO_STATE(C)	((C) & PMC_PCPU_CPU_MASK)
#define	PMC_PCPU_FLAG_HTT		(PMC_PCPU_FLAGS_TO_STATE(0x1))

/*
 * struct pmc_binding
 *
 * CPU binding information: records whether the current thread has been
 * bound to a specific CPU for a PMC operation, and if so which one.
 */

struct pmc_binding {
	int	pb_bound;	/* is bound? */
	int	pb_cpu;		/* if so, to which CPU */
};


struct pmc_mdep;

/*
 * struct pmc_classdep
 *
 * PMC class-dependent operations: one ops vector per PMC class,
 * filled in by the machine-dependent backend.
 */
struct pmc_classdep {
	uint32_t	pcd_caps;	/* class capabilities */
	enum pmc_class	pcd_class;	/* class id */
	int		pcd_num;	/* number of PMCs */
	int		pcd_ri;		/* row index of the first PMC in class */
	int		pcd_width;	/* width of the PMC */

	/* configuring/reading/writing the hardware PMCs */
	int (*pcd_config_pmc)(int _cpu, int _ri, struct pmc *_pm);
	int (*pcd_get_config)(int _cpu, int _ri, struct pmc **_ppm);
	int (*pcd_read_pmc)(int _cpu, int _ri, pmc_value_t *_value);
	int (*pcd_write_pmc)(int _cpu, int _ri, pmc_value_t _value);

	/* pmc allocation/release */
	int (*pcd_allocate_pmc)(int _cpu, int _ri, struct pmc *_t,
	    const struct pmc_op_pmcallocate *_a);
	int (*pcd_release_pmc)(int _cpu, int _ri, struct pmc *_pm);

	/* starting and stopping PMCs */
	int (*pcd_start_pmc)(int _cpu, int _ri);
	int (*pcd_stop_pmc)(int _cpu, int _ri);

	/* description */
	int (*pcd_describe)(int _cpu, int _ri, struct pmc_info *_pi,
	    struct pmc **_ppmc);

	/* class-dependent initialization & finalization */
	int (*pcd_pcpu_init)(struct pmc_mdep *_md, int _cpu);
	int (*pcd_pcpu_fini)(struct pmc_mdep *_md, int _cpu);

	/* machine-specific interface */
	int (*pcd_get_msr)(int _ri, uint32_t *_msr);
};

/*
 * struct pmc_mdep
 *
 * Machine dependent bits needed per CPU type: global counts, the
 * MD entry points shared by all classes, and a trailing flexible
 * array of per-class ops vectors ('pmd_nclass' entries).
 */

struct pmc_mdep  {
	uint32_t	pmd_cputype;	/* from enum pmc_cputype */
	uint32_t	pmd_npmc;	/* number of PMCs per CPU */
	uint32_t	pmd_nclass;	/* number of PMC classes present */

	/*
	 * Machine dependent methods.
	 */

	/* per-cpu initialization and finalization */
	int (*pmd_pcpu_init)(struct pmc_mdep *_md, int _cpu);
	int (*pmd_pcpu_fini)(struct pmc_mdep *_md, int _cpu);

	/* thread context switch in/out */
	int (*pmd_switch_in)(struct pmc_cpu *_p, struct pmc_process *_pp);
	int (*pmd_switch_out)(struct pmc_cpu *_p, struct pmc_process *_pp);

	/* handle a PMC interrupt */
	int (*pmd_intr)(int _cpu, struct trapframe *_tf);

	/*
	 * PMC class dependent information.
	 */
	struct pmc_classdep pmd_classdep[];
};

/*
 * Per-CPU state.  This is an array of 'mp_ncpu' pointers
 * to struct pmc_cpu descriptors.
 */

extern struct pmc_cpu **pmc_pcpu;

/* driver statistics */
extern struct pmc_op_getdriverstats pmc_stats;

#if	defined(HWPMC_DEBUG)
#include <sys/ktr.h>

/*
 * Debug flags, one word per major flag group.  Each word is a bitmask
 * indexed by the PMC_DEBUG_MIN_* minor numbers below.
 */
struct pmc_debugflags {
	int	pdb_CPU;
	int	pdb_CSW;
	int	pdb_LOG;
	int	pdb_MDP;
	int	pdb_MOD;
	int	pdb_OWN;
	int	pdb_PMC;
	int	pdb_PRC;
	int	pdb_SAM;
};

extern struct pmc_debugflags pmc_debugflags;

#define	KTR_PMC		KTR_SUBSYS

#define	PMC_DEBUG_STRSIZE		128
/* Nine zeros: one per pdb_* field of struct pmc_debugflags above. */
#define	PMC_DEBUG_DEFAULT_FLAGS		{ 0, 0, 0, 0, 0, 0, 0, 0, 0 }

/*
 * PMCDBGn(M, N, L, F, p1..pn): emit a KTR trace carrying 'n' arguments
 * via CTRn(9) if minor flag 'N' is set in the flag word of major group
 * 'M'.  'L' is a level tag stringified into the trace prefix and 'F'
 * is the CTR format string.  M, N and L are pasted/stringified, so
 * they must be bare identifiers, not expressions.
 */
#define	PMCDBG0(M, N, L, F) do {					\
	if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))	\
		CTR0(KTR_PMC, #M ":" #N ":" #L ": " F);			\
} while (0)
#define	PMCDBG1(M, N, L, F, p1) do {					\
	if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))	\
		CTR1(KTR_PMC, #M ":" #N ":" #L ": " F, p1);		\
} while (0)
#define	PMCDBG2(M, N, L, F, p1, p2) do {				\
	if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))	\
		CTR2(KTR_PMC, #M ":" #N ":" #L ": " F, p1, p2);		\
} while (0)
#define	PMCDBG3(M, N, L, F, p1, p2, p3) do {				\
	if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))	\
		CTR3(KTR_PMC, #M ":" #N ":" #L ": " F, p1, p2, p3);	\
} while (0)
#define	PMCDBG4(M, N, L, F, p1, p2, p3, p4) do {			\
	if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))	\
		CTR4(KTR_PMC, #M ":" #N ":" #L ": " F, p1, p2, p3, p4);\
} while (0)
#define	PMCDBG5(M, N, L, F, p1, p2, p3, p4, p5) do {			\
	if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))	\
		CTR5(KTR_PMC, #M ":" #N ":" #L ": " F, p1, p2, p3, p4,	\
		    p5);						\
} while (0)
#define	PMCDBG6(M, N, L, F, p1, p2, p3, p4, p5, p6) do {		\
	if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))	\
		CTR6(KTR_PMC, #M ":" #N ":" #L ": " F, p1, p2, p3, p4,	\
		    p5, p6);						\
} while (0)

/* Major numbers */
#define	PMC_DEBUG_MAJ_CPU		0 /* cpu switches */
#define	PMC_DEBUG_MAJ_CSW		1 /* context switches */
#define	PMC_DEBUG_MAJ_LOG		2 /* logging */
#define	PMC_DEBUG_MAJ_MDP		3 /* machine dependent */
#define	PMC_DEBUG_MAJ_MOD		4 /* misc module infrastructure */
#define	PMC_DEBUG_MAJ_OWN		5 /* owner */
#define	PMC_DEBUG_MAJ_PMC		6 /* pmc management */
#define	PMC_DEBUG_MAJ_PRC		7 /* processes */
#define	PMC_DEBUG_MAJ_SAM		8 /* sampling */

/*
 * Minor numbers.  These are bit indices within a major group's flag
 * word.  Minors 0-7 are shared by all groups; minors 8 and above are
 * specific to one major group, so the same value may legitimately
 * recur under different groups.
 */

/* Common (8 bits) */
#define	PMC_DEBUG_MIN_ALL		0 /* allocation */
#define	PMC_DEBUG_MIN_REL		1 /* release */
#define	PMC_DEBUG_MIN_OPS		2 /* ops: start, stop, ... */
#define	PMC_DEBUG_MIN_INI		3 /* init */
#define	PMC_DEBUG_MIN_FND		4 /* find */

/* MODULE */
#define	PMC_DEBUG_MIN_PMH	       14 /* pmc_hook */
#define	PMC_DEBUG_MIN_PMS	       15 /* pmc_syscall */

/* OWN */
#define	PMC_DEBUG_MIN_ORM		8 /* owner remove */
#define	PMC_DEBUG_MIN_OMR		9 /* owner maybe remove */

/* PROCESSES */
#define	PMC_DEBUG_MIN_TLK		8 /* link target */
#define	PMC_DEBUG_MIN_TUL		9 /* unlink target */
#define	PMC_DEBUG_MIN_EXT	       10 /* process exit */
#define	PMC_DEBUG_MIN_EXC	       11 /* process exec */
#define	PMC_DEBUG_MIN_FRK	       12 /* process fork */
#define	PMC_DEBUG_MIN_ATT	       13 /* attach/detach */
#define	PMC_DEBUG_MIN_SIG	       14 /* signalling */

/* CONTEXT SWITCHES */
#define	PMC_DEBUG_MIN_SWI		8 /* switch in */
#define	PMC_DEBUG_MIN_SWO		9 /* switch out */

/* PMC */
#define	PMC_DEBUG_MIN_REG		8 /* pmc register */
#define	PMC_DEBUG_MIN_ALR		9 /* allocate row */

/* MACHINE DEPENDENT LAYER */
#define	PMC_DEBUG_MIN_REA		8 /* read */
#define	PMC_DEBUG_MIN_WRI		9 /* write */
#define	PMC_DEBUG_MIN_CFG	       10 /* config */
#define	PMC_DEBUG_MIN_STA	       11 /* start */
#define	PMC_DEBUG_MIN_STO	       12 /* stop */
#define	PMC_DEBUG_MIN_INT	       13 /* interrupts */

/* CPU */
#define	PMC_DEBUG_MIN_BND		8 /* bind */
#define	PMC_DEBUG_MIN_SEL		9 /* select */

/* LOG */
#define	PMC_DEBUG_MIN_GTB		8 /* get buf */
#define	PMC_DEBUG_MIN_SIO		9 /* schedule i/o */
#define	PMC_DEBUG_MIN_FLS	       10 /* flush */
#define	PMC_DEBUG_MIN_SAM	       11 /* sample */
#define	PMC_DEBUG_MIN_CLO	       12 /* close */

#else
/* Debugging disabled: all PMCDBGn() invocations compile to nothing. */
#define	PMCDBG0(M, N, L, F)		/* nothing */
#define	PMCDBG1(M, N, L, F, p1)
#define	PMCDBG2(M, N, L, F, p1, p2)
#define	PMCDBG3(M, N, L, F, p1, p2, p3)
#define	PMCDBG4(M, N, L, F, p1, p2, p3, p4)
#define	PMCDBG5(M, N, L, F, p1, p2, p3, p4, p5)
#define	PMCDBG6(M, N, L, F, p1, p2, p3, p4, p5, p6)
#endif

/* declare a dedicated memory pool */
MALLOC_DECLARE(M_PMC);

/*
 * Functions
 */

struct pmc_mdep *pmc_md_initialize(void);	/* MD init function */
void	pmc_md_finalize(struct pmc_mdep *_md);	/* MD fini function */
int	pmc_getrowdisp(int _ri);
int	pmc_process_interrupt(int _cpu, int _soft, struct pmc *_pm,
    struct trapframe *_tf, int _inuserspace);
int	pmc_save_kernel_callchain(uintptr_t *_cc, int _maxsamples,
    struct trapframe *_tf);
int	pmc_save_user_callchain(uintptr_t *_cc, int _maxsamples,
    struct trapframe *_tf);
struct pmc_mdep *pmc_mdep_alloc(int nclasses);
void	pmc_mdep_free(struct pmc_mdep *md);
#endif /* _KERNEL */
#endif /* _SYS_PMC_H_ */