// SPDX-License-Identifier: GPL-2.0
/*
 * Local APIC based NMI watchdog for various CPUs.
 *
 * This file also handles reservation of performance counters for coordination
 * with other users.
 *
 * Note that these events normally don't tick when the CPU idles. This means
 * the frequency varies with CPU load.
 *
 * Original code for K7/P6 written by Keith Owens
 *
 */

#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/kprobes.h>

#include <asm/nmi.h>
#include <asm/apic.h>
#include <asm/perf_event.h>

/*
 * This number is calculated from Intel's MSR_P4_CRU_ESCR5 register and its
 * offset from MSR_P4_BSU_ESCR0.
 *
 * It will be the max for all platforms (for now)
 */
#define NMI_MAX_COUNTER_BITS 66
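/*
 * In other words (a sketch, not a spec): the reservation bit computed for
 * MSR_P4_CRU_ESCR5 by nmi_evntsel_msr_to_bit() below is
 * MSR_P4_CRU_ESCR5 - MSR_P4_BSU_ESCR0, presumably 65, so 66 bits are enough
 * to cover every counter/event-select register handled in this file.
 */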

/*
 * perfctr_nmi_owner tracks the ownership of the perfctr registers:
 * evntsel_nmi_owner tracks the ownership of the event selection registers.
 * - different performance counters/event selects may be reserved for
 *   different subsystems; this reservation system just tries to coordinate
 *   things a little (see the usage sketch below)
 */
static DECLARE_BITMAP(perfctr_nmi_owner, NMI_MAX_COUNTER_BITS);
static DECLARE_BITMAP(evntsel_nmi_owner, NMI_MAX_COUNTER_BITS);

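/*
 * Illustrative sketch only (not used by this file): a hypothetical caller
 * would typically claim the event select and the counter together before
 * programming either MSR, and give both back when done. The reserve
 * helpers return 1 when the caller may go ahead and 0 when the register is
 * already owned by someone else:
 *
 *	if (!reserve_evntsel_nmi(MSR_ARCH_PERFMON_EVENTSEL0))
 *		return 0;
 *	if (!reserve_perfctr_nmi(MSR_ARCH_PERFMON_PERFCTR0)) {
 *		release_evntsel_nmi(MSR_ARCH_PERFMON_EVENTSEL0);
 *		return 0;
 *	}
 *	... program the event select and counter, handle the NMIs ...
 *	release_perfctr_nmi(MSR_ARCH_PERFMON_PERFCTR0);
 *	release_evntsel_nmi(MSR_ARCH_PERFMON_EVENTSEL0);
 */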
/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
{
	/* returns the bit offset of the performance counter register */
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_HYGON:
	case X86_VENDOR_AMD:
		if (msr >= MSR_F15H_PERF_CTR)
			return (msr - MSR_F15H_PERF_CTR) >> 1;
		return msr - MSR_K7_PERFCTR0;
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
			return msr - MSR_ARCH_PERFMON_PERFCTR0;

		switch (boot_cpu_data.x86) {
		case 6:
			return msr - MSR_P6_PERFCTR0;
		case 11:
			return msr - MSR_KNC_PERFCTR0;
		case 15:
			return msr - MSR_P4_BPU_PERFCTR0;
		}
		break;
	case X86_VENDOR_ZHAOXIN:
	case X86_VENDOR_CENTAUR:
		return msr - MSR_ARCH_PERFMON_PERFCTR0;
	}
	return 0;
}
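/*
 * Worked examples of the mapping above (MSR values as defined in
 * asm/msr-index.h; treat the details as illustrative): on AMD Fam15h the
 * PERF_CTL/PERF_CTR MSRs are interleaved with a stride of two, so counter
 * i lives at MSR_F15H_PERF_CTR + 2 * i and (msr - MSR_F15H_PERF_CTR) >> 1
 * recovers i. With Intel architectural perfmon, counter i is simply
 * MSR_ARCH_PERFMON_PERFCTR0 + i, so the plain subtraction yields i.
 */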

/*
 * converts an msr to an appropriate reservation bit
 * returns the bit offset of the event selection register
 */
static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_HYGON:
	case X86_VENDOR_AMD:
		if (msr >= MSR_F15H_PERF_CTL)
			return (msr - MSR_F15H_PERF_CTL) >> 1;
		return msr - MSR_K7_EVNTSEL0;
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
			return msr - MSR_ARCH_PERFMON_EVENTSEL0;

		switch (boot_cpu_data.x86) {
		case 6:
			return msr - MSR_P6_EVNTSEL0;
		case 11:
			return msr - MSR_KNC_EVNTSEL0;
		case 15:
			return msr - MSR_P4_BSU_ESCR0;
		}
		break;
	case X86_VENDOR_ZHAOXIN:
	case X86_VENDOR_CENTAUR:
		return msr - MSR_ARCH_PERFMON_EVENTSEL0;
	}
	return 0;
}

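/*
 * Returns 1 when the caller may use the counter: either it was free and is
 * now marked as reserved, or the MSR is not one this allocator manages at
 * all. Returns 0 when another subsystem already owns the counter.
 */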
int reserve_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	/* register not managed by the allocator? */
	if (counter > NMI_MAX_COUNTER_BITS)
		return 1;

	if (!test_and_set_bit(counter, perfctr_nmi_owner))
		return 1;
	return 0;
}
EXPORT_SYMBOL(reserve_perfctr_nmi);

void release_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	/* register not managed by the allocator? */
	if (counter > NMI_MAX_COUNTER_BITS)
		return;

	clear_bit(counter, perfctr_nmi_owner);
}
EXPORT_SYMBOL(release_perfctr_nmi);

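/*
 * Same return convention as reserve_perfctr_nmi(), but for the event
 * select register's reservation bit.
 */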
int reserve_evntsel_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_evntsel_msr_to_bit(msr);
	/* register not managed by the allocator? */
	if (counter > NMI_MAX_COUNTER_BITS)
		return 1;

	if (!test_and_set_bit(counter, evntsel_nmi_owner))
		return 1;
	return 0;
}
EXPORT_SYMBOL(reserve_evntsel_nmi);

void release_evntsel_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_evntsel_msr_to_bit(msr);
	/* register not managed by the allocator? */
	if (counter > NMI_MAX_COUNTER_BITS)
		return;

	clear_bit(counter, evntsel_nmi_owner);
}
EXPORT_SYMBOL(release_evntsel_nmi);
163