xref: /freebsd/sys/dev/hwpmc/hwpmc_x86.c (revision 780fb4a2)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2005,2008 Joseph Koshy
5  * Copyright (c) 2007 The FreeBSD Foundation
6  * All rights reserved.
7  *
8  * Portions of this software were developed by A. Joseph Koshy under
9  * sponsorship from the FreeBSD Foundation and Google, Inc.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <sys/param.h>
37 #include <sys/bus.h>
38 #include <sys/pmc.h>
39 #include <sys/proc.h>
40 #include <sys/systm.h>
41 
42 #include <machine/cpu.h>
43 #include <machine/cputypes.h>
44 #include <machine/intr_machdep.h>
45 #if (__FreeBSD_version >= 1100000)
46 #include <x86/apicvar.h>
47 #else
48 #include <machine/apicvar.h>
49 #endif
50 #include <machine/pmc_mdep.h>
51 #include <machine/md_var.h>
52 
53 #include <vm/vm.h>
54 #include <vm/vm_param.h>
55 #include <vm/pmap.h>
56 
57 #include "hwpmc_soft.h"
58 
59 /*
60  * Attempt to walk a user call stack using a too-simple algorithm.
61  * In the general case we need unwind information associated with
62  * the executable to be able to walk the user stack.
63  *
64  * We are handed a trap frame laid down at the time the PMC interrupt
65  * was taken.  If the application is using frame pointers, the saved
66  * PC value could be:
67  * a. at the beginning of a function before the stack frame is laid
68  *    down,
69  * b. just before a 'ret', after the stack frame has been taken off,
70  * c. somewhere else in the function with a valid stack frame being
71  *    present.
72  *
73  * If the application is not using frame pointers, this algorithm will
74  * fail to yield an interesting call chain.
75  *
76  * TODO: figure out a way to use unwind information.
77  */
78 
/*
 * Record up to 'nframes' user-mode return addresses from the stack
 * described by trap frame 'tf' into the array 'cc'.  The interrupted
 * PC is always stored as the first entry, so the return value (the
 * number of entries written) is at least 1.  Any copyin() failure or
 * invalid address simply terminates the walk with what was gathered
 * so far.
 */
int
pmc_save_user_callchain(uintptr_t *cc, int nframes, struct trapframe *tf)
{
	int n;			/* number of frames captured so far */
	uint32_t instr;		/* instruction bytes at the sampled PC */
	uintptr_t fp, oldfp, pc, r, sp;

	KASSERT(TRAPF_USERMODE(tf), ("[x86,%d] Not a user trap frame tf=%p",
	    __LINE__, (void *) tf));

	/* Starting register state, extracted from the trap frame. */
	pc = PMC_TRAPFRAME_TO_PC(tf);
	oldfp = fp = PMC_TRAPFRAME_TO_FP(tf);
	sp = PMC_TRAPFRAME_TO_USER_SP(tf);

	/* The sampled PC itself is always the first callchain entry. */
	*cc++ = pc; n = 1;

	r = fp + sizeof(uintptr_t); /* points to return address */

	/* If the PC isn't a user address there is nothing to decode. */
	if (!PMC_IN_USERSPACE(pc))
		return (n);

	/* Fetch the instruction at the PC to classify where we stopped. */
	if (copyin((void *) pc, &instr, sizeof(instr)) != 0)
		return (n);

	/*
	 * Pick up the caller's address according to where in the
	 * function the PC landed (cases (a)-(c) in the comment above):
	 * before the frame is pushed or just before 'ret', the return
	 * address is at the top of the stack; mid-prologue (frame
	 * pointer pushed but %sp not yet copied to %bp) it is one word
	 * up; otherwise a normal frame exists and we follow it.
	 */
	if (PMC_AT_FUNCTION_PROLOGUE_PUSH_BP(instr) ||
	    PMC_AT_FUNCTION_EPILOGUE_RET(instr)) { /* ret */
		if (copyin((void *) sp, &pc, sizeof(pc)) != 0)
			return (n);
	} else if (PMC_AT_FUNCTION_PROLOGUE_MOV_SP_BP(instr)) {
		sp += sizeof(uintptr_t);
		if (copyin((void *) sp, &pc, sizeof(pc)) != 0)
			return (n);
	} else if (copyin((void *) r, &pc, sizeof(pc)) != 0 ||
	    copyin((void *) fp, &fp, sizeof(fp)) != 0)
		return (n);

	/*
	 * Walk the chain of saved frame pointers, recording each
	 * return address, until the buffer fills or the chain ends.
	 */
	for (; n < nframes;) {
		if (pc == 0 || !PMC_IN_USERSPACE(pc))
			break;

		*cc++ = pc; n++;

		/* Frame pointers must grow upward; anything else is bogus. */
		if (fp < oldfp)
			break;

		r = fp + sizeof(uintptr_t); /* address of return address */
		oldfp = fp;

		/* Load the caller's return address and its frame pointer. */
		if (copyin((void *) r, &pc, sizeof(pc)) != 0 ||
		    copyin((void *) fp, &fp, sizeof(fp)) != 0)
			break;
	}

	return (n);
}
134 
135 /*
136  * Walking the kernel call stack.
137  *
138  * We are handed the trap frame laid down at the time the PMC
139  * interrupt was taken.  The saved PC could be:
140  * a. in the lowlevel trap handler, meaning that there isn't a C stack
141  *    to traverse,
142  * b. at the beginning of a function before the stack frame is laid
143  *    down,
144  * c. just before a 'ret', after the stack frame has been taken off,
145  * d. somewhere else in a function with a valid stack frame being
146  *    present.
147  *
148  * In case (d), the previous frame pointer is at [%ebp]/[%rbp] and
149  * the return address is at [%ebp+4]/[%rbp+8].
150  *
151  * For cases (b) and (c), the return address is at [%esp]/[%rsp] and
152  * the frame pointer doesn't need to be changed when going up one
153  * level in the stack.
154  *
155  * For case (a), we check if the PC lies in low-level trap handling
156  * code, and if so we terminate our trace.
157  */
158 
/*
 * Record up to 'nframes' kernel return addresses from the stack
 * described by trap frame 'tf' into 'cc'.  The sampled PC is always
 * the first entry, so at least 1 is returned.  Unlike the user-mode
 * walker, kernel addresses are dereferenced directly, so every
 * pointer is range-checked against the current thread's kernel stack
 * before it is read.
 */
int
pmc_save_kernel_callchain(uintptr_t *cc, int nframes, struct trapframe *tf)
{
	int n;			/* number of frames captured so far */
	uint32_t instr;		/* instruction bytes at the sampled PC */
	uintptr_t fp, pc, r, sp, stackstart, stackend;
	struct thread *td;

	KASSERT(TRAPF_USERMODE(tf) == 0,("[x86,%d] not a kernel backtrace",
	    __LINE__));

	/* Starting register state, extracted from the trap frame. */
	td = curthread;
	pc = PMC_TRAPFRAME_TO_PC(tf);
	fp = PMC_TRAPFRAME_TO_FP(tf);
	sp = PMC_TRAPFRAME_TO_KERNEL_SP(tf);

	/* The sampled PC itself is always the first callchain entry. */
	*cc++ = pc;
	r = fp + sizeof(uintptr_t); /* points to return address */

	if (nframes <= 1)
		return (1);

	/* Bounds of the current thread's kernel stack. */
	stackstart = (uintptr_t) td->td_kstack;
	stackend = (uintptr_t) td->td_kstack + td->td_kstack_pages * PAGE_SIZE;

	/*
	 * Give up immediately for case (a) — PC in low-level trap
	 * handling code — or if any of the pointers we are about to
	 * dereference lies outside the kernel text / kernel stack.
	 */
	if (PMC_IN_TRAP_HANDLER(pc) ||
	    !PMC_IN_KERNEL(pc) ||
	    !PMC_IN_KERNEL_STACK(r, stackstart, stackend) ||
	    !PMC_IN_KERNEL_STACK(sp, stackstart, stackend) ||
	    !PMC_IN_KERNEL_STACK(fp, stackstart, stackend))
		return (1);

	/* Safe to read directly: PC was verified to be a kernel address. */
	instr = *(uint32_t *) pc;

	/*
	 * Determine whether the interrupted function was in the
	 * processing of either laying down its stack frame or taking
	 * it off.
	 *
	 * If we haven't started laying down a stack frame, or are
	 * just about to return, then our caller's address is at
	 * *sp, and we don't have a frame to unwind.
	 */
	if (PMC_AT_FUNCTION_PROLOGUE_PUSH_BP(instr) ||
	    PMC_AT_FUNCTION_EPILOGUE_RET(instr))
		pc = *(uintptr_t *) sp;
	else if (PMC_AT_FUNCTION_PROLOGUE_MOV_SP_BP(instr)) {
		/*
		 * The code was midway through laying down a frame.
		 * At this point sp[0] has a frame back pointer,
		 * and the caller's address is therefore at sp[1].
		 */
		sp += sizeof(uintptr_t);
		if (!PMC_IN_KERNEL_STACK(sp, stackstart, stackend))
			return (1);
		pc = *(uintptr_t *) sp;
	} else {
		/*
		 * Not in the function prologue or epilogue.
		 */
		pc = *(uintptr_t *) r;
		fp = *(uintptr_t *) fp;
	}

	/*
	 * Walk the chain of saved frame pointers, validating each
	 * pointer against the kernel stack bounds before reading it.
	 */
	for (n = 1; n < nframes; n++) {
		*cc++ = pc;

		/* Stop once the walk reaches low-level trap handling code. */
		if (PMC_IN_TRAP_HANDLER(pc))
			break;

		r = fp + sizeof(uintptr_t);
		if (!PMC_IN_KERNEL_STACK(fp, stackstart, stackend) ||
		    !PMC_IN_KERNEL_STACK(r, stackstart, stackend))
			break;
		/* Load the caller's return address and its frame pointer. */
		pc = *(uintptr_t *) r;
		fp = *(uintptr_t *) fp;
	}

	return (n);
}
239 
240 /*
241  * Machine dependent initialization for x86 class platforms.
242  */
243 
244 struct pmc_mdep *
245 pmc_md_initialize()
246 {
247 	int i;
248 	struct pmc_mdep *md;
249 
250 	/* determine the CPU kind */
251 	if (cpu_vendor_id == CPU_VENDOR_AMD)
252 		md = pmc_amd_initialize();
253 	else if (cpu_vendor_id == CPU_VENDOR_INTEL)
254 		md = pmc_intel_initialize();
255 	else
256 		return (NULL);
257 
258 	/* disallow sampling if we do not have an LAPIC */
259 	if (md != NULL && !lapic_enable_pmc())
260 		for (i = 0; i < md->pmd_nclass; i++) {
261 			if (i == PMC_CLASS_INDEX_SOFT)
262 				continue;
263 			md->pmd_classdep[i].pcd_caps &= ~PMC_CAP_INTERRUPT;
264 		}
265 
266 	return (md);
267 }
268 
269 void
270 pmc_md_finalize(struct pmc_mdep *md)
271 {
272 
273 	lapic_disable_pmc();
274 	if (cpu_vendor_id == CPU_VENDOR_AMD)
275 		pmc_amd_finalize(md);
276 	else if (cpu_vendor_id == CPU_VENDOR_INTEL)
277 		pmc_intel_finalize(md);
278 	else
279 		KASSERT(0, ("[x86,%d] Unknown vendor", __LINE__));
280 }
281