/*	$OpenBSD: db_prof.c,v 1.4 2017/08/11 15:14:23 nayden Exp $	*/

/*
 * Copyright (c) 2016 Martin Pieuchot
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*-
 * Copyright (c) 1983, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/exec_elf.h>
#include <sys/malloc.h>
#include <sys/gmon.h>

#include <machine/db_machdep.h>
#include <ddb/db_extern.h>
#include <ddb/db_access.h> /* for db_write_bytes() */
#include <ddb/db_sym.h>

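/*
 * etext is the end-of-text symbol provided by the linker; probes are
 * only considered for addresses between KERNBASE and etext (see
 * db_prof_forall() below).
 */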
extern char etext[];

struct prof_probe {
	const char		*pp_name;
	Elf_Sym			*pp_symb;
	SLIST_ENTRY(prof_probe)	 pp_next;
	vaddr_t			 pp_inst;
	int			 pp_on;
};

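/*
 * Registered probes live in a single page worth of SLIST buckets.
 * INSTTOIDX() hashes a probed instruction address by dropping its low
 * alignment bits and masking with PPTMASK; the same index is used when
 * inserting in db_prof_forall() and when looking up in db_prof_hook().
 */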
#define PPTSIZE		PAGE_SIZE
#define	PPTMASK		((PPTSIZE / sizeof(struct prof_probe)) - 1)
#define INSTTOIDX(inst)	((((unsigned long)(inst)) >> 4) & PPTMASK)
SLIST_HEAD(, prof_probe) *pp_table;

extern int db_profile;			/* Allow dynamic profiling */
int db_prof_on;				/* Profiling state On/Off */

vaddr_t db_get_pc(struct trapframe *);
vaddr_t db_get_probe_addr(struct trapframe *);

void db_prof_forall(Elf_Sym *, char *, char *, int, void *);
void db_prof_count(unsigned long, unsigned long);

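/*
 * Allocate the probe table and walk the kernel ELF symbol table,
 * registering one probe per instrumentable function.
 */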
void
db_prof_init(void)
{
	unsigned long nentries = 0;

	pp_table = malloc(PPTSIZE, M_TEMP, M_NOWAIT|M_ZERO);
	if (pp_table == NULL)
		return;

	db_elf_sym_forall(db_prof_forall, &nentries);
	printf("ddb probe table references %lu entry points\n", nentries);
}

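/*
 * db_elf_sym_forall() callback: register a probe for every kernel text
 * function that begins with the expected frame-save instruction
 * (SSF_INST).  ddb's own functions and the trap entry points are
 * skipped, presumably to avoid re-entering the debugger from its own
 * probes.
 */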
void
db_prof_forall(Elf_Sym *sym, char *name, char *suff, int pre, void *xarg)
{
	Elf_Sym *symb = sym;
	unsigned long *nentries = xarg;
	struct prof_probe *pp;
	vaddr_t inst;

	if (ELF_ST_TYPE(symb->st_info) != STT_FUNC)
		return;

	inst = symb->st_value;
	if (inst < KERNBASE || inst >= (vaddr_t)&etext)
		return;

	if (*((uint8_t *)inst) != SSF_INST)
		return;

	if (strncmp(name, "db_", 3) == 0 || strncmp(name, "trap", 4) == 0)
		return;

#ifdef __i386__
	/* Avoid a recursion in db_write_text(). */
	if (strncmp(name, "pmap_pte", 8) == 0)
		return;
#endif

	pp = malloc(sizeof(struct prof_probe), M_TEMP, M_NOWAIT|M_ZERO);
	if (pp == NULL)
		return;

	pp->pp_name = name;
	pp->pp_inst = inst;
	pp->pp_symb = symb;

	SLIST_INSERT_HEAD(&pp_table[INSTTOIDX(pp->pp_inst)], pp, pp_next);

	(*nentries)++;
}

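/*
 * Arm every registered probe by overwriting the frame-save instruction
 * at the function entry with the breakpoint instruction.  Only allowed
 * when the db_profile knob is set, and only implemented on amd64 and
 * i386, where BKPT_SIZE matches SSF_SIZE.
 */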
int
db_prof_enable(void)
{
#if defined(__amd64__) || defined(__i386__)
	struct prof_probe *pp;
	uint8_t patch = BKPT_INST;
	unsigned long s;
	int i;

	if (!db_profile)
		return EPERM;

	if (pp_table == NULL)
		return ENOENT;

	KASSERT(BKPT_SIZE == SSF_SIZE);

	s = intr_disable();
	for (i = 0; i < (PPTSIZE / sizeof(*pp)); i++) {
		SLIST_FOREACH(pp, &pp_table[i], pp_next) {
			pp->pp_on = 1;
			db_write_bytes(pp->pp_inst, BKPT_SIZE, &patch);
		}
	}
	intr_restore(s);

	db_prof_on = 1;

	return 0;
#else
	return ENOENT;
#endif
}

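/*
 * Disarm all probes by restoring the original frame-save instruction.
 */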
void
db_prof_disable(void)
{
	struct prof_probe *pp;
	uint8_t patch = SSF_INST;
	unsigned long s;
	int i;

	db_prof_on = 0;

	s = intr_disable();
	for (i = 0; i < (PPTSIZE / sizeof(*pp)); i++) {
		SLIST_FOREACH(pp, &pp_table[i], pp_next) {
			db_write_bytes(pp->pp_inst, SSF_SIZE, &patch);
			pp->pp_on = 0;
		}
	}
	intr_restore(s);
}

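/*
 * Breakpoint filter: given the trapframe of a breakpoint fault, return
 * 1 when the faulting address matches an armed probe (recording the
 * call arc if profiling is on), 0 otherwise so the caller can handle
 * the trap as a regular ddb breakpoint.
 */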
int
db_prof_hook(struct trapframe *frame)
{
	struct prof_probe *pp;
	vaddr_t pc, inst;

	if (pp_table == NULL)
		return 0;

	pc = db_get_pc(frame);
	inst = db_get_probe_addr(frame);

	SLIST_FOREACH(pp, &pp_table[INSTTOIDX(inst)], pp_next) {
		if (pp->pp_on && pp->pp_inst == inst) {
			if (db_prof_on)
				db_prof_count(pc, inst);
			return 1;
		}
	}

	return 0;
}

/*
 * Equivalent to mcount(); must be called with interrupts disabled.
 * Records the frompc -> selfpc arc in the per-CPU gmon buffers:
 * froms[] hashes the caller's pc to a chain of tostruct entries in
 * tos[], each holding one callee pc and its call count.
 */
void
db_prof_count(unsigned long frompc, unsigned long selfpc)
{
	unsigned short *frompcindex;
	struct tostruct *top, *prevtop;
	struct gmonparam *p;
	long toindex;

	if ((p = curcpu()->ci_gmon) == NULL)
		return;

	/*
	 * check that we are profiling
	 * and that we aren't recursively invoked.
	 */
	if (p->state != GMON_PROF_ON)
		return;

	/*
	 * check that frompcindex is a reasonable pc value.
	 * for example:	signal catchers get called from the stack,
	 *		not from text space.  too bad.
	 */
	frompc -= p->lowpc;
	if (frompc > p->textsize)
		return;

#if (HASHFRACTION & (HASHFRACTION - 1)) == 0
	if (p->hashfraction == HASHFRACTION)
		frompcindex =
		    &p->froms[frompc / (HASHFRACTION * sizeof(*p->froms))];
	else
#endif
		frompcindex =
		    &p->froms[frompc / (p->hashfraction * sizeof(*p->froms))];
	toindex = *frompcindex;
	if (toindex == 0) {
		/*
		 *	first time traversing this arc
		 */
		toindex = ++p->tos[0].link;
		if (toindex >= p->tolimit)
			/* halt further profiling */
			goto overflow;

		*frompcindex = toindex;
		top = &p->tos[toindex];
		top->selfpc = selfpc;
		top->count = 1;
		top->link = 0;
		return;
	}
	top = &p->tos[toindex];
	if (top->selfpc == selfpc) {
		/*
		 * arc at front of chain; usual case.
		 */
		top->count++;
		return;
	}
	/*
	 * have to go looking down chain for it.
	 * top points to what we are looking at,
	 * prevtop points to previous top.
	 * we know it is not at the head of the chain.
	 */
	for (; /* return */; ) {
		if (top->link == 0) {
			/*
			 * top is end of the chain and none of the chain
			 * had top->selfpc == selfpc.
			 * so we allocate a new tostruct
			 * and link it to the head of the chain.
			 */
			toindex = ++p->tos[0].link;
			if (toindex >= p->tolimit)
				goto overflow;

			top = &p->tos[toindex];
			top->selfpc = selfpc;
			top->count = 1;
			top->link = *frompcindex;
			*frompcindex = toindex;
			return;
		}
		/*
		 * otherwise, check the next arc on the chain.
		 */
		prevtop = top;
		top = &p->tos[top->link];
		if (top->selfpc == selfpc) {
			/*
			 * there it is.
			 * increment its count
			 * move it to the head of the chain.
			 */
			top->count++;
			toindex = prevtop->link;
			prevtop->link = top->link;
			top->link = *frompcindex;
			*frompcindex = toindex;
			return;
		}
	}

overflow:
	p->state = GMON_PROF_ERROR;
}