1 /*- 2 * Copyright (c) 1983, 1992, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. Neither the name of the University nor the names of its contributors 14 * may be used to endorse or promote products derived from this software 15 * without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 
 *
 * @(#)mcount.c	8.1 (Berkeley) 6/4/93
 * $FreeBSD: src/lib/libc/gmon/mcount.c,v 1.20 2004/10/16 06:32:43 obrien Exp $
 */

#include <sys/param.h>
#include <sys/gmon.h>
#ifdef _KERNEL
#include <sys/systm.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
/*
 * Assembly-defined marker symbols used below to classify pc values:
 * [btrap, eintr) brackets the trap/interrupt entry code (with bintr
 * separating traps from interrupts), and user() stands in for all
 * user-space addresses.  Only their addresses are used, never called.
 */
void	bintr(void);
void	btrap(void);
void	eintr(void);
void	user(void);
#endif

/*
 * mcount is called on entry to each function compiled with the profiling
 * switch set.  _mcount(), which is declared in a machine-dependent way
 * with _MCOUNT_DECL, does the actual work and is either inlined into a
 * C routine or called by an assembly stub.  In any case, this magic is
 * taken care of by the MCOUNT definition in <machine/profile.h>.
 *
 * _mcount updates data structures that represent traversals of the
 * program's call graph edges.  frompc and selfpc are the return
 * address and function address that represents the given call graph edge.
 *
 * Note: the original BSD code used the same variable (frompcindex) for
 * both frompcindex and frompc.  Any reasonable, modern compiler will
 * perform this optimization.
 */
/* _mcount; may be static, inline, etc */
_MCOUNT_DECL(u_long frompc, u_long selfpc)
{
#ifdef GUPROF
	u_int delta;		/* cycles elapsed since cputime() last read */
#endif
	u_long frompci;		/* frompc as an offset into the text segment */
	u_short *frompcindex;	/* hash-table slot for frompc's call arcs */
	struct tostruct *top, *prevtop;	/* arc list walker and its trailer */
	struct gmonparam *p;
	long toindex;		/* index into p->tos[] of the current arc */
#ifdef _KERNEL
	MCOUNT_DECL(s)		/* saved interrupt/lock state for MCOUNT_ENTER */
#endif

	p = &_gmonparam;
#ifndef GUPROF			/* XXX */
	/*
	 * check that we are profiling
	 * and that we aren't recursively invoked.
	 */
	if (p->state != GMON_PROF_ON)
		return;
#endif
#ifdef _KERNEL
	MCOUNT_ENTER(s);
#else
	/*
	 * Userland reentrancy guard: mark the profiler busy so a nested
	 * call (e.g. from a signal handler) bails out at the state check
	 * above.  Not atomic -- hence the XXX.
	 */
	p->state = GMON_PROF_BUSY;	/* XXX */
#endif
	/* Offset of the call site within the profiled text range. */
	frompci = frompc - p->lowpc;

#ifdef _KERNEL
	/*
	 * When we are called from an exception handler, frompci may be
	 * for a user address.  Convert such frompci's to the index of
	 * user() to merge all user counts.
	 */
	if (frompci >= p->textsize) {
		if (frompci + p->lowpc
		    >= (u_long)(VM_MAXUSER_ADDRESS + UPAGES * PAGE_SIZE))
			goto done;	/* not user space either; ignore */
		frompci = (u_long)user - p->lowpc;
		if (frompci >= p->textsize)
			goto done;
	}
#endif

#ifdef GUPROF
	if (p->state != GMON_PROF_HIRES)
		goto skip_guprof_stuff;
	/*
	 * Look at the clock and add the count of clock cycles since the
	 * clock was last looked at to a counter for frompc.  This
	 * solidifies the count for the function containing frompc and
	 * effectively starts another clock for the current function.
	 * The count for the new clock will be solidified when another
	 * function call is made or the function returns.
	 *
	 * We use the usual sampling counters since they can be located
	 * efficiently.  4-byte counters are usually necessary.
	 *
	 * There are many complications for subtracting the profiling
	 * overheads from the counts for normal functions and adding
	 * them to the counts for mcount(), mexitcount() and cputime().
	 * We attempt to handle fractional cycles, but the overheads
	 * are usually underestimated because they are calibrated for
	 * a simpler than usual setup.
	 */
	delta = cputime() - p->mcount_overhead;
	p->cputime_overhead_resid += p->cputime_overhead_frac;
	p->mcount_overhead_resid += p->mcount_overhead_frac;
	/*
	 * (int)delta < 0 means the unsigned subtraction above wrapped:
	 * the overhead estimate exceeded the measured time.  Charge the
	 * (negative) difference back to mcount's own counter rather
	 * than to the profiled function.
	 */
	if ((int)delta < 0)
		*p->mcount_count += delta + p->mcount_overhead
				    - p->cputime_overhead;
	else if (delta != 0) {
		/*
		 * Residuals accumulate the fractional-cycle overhead in
		 * units of 1/CALIB_SCALE; each time one reaches a whole
		 * cycle, move that cycle from the function's count to
		 * the corresponding overhead counter.
		 */
		if (p->cputime_overhead_resid >= CALIB_SCALE) {
			p->cputime_overhead_resid -= CALIB_SCALE;
			++*p->cputime_count;
			--delta;
		}
		if (delta != 0) {
			if (p->mcount_overhead_resid >= CALIB_SCALE) {
				p->mcount_overhead_resid -= CALIB_SCALE;
				++*p->mcount_count;
				--delta;
			}
			KCOUNT(p, frompci) += delta;
		}
		*p->mcount_count += p->mcount_overhead_sub;
	}
	*p->cputime_count += p->cputime_overhead;
skip_guprof_stuff:
#endif /* GUPROF */

#ifdef _KERNEL
	/*
	 * When we are called from an exception handler, frompc is faked
	 * to be for where the exception occurred.  We've just solidified
	 * the count for there.  Now convert frompci to the index of btrap()
	 * for trap handlers and bintr() for interrupt handlers to make
	 * exceptions appear in the call graph as calls from btrap() and
	 * bintr() instead of calls from all over.
	 */
	if ((selfpc >= (u_long)btrap) && (selfpc < (u_long)eintr)) {
		if (selfpc >= (u_long)bintr)
			frompci = (u_long)bintr - p->lowpc;
		else
			frompci = (u_long)btrap - p->lowpc;
	}
#endif

	/*
	 * check that frompc is a reasonable pc value.
	 * for example:	signal catchers get called from the stack,
	 *		not from text space.  too bad.
	 */
	if (frompci >= p->textsize)
		goto done;

	/*
	 * froms[] is a hash table mapping a call site to the head of its
	 * chain of arcs in tos[]; hashfraction controls how many text
	 * bytes share one slot.  A zero entry means no arc recorded yet.
	 */
	frompcindex = &p->froms[frompci / (p->hashfraction * sizeof(*p->froms))];
	toindex = *frompcindex;
	if (toindex == 0) {
		/*
		 * first time traversing this arc
		 * (tos[0].link doubles as the next-free-slot allocator)
		 */
		toindex = ++p->tos[0].link;
		if (toindex >= p->tolimit)
			/* halt further profiling */
			goto overflow;

		*frompcindex = toindex;
		top = &p->tos[toindex];
		top->selfpc = selfpc;
		top->count = 1;
		top->link = 0;
		goto done;
	}
	top = &p->tos[toindex];
	if (top->selfpc == selfpc) {
		/*
		 * arc at front of chain; usual case.
		 */
		top->count++;
		goto done;
	}
	/*
	 * have to go looking down chain for it.
	 * top points to what we are looking at,
	 * prevtop points to previous top.
	 * we know it is not at the head of the chain.
	 */
	for (; /* goto done */; ) {
		if (top->link == 0) {
			/*
			 * top is end of the chain and none of the chain
			 * had top->selfpc == selfpc.
			 * so we allocate a new tostruct
			 * and link it to the head of the chain.
			 */
			toindex = ++p->tos[0].link;
			if (toindex >= p->tolimit)
				goto overflow;

			top = &p->tos[toindex];
			top->selfpc = selfpc;
			top->count = 1;
			top->link = *frompcindex;
			*frompcindex = toindex;
			goto done;
		}
		/*
		 * otherwise, check the next arc on the chain.
		 */
		prevtop = top;
		top = &p->tos[top->link];
		if (top->selfpc == selfpc) {
			/*
			 * there it is.
			 * increment its count
			 * move it to the head of the chain
			 * (move-to-front keeps hot arcs cheap to find).
			 */
			top->count++;
			toindex = prevtop->link;
			prevtop->link = top->link;
			top->link = *frompcindex;
			*frompcindex = toindex;
			goto done;
		}

	}
done:
#ifdef _KERNEL
	MCOUNT_EXIT(s);
#else
	p->state = GMON_PROF_ON;	/* XXX */
#endif
	return;
overflow:
	/* tos[] is full; mark the error so profiling stops for good. */
	p->state = GMON_PROF_ERROR;	/* XXX */
#ifdef _KERNEL
	MCOUNT_EXIT(s);
#endif
	return;
}

/*
 * Actual definition of mcount function.  Defined in <machine/profile.h>,
 * which is included by <sys/gmon.h>.
 */
MCOUNT

#ifdef GUPROF
/*
 * Called on function exit (high-resolution profiling only): charge the
 * cycles accumulated since the last clock reading to the exiting
 * function, using the same overhead-calibration scheme as _mcount above.
 * selfpc is the address of the function being exited; addresses outside
 * the profiled text range are ignored.
 */
void
mexitcount(u_long selfpc)
{
	struct gmonparam *p;
	u_long selfpcdiff;	/* selfpc as an offset into the text segment */

	p = &_gmonparam;
	selfpcdiff = selfpc - p->lowpc;
	if (selfpcdiff < p->textsize) {
		u_int delta;	/* cycles since cputime() last read */

		/*
		 * Solidify the count for the current function.
		 */
		delta = cputime() - p->mexitcount_overhead;
		p->cputime_overhead_resid += p->cputime_overhead_frac;
		p->mexitcount_overhead_resid += p->mexitcount_overhead_frac;
		/* See _mcount: wrapped delta means overhead overestimate. */
		if ((int)delta < 0)
			*p->mexitcount_count += delta + p->mexitcount_overhead
						- p->cputime_overhead;
		else if (delta != 0) {
			if (p->cputime_overhead_resid >= CALIB_SCALE) {
				p->cputime_overhead_resid -= CALIB_SCALE;
				++*p->cputime_count;
				--delta;
			}
			if (delta != 0) {
				if (p->mexitcount_overhead_resid
				    >= CALIB_SCALE) {
					p->mexitcount_overhead_resid
					    -= CALIB_SCALE;
					++*p->mexitcount_count;
					--delta;
				}
				KCOUNT(p, selfpcdiff) += delta;
			}
			*p->mexitcount_count += p->mexitcount_overhead_sub;
		}
		*p->cputime_count += p->cputime_overhead;
	}
}
#endif /* GUPROF */