/*	$NetBSD: subr_prof.c,v 1.25 2001/11/12 15:25:21 lukem Exp $	*/

/*-
 * Copyright (c) 1982, 1986, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)subr_prof.c	8.4 (Berkeley) 2/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_prof.c,v 1.25 2001/11/12 15:25:21 lukem Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>

#include <machine/cpu.h>

#ifdef GPROF
#include <sys/malloc.h>
#include <sys/gmon.h>

/*
 * froms[] is an array of unsigned shorts, indexed by (scaled) caller pc;
 * each entry is an index into the tos[] arc table.
 */
struct gmonparam _gmonparam = { GMON_PROF_OFF };

/* Actual start of the kernel text segment. */
extern char kernel_text[];

extern char etext[];


void
kmstartup()
{
	char *cp;
	struct gmonparam *p = &_gmonparam;
	/*
	 * Round lowpc and highpc to multiples of the density we're using
	 * so the rest of the scaling (here and in gprof) stays in ints.
	 */
	p->lowpc = ROUNDDOWN(((u_long)kernel_text),
		HISTFRACTION * sizeof(HISTCOUNTER));
	p->highpc = ROUNDUP((u_long)etext,
		HISTFRACTION * sizeof(HISTCOUNTER));
	p->textsize = p->highpc - p->lowpc;
	printf("Profiling kernel, textsize=%ld [%lx..%lx]\n",
	       p->textsize, p->lowpc, p->highpc);
	p->kcountsize = p->textsize / HISTFRACTION;
	p->hashfraction = HASHFRACTION;
	p->fromssize = p->textsize / HASHFRACTION;
	p->tolimit = p->textsize * ARCDENSITY / 100;
	if (p->tolimit < MINARCS)
		p->tolimit = MINARCS;
	else if (p->tolimit > MAXARCS)
		p->tolimit = MAXARCS;
	p->tossize = p->tolimit * sizeof(struct tostruct);
	cp = (char *)malloc(p->kcountsize + p->fromssize + p->tossize,
	    M_GPROF, M_NOWAIT);
	if (cp == 0) {
		printf("No memory for profiling.\n");
		return;
	}
	memset(cp, 0, p->kcountsize + p->tossize + p->fromssize);
	p->tos = (struct tostruct *)cp;
	cp += p->tossize;
	p->kcount = (u_short *)cp;
	cp += p->kcountsize;
	p->froms = (u_short *)cp;
}
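
/*
 * Illustrative sizing (a sketch; the real HISTFRACTION, HASHFRACTION,
 * ARCDENSITY, MINARCS and MAXARCS come from <sys/gmon.h> and vary by
 * port): assuming HISTFRACTION == 2 and a 4 MB kernel text segment,
 * kcountsize is 2 MB, i.e. one u_short histogram bin for every
 * HISTFRACTION * sizeof(HISTCOUNTER) == 4 bytes of text.  The single
 * allocation above is carved up in the order tos[], kcount[], froms[].
 */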

/*
 * Return kernel profiling information.
 */
int
sysctl_doprof(name, namelen, oldp, oldlenp, newp, newlen)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
{
	struct gmonparam *gp = &_gmonparam;
	int error;

	/* all sysctl names at this level are terminal */
	if (namelen != 1)
		return (ENOTDIR);		/* overloaded */

	/* Check we got the necessary memory at startup. */
	if (gp->kcount == NULL)
		return (EOPNOTSUPP);

	switch (name[0]) {
	case GPROF_STATE:
		error = sysctl_int(oldp, oldlenp, newp, newlen, &gp->state);
		if (error)
			return (error);
		if (gp->state == GMON_PROF_OFF)
			stopprofclock(&proc0);
		else
			startprofclock(&proc0);
		return (0);
	case GPROF_COUNT:
		return (sysctl_struct(oldp, oldlenp, newp, newlen,
		    gp->kcount, gp->kcountsize));
	case GPROF_FROMS:
		return (sysctl_struct(oldp, oldlenp, newp, newlen,
		    gp->froms, gp->fromssize));
	case GPROF_TOS:
		return (sysctl_struct(oldp, oldlenp, newp, newlen,
		    gp->tos, gp->tossize));
	case GPROF_GMONPARAM:
		return (sysctl_rdstruct(oldp, oldlenp, newp, gp, sizeof(*gp)));
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}
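
/*
 * Userland (e.g. kgmon(8)) reaches the handler above through sysctl(3)
 * with the { CTL_KERN, KERN_PROF, <node> } MIB.  A hedged sketch of
 * turning kernel profiling on from a user program:
 *
 *	int mib[3] = { CTL_KERN, KERN_PROF, GPROF_STATE };
 *	int state = GMON_PROF_ON;
 *
 *	if (sysctl(mib, 3, NULL, NULL, &state, sizeof(state)) == -1)
 *		err(1, "sysctl");
 *
 * GPROF_COUNT, GPROF_FROMS and GPROF_TOS export the raw kcount, froms
 * and tos buffers, and GPROF_GMONPARAM a read-only copy of _gmonparam.
 */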
#endif /* GPROF */

/*
 * Profiling system call.
 *
 * The scale factor is a fixed point number with 16 bits of fraction, so that
 * 1.0 is represented as 0x10000.  A scale factor of 0 turns off profiling.
 */
/* ARGSUSED */
int
sys_profil(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_profil_args /* {
		syscallarg(caddr_t) samples;
		syscallarg(u_int) size;
		syscallarg(u_int) offset;
		syscallarg(u_int) scale;
	} */ *uap = v;
	struct uprof *upp;
	int s;

	if (SCARG(uap, scale) > (1 << 16))
		return (EINVAL);
	if (SCARG(uap, scale) == 0) {
		stopprofclock(p);
		return (0);
	}
	upp = &p->p_stats->p_prof;

	/* Block profile interrupts while changing state. */
	s = splstatclock();
	upp->pr_off = SCARG(uap, offset);
	upp->pr_scale = SCARG(uap, scale);
	upp->pr_base = SCARG(uap, samples);
	upp->pr_size = SCARG(uap, size);
	startprofclock(p);
	splx(s);

	return (0);
}
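
/*
 * Hedged userland sketch of profil(2) (the buffer and symbol names here
 * are illustrative, not part of this file): profile a text range into an
 * array of 16-bit bins at full density, then turn profiling back off.
 *
 *	u_short bins[8192];
 *
 *	memset(bins, 0, sizeof(bins));
 *	profil((char *)bins, sizeof(bins), (u_long)text_start, 0x10000);
 *	... run the code being measured ...
 *	profil(NULL, 0, 0, 0);
 *
 * With scale 0x10000 (1.0) each u_short bin covers sizeof(u_short)
 * bytes of text; a scale of 0x8000 (0.5) halves the buffer needed to
 * cover the same range.
 */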

/*
 * Scale is a fixed-point number with the binary point 16 bits
 * into the value, and is <= 1.0.  pc is at most 32 bits, so the
 * intermediate result is at most 48 bits.
 */
#define	PC_TO_INDEX(pc, prof) \
	((int)(((u_quad_t)((pc) - (prof)->pr_off) * \
	    (u_quad_t)((prof)->pr_scale)) >> 16) & ~1)
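
/*
 * Worked example (values are illustrative): with pr_off == 0x1000 and
 * pr_scale == 0x8000 (0.5), a tick at pc == 0x1010 gives
 * ((0x10 * 0x8000) >> 16) == 8; the "& ~1" keeps the result aligned to
 * a u_short, so the tick is counted at byte offset 8 into pr_base,
 * i.e. the fifth 16-bit bin.
 */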

/*
 * Collect user-level profiling statistics; called on a profiling tick,
 * when a process is running in user-mode.  This routine may be called
 * from an interrupt context.  We try to update the user profiling buffers
 * cheaply with fuswintr() and suswintr().  If that fails, we revert to
 * an AST that will vector us to trap() with a context in which copyin
 * and copyout will work.  Trap will then call addupc_task().
 *
 * Note that we may (rarely) not get around to the AST soon enough, and
 * lose profile ticks when the next tick overwrites this one, but in this
 * case the system is overloaded and the profile is probably already
 * inaccurate.
 */
void
addupc_intr(p, pc)
	struct proc *p;
	u_long pc;
{
	struct uprof *prof;
	caddr_t addr;
	u_int i;
	int v;

	prof = &p->p_stats->p_prof;
	if (pc < prof->pr_off ||
	    (i = PC_TO_INDEX(pc, prof)) >= prof->pr_size)
		return;			/* out of range; ignore */

	addr = prof->pr_base + i;
	if ((v = fuswintr(addr)) == -1 || suswintr(addr, v + 1) == -1) {
		prof->pr_addr = pc;
		prof->pr_ticks++;
		need_proftick(p);
	}
}

/*
 * Much like addupc_intr(), but we can afford to take faults here.  If
 * the update fails, we simply turn off profiling.
 */
void
addupc_task(p, pc, ticks)
	struct proc *p;
	u_long pc;
	u_int ticks;
{
	struct uprof *prof;
	caddr_t addr;
	u_int i;
	u_short v;

	/* Testing P_PROFIL may be unnecessary, but is certainly safe. */
	if ((p->p_flag & P_PROFIL) == 0 || ticks == 0)
		return;

	prof = &p->p_stats->p_prof;
	if (pc < prof->pr_off ||
	    (i = PC_TO_INDEX(pc, prof)) >= prof->pr_size)
		return;

	addr = prof->pr_base + i;
	if (copyin(addr, (caddr_t)&v, sizeof(v)) == 0) {
		v += ticks;
		if (copyout((caddr_t)&v, addr, sizeof(v)) == 0)
			return;
	}
	stopprofclock(p);
}