FreeBSD/Linux Kernel Cross Reference
sys/libkern/mcount.c
1 /*-
2 * Copyright (c) 1983, 1992, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by the University of
16 * California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 */
33
34 #if !defined(lint) && !defined(KERNEL) && defined(LIBC_SCCS)
35 #if 0
36 static char sccsid[] = "@(#)mcount.c 8.1 (Berkeley) 6/4/93";
37 #endif
38 static const char rcsid[] =
39 "$FreeBSD: src/sys/libkern/mcount.c,v 1.10.2.1 1999/09/05 08:16:05 peter Exp $";
40 #endif
41
42 #include <sys/param.h>
43 #include <sys/gmon.h>
44 #ifdef KERNEL
45 #ifndef GUPROF
46 #include <sys/systm.h>
47 #endif
48 #include <vm/vm.h>
49 #include <vm/vm_param.h>
50 #include <vm/pmap.h>
51 void bintr __P((void));
52 void btrap __P((void));
53 void eintr __P((void));
54 void user __P((void));
55 #endif
56
57 /*
58 * mcount is called on entry to each function compiled with the profiling
59 * switch set. _mcount(), which is declared in a machine-dependent way
60 * with _MCOUNT_DECL, does the actual work and is either inlined into a
61 * C routine or called by an assembly stub. In any case, this magic is
62 * taken care of by the MCOUNT definition in <machine/profile.h>.
63 *
64 * _mcount updates data structures that represent traversals of the
65 * program's call graph edges. frompc and selfpc are the return
66 * address and function address that represents the given call graph edge.
67 *
68 * Note: the original BSD code used the same variable (frompcindex) for
69 * both frompcindex and frompc. Any reasonable, modern compiler will
70 * perform this optimization.
71 */
/*
 * Record one traversal of the call-graph arc frompc -> selfpc.
 *
 * frompc: return address in the caller (identifies the arc's tail).
 * selfpc: entry address of the profiled function (the arc's head).
 *
 * Data structures (all in _gmonparam):
 *   froms[] - hash table indexed by (frompc offset / hashfraction);
 *             each slot holds the index of the head of a tostruct chain.
 *   tos[]   - pool of arc records; tos[0].link is the high-water
 *             allocation index, tolimit the pool capacity.
 * No return value; on pool exhaustion profiling is disabled via
 * GMON_PROF_ERROR (see `overflow' below).
 */
_MCOUNT_DECL(frompc, selfpc)	/* _mcount; may be static, inline, etc */
	register fptrint_t frompc, selfpc;
{
#ifdef GUPROF
	int delta;
#endif
	register fptrdiff_t frompci;
	register u_short *frompcindex;
	register struct tostruct *top, *prevtop;
	register struct gmonparam *p;
	register long toindex;
#ifdef KERNEL
	/* Machine-dependent interrupt-blocking state for MCOUNT_ENTER/EXIT. */
	MCOUNT_DECL(s)
#endif

	p = &_gmonparam;
#ifndef GUPROF			/* XXX */
	/*
	 * check that we are profiling
	 * and that we aren't recursively invoked.
	 */
	if (p->state != GMON_PROF_ON)
		return;
#endif
#ifdef KERNEL
	MCOUNT_ENTER(s);
#else
	/* Userland has no locks: the BUSY state itself blocks recursion. */
	p->state = GMON_PROF_BUSY;
#endif
	/* Offset of the call site into profiled text; compared unsigned below. */
	frompci = frompc - p->lowpc;

#ifdef KERNEL
	/*
	 * When we are called from an exception handler, frompci may be
	 * for a user address.  Convert such frompci's to the index of
	 * user() to merge all user counts.
	 */
	if (frompci >= p->textsize) {
		if (frompci + p->lowpc
		    >= (fptrint_t)(VM_MAXUSER_ADDRESS + UPAGES * PAGE_SIZE))
			goto done;
		frompci = (fptrint_t)user - p->lowpc;
		if (frompci >= p->textsize)
			goto done;
	}
#endif /* KERNEL */

#ifdef GUPROF
	if (p->state == GMON_PROF_HIRES) {
		/*
		 * Count the time since cputime() was previously called
		 * against `frompc'.  Compensate for overheads.
		 *
		 * cputime() sets its prev_count variable to the count when
		 * it is called.  This in effect starts a counter for
		 * the next period of execution (normally from now until
		 * the next call to mcount() or mexitcount()).  We set
		 * cputime_bias to compensate for our own overhead.
		 *
		 * We use the usual sampling counters since they can be
		 * located efficiently.  4-byte counters are usually
		 * necessary.  gprof will add up the scattered counts
		 * just like it does for statistical profiling.  All
		 * counts are signed so that underflow in the subtractions
		 * doesn't matter much (negative counts are normally
		 * compensated for by larger counts elsewhere).  Underflow
		 * shouldn't occur, but may be caused by slightly wrong
		 * calibrations or from not clearing cputime_bias.
		 */
		delta = cputime() - cputime_bias - p->mcount_pre_overhead;
		cputime_bias = p->mcount_post_overhead;
		KCOUNT(p, frompci) += delta;
		*p->cputime_count += p->cputime_overhead;
		*p->mcount_count += p->mcount_overhead;
	}
#endif /* GUPROF */

#ifdef KERNEL
	/*
	 * When we are called from an exception handler, frompc is faked
	 * to be for where the exception occurred.  We've just solidified
	 * the count for there.  Now convert frompci to the index of btrap()
	 * for trap handlers and bintr() for interrupt handlers to make
	 * exceptions appear in the call graph as calls from btrap() and
	 * bintr() instead of calls from all over.
	 */
	if ((fptrint_t)selfpc >= (fptrint_t)btrap
	    && (fptrint_t)selfpc < (fptrint_t)eintr) {
		if ((fptrint_t)selfpc >= (fptrint_t)bintr)
			frompci = (fptrint_t)bintr - p->lowpc;
		else
			frompci = (fptrint_t)btrap - p->lowpc;
	}
#endif /* KERNEL */

	/*
	 * check that frompc is a reasonable pc value.
	 * for example:	signal catchers get called from the stack,
	 *		not from text space.  too bad.
	 */
	if (frompci >= p->textsize)
		goto done;

	/* Hash the call site to its chain-head slot in froms[]. */
	frompcindex = &p->froms[frompci / (p->hashfraction * sizeof(*p->froms))];
	toindex = *frompcindex;
	if (toindex == 0) {
		/*
		 * first time traversing this arc
		 */
		toindex = ++p->tos[0].link;	/* allocate next free tostruct */
		if (toindex >= p->tolimit)
			/* halt further profiling */
			goto overflow;

		*frompcindex = toindex;
		top = &p->tos[toindex];
		top->selfpc = selfpc;
		top->count = 1;
		top->link = 0;
		goto done;
	}
	top = &p->tos[toindex];
	if (top->selfpc == selfpc) {
		/*
		 * arc at front of chain; usual case.
		 */
		top->count++;
		goto done;
	}
	/*
	 * have to go looking down chain for it.
	 * top points to what we are looking at,
	 * prevtop points to previous top.
	 * we know it is not at the head of the chain.
	 */
	for (; /* goto done */; ) {
		if (top->link == 0) {
			/*
			 * top is end of the chain and none of the chain
			 * had top->selfpc == selfpc.
			 * so we allocate a new tostruct
			 * and link it to the head of the chain.
			 */
			toindex = ++p->tos[0].link;
			if (toindex >= p->tolimit)
				goto overflow;

			top = &p->tos[toindex];
			top->selfpc = selfpc;
			top->count = 1;
			top->link = *frompcindex;
			*frompcindex = toindex;
			goto done;
		}
		/*
		 * otherwise, check the next arc on the chain.
		 */
		prevtop = top;
		top = &p->tos[top->link];
		if (top->selfpc == selfpc) {
			/*
			 * there it is.
			 * increment its count
			 * move it to the head of the chain.
			 */
			top->count++;
			toindex = prevtop->link;
			prevtop->link = top->link;
			top->link = *frompcindex;
			*frompcindex = toindex;
			goto done;
		}

	}
done:
#ifdef KERNEL
	MCOUNT_EXIT(s);
#else
	/* Re-arm profiling (clears the BUSY recursion guard). */
	p->state = GMON_PROF_ON;
#endif
	return;
overflow:
	/* tos[] pool exhausted: permanently disable further profiling. */
	p->state = GMON_PROF_ERROR;
#ifdef KERNEL
	MCOUNT_EXIT(s);
#endif
	return;
}
260
/*
 * Actual definition of mcount function.  Defined in <machine/profile.h>,
 * which is included by <sys/gmon.h>.  The macro expands to the
 * machine-dependent entry stub that collects frompc/selfpc and then
 * invokes the _MCOUNT_DECL routine above.
 */
MCOUNT
266
267 #ifdef GUPROF
268 void
269 mexitcount(selfpc)
270 fptrint_t selfpc;
271 {
272 struct gmonparam *p;
273 fptrint_t selfpcdiff;
274
275 p = &_gmonparam;
276 selfpcdiff = selfpc - (fptrint_t)p->lowpc;
277 if (selfpcdiff < p->textsize) {
278 int delta;
279
280 /*
281 * Count the time since cputime() was previously called
282 * against `selfpc'. Compensate for overheads.
283 */
284 delta = cputime() - cputime_bias - p->mexitcount_pre_overhead;
285 cputime_bias = p->mexitcount_post_overhead;
286 KCOUNT(p, selfpcdiff) += delta;
287 *p->cputime_count += p->cputime_overhead;
288 *p->mexitcount_count += p->mexitcount_overhead;
289 }
290 }
291
/*
 * Calibration fixture: an empty counted loop.  The profiling startup
 * code times this to measure raw loop overhead; the loop's exact shape
 * is what gets measured, so it must not be "optimized" or restructured.
 */
void
empty_loop()
{
	int i;

	for (i = 0; i < CALIB_SCALE; i++)
		;
}
300
/*
 * Calibration fixture: a function that does nothing.  Timed (via
 * nullfunc_loop) to measure per-call mcount/mexitcount overhead.
 */
void
nullfunc()
{
}
305
/*
 * Calibration fixture: calls nullfunc() CALIB_SCALE times so the
 * profiling startup code can derive the average cost of one profiled
 * call.  Subtracting empty_loop()'s time isolates the call overhead.
 */
void
nullfunc_loop()
{
	int i;

	for (i = 0; i < CALIB_SCALE; i++)
		nullfunc();
}
314 #endif /* GUPROF */
Cache object: f8518869bedd885465063c15626d70bb
|