FreeBSD/Linux Kernel Cross Reference
sys/kern/subr_prof.c
/*-
 * Copyright (c) 1982, 1986, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)subr_prof.c	8.3 (Berkeley) 9/23/93
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/5.2/sys/kern/subr_prof.c 116182 2003-06-11 00:56:59Z obrien $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>

#include <machine/cpu.h>

#ifdef GPROF
#include <sys/malloc.h>
#include <sys/gmon.h>
#undef MCOUNT

static MALLOC_DEFINE(M_GPROF, "gprof", "kernel profiling buffer");

static void kmstartup(void *);
SYSINIT(kmem, SI_SUB_KPROF, SI_ORDER_FIRST, kmstartup, NULL)

struct gmonparam _gmonparam = { GMON_PROF_OFF };

#ifdef GUPROF
#include <machine/asmacros.h>

/*
 * Dummy functions that are run while profiling is active so that the
 * per-call cost of profiled functions can be measured in kmstartup().
 */
void
nullfunc_loop_profiled()
{
	int i;

	for (i = 0; i < CALIB_SCALE; i++)
		nullfunc_profiled();
}

#define	nullfunc_loop_profiled_end	nullfunc_profiled	/* XXX */

void
nullfunc_profiled()
{
}
#endif /* GUPROF */

/*
 * Update the histograms to support extending the text region arbitrarily.
 * This is done somewhat naively (no sparse regions), so it wastes a small
 * amount of memory, but it works well enough to allow profiling of KLDs.
 */
void
kmupetext(uintfptr_t nhighpc)
{
	struct gmonparam np;	/* slightly large */
	struct gmonparam *p = &_gmonparam;
	char *cp;

	GIANT_REQUIRED;
	bcopy(p, &np, sizeof(*p));
	np.highpc = ROUNDUP(nhighpc, HISTFRACTION * sizeof(HISTCOUNTER));
	if (np.highpc <= p->highpc)
		return;
	np.textsize = np.highpc - p->lowpc;
	np.kcountsize = np.textsize / HISTFRACTION;
	np.hashfraction = HASHFRACTION;
	np.fromssize = np.textsize / HASHFRACTION;
	np.tolimit = np.textsize * ARCDENSITY / 100;
	if (np.tolimit < MINARCS)
		np.tolimit = MINARCS;
	else if (np.tolimit > MAXARCS)
		np.tolimit = MAXARCS;
	np.tossize = np.tolimit * sizeof(struct tostruct);
	cp = malloc(np.kcountsize + np.fromssize + np.tossize,
	    M_GPROF, M_WAITOK);
	/*
	 * Check for something else extending highpc while we slept.
	 */
	if (np.highpc <= p->highpc) {
		free(cp, M_GPROF);
		return;
	}
	np.tos = (struct tostruct *)cp;
	cp += np.tossize;
	np.kcount = (HISTCOUNTER *)cp;
	cp += np.kcountsize;
	np.froms = (u_short *)cp;
#ifdef GUPROF
	/* Reinitialize pointers to overhead counters. */
	np.cputime_count = &KCOUNT(&np, PC_TO_I(&np, cputime));
	np.mcount_count = &KCOUNT(&np, PC_TO_I(&np, mcount));
	np.mexitcount_count = &KCOUNT(&np, PC_TO_I(&np, mexitcount));
#endif
	critical_enter();
	bcopy(p->tos, np.tos, p->tossize);
	bzero((char *)np.tos + p->tossize, np.tossize - p->tossize);
	bcopy(p->kcount, np.kcount, p->kcountsize);
	bzero((char *)np.kcount + p->kcountsize, np.kcountsize -
	    p->kcountsize);
	bcopy(p->froms, np.froms, p->fromssize);
	bzero((char *)np.froms + p->fromssize, np.fromssize - p->fromssize);
	cp = (char *)p->tos;
	bcopy(&np, p, sizeof(*p));
	critical_exit();
	free(cp, M_GPROF);
}

static void
kmstartup(void *dummy)
{
	char *cp;
	struct gmonparam *p = &_gmonparam;
#ifdef GUPROF
	int cputime_overhead;
	int empty_loop_time;
	int i;
	int mcount_overhead;
	int mexitcount_overhead;
	int nullfunc_loop_overhead;
	int nullfunc_loop_profiled_time;
	uintfptr_t tmp_addr;
#endif

	/*
	 * Round lowpc and highpc to multiples of the density we're using
	 * so the rest of the scaling (here and in gprof) stays in ints.
	 */
	p->lowpc = ROUNDDOWN((u_long)btext, HISTFRACTION * sizeof(HISTCOUNTER));
	p->highpc = ROUNDUP((u_long)etext, HISTFRACTION * sizeof(HISTCOUNTER));
	p->textsize = p->highpc - p->lowpc;
	printf("Profiling kernel, textsize=%lu [%x..%x]\n",
	    p->textsize, p->lowpc, p->highpc);
	p->kcountsize = p->textsize / HISTFRACTION;
	p->hashfraction = HASHFRACTION;
	p->fromssize = p->textsize / HASHFRACTION;
	p->tolimit = p->textsize * ARCDENSITY / 100;
	if (p->tolimit < MINARCS)
		p->tolimit = MINARCS;
	else if (p->tolimit > MAXARCS)
		p->tolimit = MAXARCS;
	p->tossize = p->tolimit * sizeof(struct tostruct);
	cp = (char *)malloc(p->kcountsize + p->fromssize + p->tossize,
	    M_GPROF, M_WAITOK | M_ZERO);
	p->tos = (struct tostruct *)cp;
	cp += p->tossize;
	p->kcount = (HISTCOUNTER *)cp;
	cp += p->kcountsize;
	p->froms = (u_short *)cp;

#ifdef GUPROF
	/* Initialize pointers to overhead counters. */
	p->cputime_count = &KCOUNT(p, PC_TO_I(p, cputime));
	p->mcount_count = &KCOUNT(p, PC_TO_I(p, mcount));
	p->mexitcount_count = &KCOUNT(p, PC_TO_I(p, mexitcount));

	/*
	 * Disable interrupts to avoid interference while we calibrate
	 * things.
	 */
	critical_enter();

	/*
	 * Determine overheads.
	 * XXX this needs to be repeated for each useful timer/counter.
	 */
	cputime_overhead = 0;
	startguprof(p);
	for (i = 0; i < CALIB_SCALE; i++)
		cputime_overhead += cputime();

	empty_loop();
	startguprof(p);
	empty_loop();
	empty_loop_time = cputime();

	nullfunc_loop_profiled();

	/*
	 * Start profiling.  There won't be any normal function calls since
	 * interrupts are disabled, but we will call the profiling routines
	 * directly to determine their overheads.
	 */
	p->state = GMON_PROF_HIRES;

	startguprof(p);
	nullfunc_loop_profiled();

	startguprof(p);
	for (i = 0; i < CALIB_SCALE; i++)
#if defined(__i386__) && __GNUC__ >= 2
		__asm("pushl %0; call __mcount; popl %%ecx"
		    :
		    : "i" (profil)
		    : "ax", "bx", "cx", "dx", "memory");
#elif defined(lint)
#else
#error
#endif
	mcount_overhead = KCOUNT(p, PC_TO_I(p, profil));

	startguprof(p);
	for (i = 0; i < CALIB_SCALE; i++)
#if defined(__i386__) && __GNUC__ >= 2
		__asm("call " __XSTRING(HIDENAME(mexitcount)) "; 1:"
		    : : : "ax", "bx", "cx", "dx", "memory");
	__asm("movl $1b,%0" : "=rm" (tmp_addr));
#elif defined(lint)
#else
#error
#endif
	mexitcount_overhead = KCOUNT(p, PC_TO_I(p, tmp_addr));

	p->state = GMON_PROF_OFF;
	stopguprof(p);

	critical_exit();

	nullfunc_loop_profiled_time = 0;
	for (tmp_addr = (uintfptr_t)nullfunc_loop_profiled;
	     tmp_addr < (uintfptr_t)nullfunc_loop_profiled_end;
	     tmp_addr += HISTFRACTION * sizeof(HISTCOUNTER))
		nullfunc_loop_profiled_time += KCOUNT(p, PC_TO_I(p, tmp_addr));
#define	CALIB_DOSCALE(count)	(((count) + CALIB_SCALE / 3) / CALIB_SCALE)
#define	c2n(count, freq)	((int)((count) * 1000000000LL / freq))
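	/*
	 * For illustration (the actual values are machine-dependent, set in
	 * <machine/profile.h>): c2n() converts a raw count at rate `freq'
	 * into nanoseconds, and CALIB_DOSCALE() divides a total for
	 * CALIB_SCALE iterations down to a per-call figure with rounding.
	 * If CALIB_SCALE were 1000, profrate 2000000000 (2 GHz), and one of
	 * the loops above accumulated 50000 cycles, then
	 * c2n(50000, p->profrate) is 25000 ns in all, and
	 * CALIB_DOSCALE(25000) = (25000 + 333) / 1000 reports 25 ns/call.
	 */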
	printf("cputime %d, empty_loop %d, nullfunc_loop_profiled %d, mcount %d, mexitcount %d\n",
	    CALIB_DOSCALE(c2n(cputime_overhead, p->profrate)),
	    CALIB_DOSCALE(c2n(empty_loop_time, p->profrate)),
	    CALIB_DOSCALE(c2n(nullfunc_loop_profiled_time, p->profrate)),
	    CALIB_DOSCALE(c2n(mcount_overhead, p->profrate)),
	    CALIB_DOSCALE(c2n(mexitcount_overhead, p->profrate)));
	cputime_overhead -= empty_loop_time;
	mcount_overhead -= empty_loop_time;
	mexitcount_overhead -= empty_loop_time;

	/*-
	 * Profiling overheads are determined by the times between the
	 * following events:
	 *	MC1: mcount() is called
	 *	MC2: cputime() (called from mcount()) latches the timer
	 *	MC3: mcount() completes
	 *	ME1: mexitcount() is called
	 *	ME2: cputime() (called from mexitcount()) latches the timer
	 *	ME3: mexitcount() completes.
	 * The times between the events vary slightly depending on instruction
	 * combination and cache misses, etc.  Attempt to determine the
	 * minimum times.  These can be subtracted from the profiling times
	 * without much risk of reducing the profiling times below what they
	 * would be when profiling is not configured.  Abbreviate:
	 *	ab = minimum time between MC1 and MC3
	 *	a  = minimum time between MC1 and MC2
	 *	b  = minimum time between MC2 and MC3
	 *	cd = minimum time between ME1 and ME3
	 *	c  = minimum time between ME1 and ME2
	 *	d  = minimum time between ME2 and ME3.
	 * These satisfy the relations:
	 *	ab    <= mcount_overhead		(just measured)
	 *	a + b <= ab
	 *	cd    <= mexitcount_overhead		(just measured)
	 *	c + d <= cd
	 *	a + d <= nullfunc_loop_profiled_time	(just measured)
	 *	a >= 0, b >= 0, c >= 0, d >= 0.
	 * Assume that ab and cd are equal to the minimums.
	 */
	p->cputime_overhead = CALIB_DOSCALE(cputime_overhead);
	p->mcount_overhead = CALIB_DOSCALE(mcount_overhead - cputime_overhead);
	p->mexitcount_overhead = CALIB_DOSCALE(mexitcount_overhead
				     - cputime_overhead);
	nullfunc_loop_overhead = nullfunc_loop_profiled_time - empty_loop_time;
	p->mexitcount_post_overhead = CALIB_DOSCALE((mcount_overhead
						     - nullfunc_loop_overhead)
						    / 4);
	p->mexitcount_pre_overhead = p->mexitcount_overhead
				     + p->cputime_overhead
				     - p->mexitcount_post_overhead;
	p->mcount_pre_overhead = CALIB_DOSCALE(nullfunc_loop_overhead)
				 - p->mexitcount_post_overhead;
	p->mcount_post_overhead = p->mcount_overhead
				  + p->cputime_overhead
				  - p->mcount_pre_overhead;
	printf(
"Profiling overheads: mcount: %d+%d, %d+%d; mexitcount: %d+%d, %d+%d nsec\n",
	    c2n(p->cputime_overhead, p->profrate),
	    c2n(p->mcount_overhead, p->profrate),
	    c2n(p->mcount_pre_overhead, p->profrate),
	    c2n(p->mcount_post_overhead, p->profrate),
	    c2n(p->cputime_overhead, p->profrate),
	    c2n(p->mexitcount_overhead, p->profrate),
	    c2n(p->mexitcount_pre_overhead, p->profrate),
	    c2n(p->mexitcount_post_overhead, p->profrate));
	printf(
"Profiling overheads: mcount: %d+%d, %d+%d; mexitcount: %d+%d, %d+%d cycles\n",
	    p->cputime_overhead, p->mcount_overhead,
	    p->mcount_pre_overhead, p->mcount_post_overhead,
	    p->cputime_overhead, p->mexitcount_overhead,
	    p->mexitcount_pre_overhead, p->mexitcount_post_overhead);
#endif /* GUPROF */
}

/*
 * Return kernel profiling information.
 */
static int
sysctl_kern_prof(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1;
	u_int namelen = arg2;
	struct gmonparam *gp = &_gmonparam;
	int error;
	int state;

	/* all sysctl names at this level are terminal */
	if (namelen != 1)
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
	case GPROF_STATE:
		state = gp->state;
		error = sysctl_handle_int(oidp, &state, 0, req);
		if (error)
			return (error);
		if (!req->newptr)
			return (0);
		if (state == GMON_PROF_OFF) {
			gp->state = state;
			PROC_LOCK(&proc0);
			stopprofclock(&proc0);
			PROC_UNLOCK(&proc0);
			stopguprof(gp);
		} else if (state == GMON_PROF_ON) {
			gp->state = GMON_PROF_OFF;
			stopguprof(gp);
			gp->profrate = profhz;
			PROC_LOCK(&proc0);
			startprofclock(&proc0);
			PROC_UNLOCK(&proc0);
			gp->state = state;
#ifdef GUPROF
		} else if (state == GMON_PROF_HIRES) {
			gp->state = GMON_PROF_OFF;
			PROC_LOCK(&proc0);
			stopprofclock(&proc0);
			PROC_UNLOCK(&proc0);
			startguprof(gp);
			gp->state = state;
#endif
		} else if (state != gp->state)
			return (EINVAL);
		return (0);
	case GPROF_COUNT:
		return (sysctl_handle_opaque(oidp,
		    gp->kcount, gp->kcountsize, req));
	case GPROF_FROMS:
		return (sysctl_handle_opaque(oidp,
		    gp->froms, gp->fromssize, req));
	case GPROF_TOS:
		return (sysctl_handle_opaque(oidp,
		    gp->tos, gp->tossize, req));
	case GPROF_GMONPARAM:
		return (sysctl_handle_opaque(oidp, gp, sizeof *gp, req));
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}

SYSCTL_NODE(_kern, KERN_PROF, prof, CTLFLAG_RW, sysctl_kern_prof, "");
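
/*
 * For illustration only (not part of the original file): userland tools
 * such as kgmon(8) drive the handler above through sysctl(3).  A minimal
 * sketch, assuming the usual <sys/gmon.h> definitions:
 *
 *	int mib[3] = { CTL_KERN, KERN_PROF, GPROF_STATE };
 *	int state = GMON_PROF_ON;
 *
 *	if (sysctl(mib, 3, NULL, NULL, &state, sizeof(state)) == -1)
 *		err(1, "sysctl");
 *
 * Reading GPROF_GMONPARAM the same way yields a struct gmonparam whose
 * kcountsize, fromssize, and tossize fields size the buffers needed to
 * fetch GPROF_COUNT, GPROF_FROMS, and GPROF_TOS.
 */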
#endif /* GPROF */

/*
 * Profiling system call.
 *
 * The scale factor is a fixed point number with 16 bits of fraction, so that
 * 1.0 is represented as 0x10000.  A scale factor of 0 turns off profiling.
 */
#ifndef _SYS_SYSPROTO_H_
struct profil_args {
	caddr_t	samples;
	size_t	size;
	size_t	offset;
	u_int	scale;
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
int
profil(struct thread *td, struct profil_args *uap)
{
	struct uprof *upp;
	struct proc *p;

	if (uap->scale > (1 << 16))
		return (EINVAL);

	p = td->td_proc;
	if (uap->scale == 0) {
		PROC_LOCK(p);
		stopprofclock(p);
		PROC_UNLOCK(p);
		return (0);
	}
	upp = &p->p_stats->p_prof;
	upp->pr_off = uap->offset;
	upp->pr_scale = uap->scale;
	upp->pr_base = uap->samples;
	upp->pr_size = uap->size;
	PROC_LOCK(p);
	startprofclock(p);
	PROC_UNLOCK(p);

	return (0);
}
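
/*
 * For illustration only (not part of the original file): a userland
 * caller might arm sampling of a text range [lowpc, highpc) roughly as
 * follows (buf, lowpc, and highpc are hypothetical):
 *
 *	u_short buf[8192];
 *	u_int scale;
 *
 *	scale = (u_int)(((u_quad_t)sizeof(buf) << 16) / (highpc - lowpc));
 *	profil((caddr_t)buf, sizeof(buf), lowpc, scale);
 *
 * A scale computed this way maps the whole range onto buf; passing a
 * scale of 0 later turns sampling back off, as described above.
 */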

/*
 * Scale is a fixed-point number with the binary point 16 bits
 * into the value, and is <= 1.0.  pc is at most 32 bits, so the
 * intermediate result is at most 48 bits.
 */
#define	PC_TO_INDEX(pc, prof) \
	((int)(((u_quad_t)((pc) - (prof)->pr_off) * \
	    (u_quad_t)((prof)->pr_scale)) >> 16) & ~1)
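
/*
 * For example, with pr_scale = 0x8000 (i.e. 0.5) and pc - pr_off =
 * 0x1000, the intermediate product is 0x8000000; shifting right by 16
 * gives 0x800, and the final ~1 mask keeps the result aligned for the
 * u_short counters, so the tick lands at byte offset 0x800 of the
 * sample buffer.
 */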

/*
 * Collect user-level profiling statistics; called on a profiling tick,
 * when a process is running in user-mode.  This routine may be called
 * from an interrupt context.  We try to update the user profiling buffers
 * cheaply with fuswintr() and suswintr().  If that fails, we revert to
 * an AST that will vector us to trap() with a context in which copyin
 * and copyout will work.  Trap will then call addupc_task().
 *
 * Note that we may (rarely) not get around to the AST soon enough, and
 * lose profile ticks when the next tick overwrites this one, but in this
 * case the system is overloaded and the profile is probably already
 * inaccurate.
 */
void
addupc_intr(struct thread *td, uintptr_t pc, u_int ticks)
{
	struct uprof *prof;
	caddr_t addr;
	u_int i;
	int v;

	if (ticks == 0)
		return;
	prof = &td->td_proc->p_stats->p_prof;
	if (pc < prof->pr_off ||
	    (i = PC_TO_INDEX(pc, prof)) >= prof->pr_size)
		return;			/* out of range; ignore */

	addr = prof->pr_base + i;
	if ((v = fuswintr(addr)) == -1 || suswintr(addr, v + ticks) == -1) {
		mtx_lock_spin(&sched_lock);
		prof->pr_addr = pc;
		prof->pr_ticks = ticks;
		td->td_flags |= TDF_OWEUPC | TDF_ASTPENDING;
		mtx_unlock_spin(&sched_lock);
	}
}

/*
 * Much like addupc_intr(), but we can afford to take faults here.  If
 * the update fails, we simply turn off profiling.
 */
void
addupc_task(struct thread *td, uintptr_t pc, u_int ticks)
{
	struct proc *p = td->td_proc;
	struct uprof *prof;
	caddr_t addr;
	u_int i;
	u_short v;
	int stop = 0;

	if (ticks == 0)
		return;

	PROC_LOCK(p);
	if (!(p->p_flag & P_PROFIL)) {
		PROC_UNLOCK(p);
		return;
	}
	p->p_profthreads++;
	PROC_UNLOCK(p);
	prof = &p->p_stats->p_prof;
	if (pc < prof->pr_off ||
	    (i = PC_TO_INDEX(pc, prof)) >= prof->pr_size) {
		goto out;
	}

	addr = prof->pr_base + i;
	if (copyin(addr, &v, sizeof(v)) == 0) {
		v += ticks;
		if (copyout(&v, addr, sizeof(v)) == 0)
			goto out;
	}
	stop = 1;

out:
	PROC_LOCK(p);
	if (--p->p_profthreads == 0) {
		if (p->p_flag & P_STOPPROF) {
			wakeup(&p->p_profthreads);
			stop = 0;
		}
	}
	if (stop)
		stopprofclock(p);
	PROC_UNLOCK(p);
}

#if defined(__i386__) && __GNUC__ >= 2
/*
 * Support for "--test-coverage --profile-arcs" in GCC.
 *
 * We need to call all the functions in the .ctor section, in order
 * to get all the counter-arrays strung into a list.
 *
 * XXX: the .ctors call __bb_init_func, which is located over in
 * XXX: i386/i386/support.s for historical reasons.  There is probably
 * XXX: no reason for that to be assembler anymore, but doing it right
 * XXX: in MI C code requires one to reverse-engineer the type-selection
 * XXX: inside GCC.  Have fun.
 *
 * XXX: Worrisome perspective: Calling the .ctors may make C++ in the
 * XXX: kernel feasible.  Don't.
 */
typedef void (*ctor_t)(void);
extern ctor_t _start_ctors, _stop_ctors;

static void
tcov_init(void *foo __unused)
{
	ctor_t *p, q;

	for (p = &_start_ctors; p < &_stop_ctors; p++) {
		q = *p;
		q();
	}
}

SYSINIT(tcov_init, SI_SUB_KPROF, SI_ORDER_SECOND, tcov_init, NULL)

/*
 * GCC contains magic to recognize calls to, for instance, execve() and
 * inserts calls to this function to preserve the profile counters.
 * XXX: Put zinging punchline here.
 */
void __bb_fork_func(void);
void
__bb_fork_func(void)
{
}

#endif