FreeBSD/Linux Kernel Cross Reference
sys/kern/subr_prof.c


    1 /*-
    2  * Copyright (c) 1982, 1986, 1993
    3  *      The Regents of the University of California.  All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  * 4. Neither the name of the University nor the names of its contributors
   14  *    may be used to endorse or promote products derived from this software
   15  *    without specific prior written permission.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   27  * SUCH DAMAGE.
   28  *
   29  *      @(#)subr_prof.c 8.3 (Berkeley) 9/23/93
   30  */
   31 
   32 #include <sys/cdefs.h>
   33 __FBSDID("$FreeBSD$");
   34 
   35 #include <sys/param.h>
   36 #include <sys/systm.h>
   37 #include <sys/sysproto.h>
   38 #include <sys/kernel.h>
   39 #include <sys/lock.h>
   40 #include <sys/mutex.h>
   41 #include <sys/proc.h>
   42 #include <sys/resourcevar.h>
   43 #include <sys/sysctl.h>
   44 
   45 #include <machine/cpu.h>
   46 
   47 #ifdef GPROF
   48 #include <sys/malloc.h>
   49 #include <sys/gmon.h>
   50 #undef MCOUNT
   51 
   52 static MALLOC_DEFINE(M_GPROF, "gprof", "kernel profiling buffer");
   53 
   54 static void kmstartup(void *);
   55 SYSINIT(kmem, SI_SUB_KPROF, SI_ORDER_FIRST, kmstartup, NULL)
   56 
   57 struct gmonparam _gmonparam = { GMON_PROF_OFF };
   58 
   59 #ifdef GUPROF
   60 void
   61 nullfunc_loop_profiled()
   62 {
   63         int i;
   64 
   65         for (i = 0; i < CALIB_SCALE; i++)
   66                 nullfunc_profiled();
   67 }
   68 
   69 #define nullfunc_loop_profiled_end      nullfunc_profiled       /* XXX */
   70 
   71 void
   72 nullfunc_profiled()
   73 {
   74 }
   75 #endif /* GUPROF */
   76 
   77 /*
   78  * Update the histograms to support extending the text region arbitrarily.
    79  * This is done slightly naively (no sparse regions), so it will waste small
    80  * amounts of memory, but it works well enough overall to allow profiling
    81  * of KLDs.
   82  */
   83 void
   84 kmupetext(uintfptr_t nhighpc)
   85 {
   86         struct gmonparam np;    /* slightly large */
   87         struct gmonparam *p = &_gmonparam;
   88         char *cp;
   89 
   90         GIANT_REQUIRED;
   91         bcopy(p, &np, sizeof(*p));
   92         np.highpc = ROUNDUP(nhighpc, HISTFRACTION * sizeof(HISTCOUNTER));
   93         if (np.highpc <= p->highpc)
   94                 return;
   95         np.textsize = np.highpc - p->lowpc;
   96         np.kcountsize = np.textsize / HISTFRACTION;
   97         np.hashfraction = HASHFRACTION;
   98         np.fromssize = np.textsize / HASHFRACTION;
   99         np.tolimit = np.textsize * ARCDENSITY / 100;
  100         if (np.tolimit < MINARCS)
  101                 np.tolimit = MINARCS;
  102         else if (np.tolimit > MAXARCS)
  103                 np.tolimit = MAXARCS;
  104         np.tossize = np.tolimit * sizeof(struct tostruct);
  105         cp = malloc(np.kcountsize + np.fromssize + np.tossize,
  106             M_GPROF, M_WAITOK);
  107         /*
  108          * Check for something else extending highpc while we slept.
  109          */
  110         if (np.highpc <= p->highpc) {
  111                 free(cp, M_GPROF);
  112                 return;
  113         }
  114         np.tos = (struct tostruct *)cp;
  115         cp += np.tossize;
  116         np.kcount = (HISTCOUNTER *)cp;
  117         cp += np.kcountsize;
  118         np.froms = (u_short *)cp;
  119 #ifdef GUPROF
  120         /* Reinitialize pointers to overhead counters. */
  121         np.cputime_count = &KCOUNT(&np, PC_TO_I(&np, cputime));
  122         np.mcount_count = &KCOUNT(&np, PC_TO_I(&np, mcount));
  123         np.mexitcount_count = &KCOUNT(&np, PC_TO_I(&np, mexitcount));
  124 #endif
  125         critical_enter();
  126         bcopy(p->tos, np.tos, p->tossize);
  127         bzero((char *)np.tos + p->tossize, np.tossize - p->tossize);
  128         bcopy(p->kcount, np.kcount, p->kcountsize);
  129         bzero((char *)np.kcount + p->kcountsize, np.kcountsize -
  130             p->kcountsize);
  131         bcopy(p->froms, np.froms, p->fromssize);
  132         bzero((char *)np.froms + p->fromssize, np.fromssize - p->fromssize);
  133         cp = (char *)p->tos;
  134         bcopy(&np, p, sizeof(*p));
  135         critical_exit();
  136         free(cp, M_GPROF);
  137 }
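
       /*
        * A sketch of the sort of caller kmupetext() expects: a kernel linker
        * that has just mapped a new module's text and wants the histogram
        * extended to cover it (the symbol name here is illustrative):
        *
        *         kmupetext((uintfptr_t)new_module_text_end);
        *
        * The caller must hold Giant (see GIANT_REQUIRED above); the re-check
        * of highpc after the sleeping malloc() handles another thread having
        * extended the range while we slept.
        */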
  138 
  139 static void
  140 kmstartup(dummy)
  141         void *dummy;
  142 {
  143         char *cp;
  144         struct gmonparam *p = &_gmonparam;
  145 #ifdef GUPROF
  146         int cputime_overhead;
  147         int empty_loop_time;
  148         int i;
  149         int mcount_overhead;
  150         int mexitcount_overhead;
  151         int nullfunc_loop_overhead;
  152         int nullfunc_loop_profiled_time;
  153         uintfptr_t tmp_addr;
  154 #endif
  155 
  156         /*
  157          * Round lowpc and highpc to multiples of the density we're using
  158          * so the rest of the scaling (here and in gprof) stays in ints.
  159          */
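                /*
                 * A worked example, assuming for illustration HISTFRACTION == 2
                 * and a 2-byte HISTCOUNTER: the rounding unit is 2 * 2 == 4
                 * bytes, so btext == 0xc0400123 would give
                 * lowpc == 0xc0400120 and etext == 0xc07f3456 would give
                 * highpc == 0xc07f3458.
                 */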
  160         p->lowpc = ROUNDDOWN((u_long)btext, HISTFRACTION * sizeof(HISTCOUNTER));
  161         p->highpc = ROUNDUP((u_long)etext, HISTFRACTION * sizeof(HISTCOUNTER));
  162         p->textsize = p->highpc - p->lowpc;
  163         printf("Profiling kernel, textsize=%lu [%jx..%jx]\n",
  164             p->textsize, (uintmax_t)p->lowpc, (uintmax_t)p->highpc);
  165         p->kcountsize = p->textsize / HISTFRACTION;
  166         p->hashfraction = HASHFRACTION;
  167         p->fromssize = p->textsize / HASHFRACTION;
  168         p->tolimit = p->textsize * ARCDENSITY / 100;
  169         if (p->tolimit < MINARCS)
  170                 p->tolimit = MINARCS;
  171         else if (p->tolimit > MAXARCS)
  172                 p->tolimit = MAXARCS;
  173         p->tossize = p->tolimit * sizeof(struct tostruct);
  174         cp = (char *)malloc(p->kcountsize + p->fromssize + p->tossize,
  175             M_GPROF, M_WAITOK | M_ZERO);
  176         p->tos = (struct tostruct *)cp;
  177         cp += p->tossize;
  178         p->kcount = (HISTCOUNTER *)cp;
  179         cp += p->kcountsize;
  180         p->froms = (u_short *)cp;
  181         p->histcounter_type = FUNCTION_ALIGNMENT / HISTFRACTION * NBBY;
  182 
  183 #ifdef GUPROF
  184         /* Signed counters. */
  185         p->histcounter_type = -p->histcounter_type;
  186 
  187         /* Initialize pointers to overhead counters. */
  188         p->cputime_count = &KCOUNT(p, PC_TO_I(p, cputime));
  189         p->mcount_count = &KCOUNT(p, PC_TO_I(p, mcount));
  190         p->mexitcount_count = &KCOUNT(p, PC_TO_I(p, mexitcount));
  191 
  192         /*
  193          * Disable interrupts to avoid interference while we calibrate
  194          * things.
  195          */
  196         critical_enter();
  197 
  198         /*
  199          * Determine overheads.
  200          * XXX this needs to be repeated for each useful timer/counter.
  201          */
  202         cputime_overhead = 0;
  203         startguprof(p);
  204         for (i = 0; i < CALIB_SCALE; i++)
  205                 cputime_overhead += cputime();
  206 
  207         empty_loop();
  208         startguprof(p);
  209         empty_loop();
  210         empty_loop_time = cputime();
  211 
  212         nullfunc_loop_profiled();
  213 
  214         /*
  215          * Start profiling.  There won't be any normal function calls since
  216          * interrupts are disabled, but we will call the profiling routines
  217          * directly to determine their overheads.
  218          */
  219         p->state = GMON_PROF_HIRES;
  220 
  221         startguprof(p);
  222         nullfunc_loop_profiled();
  223 
  224         startguprof(p);
  225         for (i = 0; i < CALIB_SCALE; i++)
  226                 MCOUNT_OVERHEAD(profil);
  227         mcount_overhead = KCOUNT(p, PC_TO_I(p, profil));
  228 
  229         startguprof(p);
  230         for (i = 0; i < CALIB_SCALE; i++)
  231                 MEXITCOUNT_OVERHEAD();
  232         MEXITCOUNT_OVERHEAD_GETLABEL(tmp_addr);
  233         mexitcount_overhead = KCOUNT(p, PC_TO_I(p, tmp_addr));
  234 
  235         p->state = GMON_PROF_OFF;
  236         stopguprof(p);
  237 
  238         critical_exit();
  239 
  240         nullfunc_loop_profiled_time = 0;
  241         for (tmp_addr = (uintfptr_t)nullfunc_loop_profiled;
  242              tmp_addr < (uintfptr_t)nullfunc_loop_profiled_end;
  243              tmp_addr += HISTFRACTION * sizeof(HISTCOUNTER))
  244                 nullfunc_loop_profiled_time += KCOUNT(p, PC_TO_I(p, tmp_addr));
  245 #define CALIB_DOSCALE(count)    (((count) + CALIB_SCALE / 3) / CALIB_SCALE)
  246 #define c2n(count, freq)        ((int)((count) * 1000000000LL / freq))
  247         printf("cputime %d, empty_loop %d, nullfunc_loop_profiled %d, mcount %d, mexitcount %d\n",
  248                CALIB_DOSCALE(c2n(cputime_overhead, p->profrate)),
  249                CALIB_DOSCALE(c2n(empty_loop_time, p->profrate)),
  250                CALIB_DOSCALE(c2n(nullfunc_loop_profiled_time, p->profrate)),
  251                CALIB_DOSCALE(c2n(mcount_overhead, p->profrate)),
  252                CALIB_DOSCALE(c2n(mexitcount_overhead, p->profrate)));
  253         cputime_overhead -= empty_loop_time;
  254         mcount_overhead -= empty_loop_time;
  255         mexitcount_overhead -= empty_loop_time;
  256 
  257         /*-
  258          * Profiling overheads are determined by the times between the
  259          * following events:
  260          *      MC1: mcount() is called
  261          *      MC2: cputime() (called from mcount()) latches the timer
  262          *      MC3: mcount() completes
  263          *      ME1: mexitcount() is called
  264          *      ME2: cputime() (called from mexitcount()) latches the timer
  265          *      ME3: mexitcount() completes.
  266          * The times between the events vary slightly depending on instruction
  267          * combination and cache misses, etc.  Attempt to determine the
  268          * minimum times.  These can be subtracted from the profiling times
  269          * without much risk of reducing the profiling times below what they
  270          * would be when profiling is not configured.  Abbreviate:
  271          *      ab = minimum time between MC1 and MC3
   272  *      a  = minimum time between MC1 and MC2
  273          *      b  = minimum time between MC2 and MC3
  274          *      cd = minimum time between ME1 and ME3
  275          *      c  = minimum time between ME1 and ME2
  276          *      d  = minimum time between ME2 and ME3.
  277          * These satisfy the relations:
  278          *      ab            <= mcount_overhead                (just measured)
  279          *      a + b         <= ab
  280          *              cd    <= mexitcount_overhead            (just measured)
  281          *              c + d <= cd
  282          *      a         + d <= nullfunc_loop_profiled_time    (just measured)
  283          *      a >= 0, b >= 0, c >= 0, d >= 0.
  284          * Assume that ab and cd are equal to the minimums.
  285          */
  286         p->cputime_overhead = CALIB_DOSCALE(cputime_overhead);
  287         p->mcount_overhead = CALIB_DOSCALE(mcount_overhead - cputime_overhead);
  288         p->mexitcount_overhead = CALIB_DOSCALE(mexitcount_overhead
  289                                                - cputime_overhead);
  290         nullfunc_loop_overhead = nullfunc_loop_profiled_time - empty_loop_time;
  291         p->mexitcount_post_overhead = CALIB_DOSCALE((mcount_overhead
  292                                                      - nullfunc_loop_overhead)
  293                                                     / 4);
  294         p->mexitcount_pre_overhead = p->mexitcount_overhead
  295                                      + p->cputime_overhead
  296                                      - p->mexitcount_post_overhead;
  297         p->mcount_pre_overhead = CALIB_DOSCALE(nullfunc_loop_overhead)
  298                                  - p->mexitcount_post_overhead;
  299         p->mcount_post_overhead = p->mcount_overhead
  300                                   + p->cputime_overhead
  301                                   - p->mcount_pre_overhead;
  302         printf(
  303 "Profiling overheads: mcount: %d+%d, %d+%d; mexitcount: %d+%d, %d+%d nsec\n",
  304                c2n(p->cputime_overhead, p->profrate),
  305                c2n(p->mcount_overhead, p->profrate),
  306                c2n(p->mcount_pre_overhead, p->profrate),
  307                c2n(p->mcount_post_overhead, p->profrate),
  308                c2n(p->cputime_overhead, p->profrate),
  309                c2n(p->mexitcount_overhead, p->profrate),
  310                c2n(p->mexitcount_pre_overhead, p->profrate),
  311                c2n(p->mexitcount_post_overhead, p->profrate));
  312         printf(
  313 "Profiling overheads: mcount: %d+%d, %d+%d; mexitcount: %d+%d, %d+%d cycles\n",
  314                p->cputime_overhead, p->mcount_overhead,
  315                p->mcount_pre_overhead, p->mcount_post_overhead,
  316                p->cputime_overhead, p->mexitcount_overhead,
  317                p->mexitcount_pre_overhead, p->mexitcount_post_overhead);
  318 #endif /* GUPROF */
  319 }
  320 
  321 /*
  322  * Return kernel profiling information.
  323  */
  324 static int
  325 sysctl_kern_prof(SYSCTL_HANDLER_ARGS)
  326 {
  327         int *name = (int *) arg1;
  328         u_int namelen = arg2;
  329         struct gmonparam *gp = &_gmonparam;
  330         int error;
  331         int state;
  332 
  333         /* all sysctl names at this level are terminal */
  334         if (namelen != 1)
  335                 return (ENOTDIR);               /* overloaded */
  336 
  337         switch (name[0]) {
  338         case GPROF_STATE:
  339                 state = gp->state;
  340                 error = sysctl_handle_int(oidp, &state, 0, req);
  341                 if (error)
  342                         return (error);
  343                 if (!req->newptr)
  344                         return (0);
  345                 if (state == GMON_PROF_OFF) {
  346                         gp->state = state;
  347                         PROC_LOCK(&proc0);
  348                         stopprofclock(&proc0);
  349                         PROC_UNLOCK(&proc0);
  350                         stopguprof(gp);
  351                 } else if (state == GMON_PROF_ON) {
  352                         gp->state = GMON_PROF_OFF;
  353                         stopguprof(gp);
  354                         gp->profrate = profhz;
  355                         PROC_LOCK(&proc0);
  356                         startprofclock(&proc0);
  357                         PROC_UNLOCK(&proc0);
  358                         gp->state = state;
  359 #ifdef GUPROF
  360                 } else if (state == GMON_PROF_HIRES) {
  361                         gp->state = GMON_PROF_OFF;
  362                         PROC_LOCK(&proc0);
  363                         stopprofclock(&proc0);
  364                         PROC_UNLOCK(&proc0);
  365                         startguprof(gp);
  366                         gp->state = state;
  367 #endif
  368                 } else if (state != gp->state)
  369                         return (EINVAL);
  370                 return (0);
  371         case GPROF_COUNT:
  372                 return (sysctl_handle_opaque(oidp, 
  373                         gp->kcount, gp->kcountsize, req));
  374         case GPROF_FROMS:
  375                 return (sysctl_handle_opaque(oidp, 
  376                         gp->froms, gp->fromssize, req));
  377         case GPROF_TOS:
  378                 return (sysctl_handle_opaque(oidp, 
  379                         gp->tos, gp->tossize, req));
  380         case GPROF_GMONPARAM:
  381                 return (sysctl_handle_opaque(oidp, gp, sizeof *gp, req));
  382         default:
  383                 return (EOPNOTSUPP);
  384         }
  385         /* NOTREACHED */
  386 }
  387 
  388 SYSCTL_NODE(_kern, KERN_PROF, prof, CTLFLAG_RW, sysctl_kern_prof, "");
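
       /*
        * A minimal userland sketch, in the style of kgmon(8), of driving the
        * handler above through sysctl(3); the mib constants come from
        * <sys/sysctl.h> and <sys/gmon.h>:
        *
        *         #include <sys/types.h>
        *         #include <sys/sysctl.h>
        *         #include <sys/gmon.h>
        *
        *         int
        *         kern_prof_start(void)
        *         {
        *                 int mib[3] = { CTL_KERN, KERN_PROF, GPROF_STATE };
        *                 int state = GMON_PROF_ON;
        *
        *                 return (sysctl(mib, 3, NULL, NULL, &state,
        *                     sizeof(state)));
        *         }
        *
        * Writing GMON_PROF_ON takes the GPROF_STATE branch above and starts
        * the profiling clock on proc0; GPROF_GMONPARAM can be read the same
        * way to obtain the struct gmonparam that sizes the kcount, froms and
        * tos buffers before fetching them.
        */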
  389 #endif /* GPROF */
  390 
  391 /*
  392  * Profiling system call.
  393  *
  394  * The scale factor is a fixed point number with 16 bits of fraction, so that
  395  * 1.0 is represented as 0x10000.  A scale factor of 0 turns off profiling.
  396  */
  397 #ifndef _SYS_SYSPROTO_H_
  398 struct profil_args {
  399         caddr_t samples;
  400         size_t  size;
  401         size_t  offset;
  402         u_int   scale;
  403 };
  404 #endif
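
       /*
        * A minimal userland sketch of the interface described above, assuming
        * the profil(2) prototype declared in <unistd.h>; a scale of 0x8000
        * (0.5) asks for one 16-bit counter per 4 bytes of profiled text, so
        * the sample buffer need only be half the size of the text range (the
        * my_text_* symbols are illustrative):
        *
        *         #include <sys/types.h>
        *         #include <stdlib.h>
        *         #include <unistd.h>
        *
        *         extern char my_text_start, my_text_end;
        *
        *         int
        *         start_sampling(void)
        *         {
        *                 size_t textsize = &my_text_end - &my_text_start;
        *                 size_t bufsize = textsize / 2;
        *                 char *buf = calloc(1, bufsize);
        *
        *                 if (buf == NULL)
        *                         return (-1);
        *                 return (profil(buf, bufsize,
        *                     (vm_offset_t)&my_text_start, 0x8000));
        *         }
        */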
  405 /* ARGSUSED */
  406 int
  407 profil(td, uap)
  408         struct thread *td;
  409         register struct profil_args *uap;
  410 {
  411         struct uprof *upp;
  412         struct proc *p;
  413 
  414         if (uap->scale > (1 << 16))
  415                 return (EINVAL);
  416 
  417         p = td->td_proc;
  418         if (uap->scale == 0) {
  419                 PROC_LOCK(p);
  420                 stopprofclock(p);
  421                 PROC_UNLOCK(p);
  422                 return (0);
  423         }
  424         PROC_LOCK(p);
  425         upp = &td->td_proc->p_stats->p_prof;
  426         PROC_SLOCK(p);
  427         upp->pr_off = uap->offset;
  428         upp->pr_scale = uap->scale;
  429         upp->pr_base = uap->samples;
  430         upp->pr_size = uap->size;
  431         PROC_SUNLOCK(p);
  432         startprofclock(p);
  433         PROC_UNLOCK(p);
  434 
  435         return (0);
  436 }
  437 
  438 /*
  439  * Scale is a fixed-point number with the binary point 16 bits
  440  * into the value, and is <= 1.0.  pc is at most 32 bits, so the
  441  * intermediate result is at most 48 bits.
  442  */
  443 #define PC_TO_INDEX(pc, prof) \
  444         ((int)(((u_quad_t)((pc) - (prof)->pr_off) * \
  445             (u_quad_t)((prof)->pr_scale)) >> 16) & ~1)
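
       /*
        * A worked example of the macro above: with pr_off == 0x1000,
        * pr_scale == 0x8000 (0.5) and pc == 0x1234, the intermediate product
        * is 0x234 * 0x8000 == 0x11a0000; shifting right by 16 gives 0x11a and
        * masking with ~1 leaves 0x11a, i.e. the u_short counter at byte
        * offset 0x11a (index 141) of the sample buffer.
        */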
  446 
  447 /*
  448  * Collect user-level profiling statistics; called on a profiling tick,
  449  * when a process is running in user-mode.  This routine may be called
  450  * from an interrupt context.  We try to update the user profiling buffers
  451  * cheaply with fuswintr() and suswintr().  If that fails, we revert to
  452  * an AST that will vector us to trap() with a context in which copyin
  453  * and copyout will work.  Trap will then call addupc_task().
  454  *
  455  * Note that we may (rarely) not get around to the AST soon enough, and
  456  * lose profile ticks when the next tick overwrites this one, but in this
  457  * case the system is overloaded and the profile is probably already
  458  * inaccurate.
  459  */
  460 void
  461 addupc_intr(struct thread *td, uintfptr_t pc, u_int ticks)
  462 {
  463         struct uprof *prof;
  464         caddr_t addr;
  465         u_int i;
  466         int v;
  467 
  468         if (ticks == 0)
  469                 return;
  470         prof = &td->td_proc->p_stats->p_prof;
  471         PROC_SLOCK(td->td_proc);
  472         if (pc < prof->pr_off ||
  473             (i = PC_TO_INDEX(pc, prof)) >= prof->pr_size) {
  474                 PROC_SUNLOCK(td->td_proc);
  475                 return;                 /* out of range; ignore */
  476         }
  477 
  478         addr = prof->pr_base + i;
  479         PROC_SUNLOCK(td->td_proc);
  480         if ((v = fuswintr(addr)) == -1 || suswintr(addr, v + ticks) == -1) {
  481                 td->td_profil_addr = pc;
  482                 td->td_profil_ticks = ticks;
  483                 td->td_pflags |= TDP_OWEUPC;
  484                 thread_lock(td);
  485                 td->td_flags |= TDF_ASTPENDING;
  486                 thread_unlock(td);
  487         }
  488 }
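
       /*
        * When the fast-path update above fails, the deferred work is picked
        * up on the way back to user mode.  Roughly, the AST handler does the
        * equivalent of the following with the values saved in the thread (a
        * sketch, not the verbatim trap code):
        *
        *         if (td->td_pflags & TDP_OWEUPC && p->p_flag & P_PROFIL) {
        *                 addupc_task(td, td->td_profil_addr,
        *                     td->td_profil_ticks);
        *                 td->td_profil_ticks = 0;
        *                 td->td_pflags &= ~TDP_OWEUPC;
        *         }
        */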
  489 
  490 /*
  491  * Much like before, but we can afford to take faults here.  If the
  492  * update fails, we simply turn off profiling.
  493  */
  494 void
  495 addupc_task(struct thread *td, uintfptr_t pc, u_int ticks)
  496 {
  497         struct proc *p = td->td_proc; 
  498         struct uprof *prof;
  499         caddr_t addr;
  500         u_int i;
  501         u_short v;
  502         int stop = 0;
  503 
  504         if (ticks == 0)
  505                 return;
  506 
  507         PROC_LOCK(p);
  508         if (!(p->p_flag & P_PROFIL)) {
  509                 PROC_UNLOCK(p);
  510                 return;
  511         }
  512         p->p_profthreads++;
  513         prof = &p->p_stats->p_prof;
  514         PROC_SLOCK(p);
  515         if (pc < prof->pr_off ||
  516             (i = PC_TO_INDEX(pc, prof)) >= prof->pr_size) {
  517                 PROC_SUNLOCK(p);
  518                 goto out;
  519         }
  520 
  521         addr = prof->pr_base + i;
  522         PROC_SUNLOCK(p);
  523         PROC_UNLOCK(p);
  524         if (copyin(addr, &v, sizeof(v)) == 0) {
  525                 v += ticks;
  526                 if (copyout(&v, addr, sizeof(v)) == 0) {
  527                         PROC_LOCK(p);
  528                         goto out;
  529                 }
  530         }
  531         stop = 1;
  532         PROC_LOCK(p);
  533 
  534 out:
  535         if (--p->p_profthreads == 0) {
  536                 if (p->p_flag & P_STOPPROF) {
  537                         wakeup(&p->p_profthreads);
  538                         stop = 0;
  539                 }
  540         }
  541         if (stop)
  542                 stopprofclock(p);
  543         PROC_UNLOCK(p);
  544 }
  545 
  546 #if (defined(__amd64__) || defined(__i386__)) && \
  547         defined(__GNUCLIKE_CTOR_SECTION_HANDLING)
  548 /*
  549  * Support for "--test-coverage --profile-arcs" in GCC.
  550  *
  551  * We need to call all the functions in the .ctor section, in order
  552  * to get all the counter-arrays strung into a list.
  553  *
   554  * XXX: the .ctors call __bb_init_func which is located over in
  555  * XXX: i386/i386/support.s for historical reasons.  There is probably
  556  * XXX: no reason for that to be assembler anymore, but doing it right
  557  * XXX: in MI C code requires one to reverse-engineer the type-selection
  558  * XXX: inside GCC.  Have fun.
  559  *
  560  * XXX: Worrisome perspective: Calling the .ctors may make C++ in the
  561  * XXX: kernel feasible.  Don't.
  562  */
  563 typedef void (*ctor_t)(void);
  564 extern ctor_t _start_ctors, _stop_ctors;
  565 
  566 static void
  567 tcov_init(void *foo __unused)
  568 {
  569         ctor_t *p, q;
  570 
  571         for (p = &_start_ctors; p < &_stop_ctors; p++) {
  572                 q = *p;
  573                 q();
  574         }
  575 }
  576 
  577 SYSINIT(tcov_init, SI_SUB_KPROF, SI_ORDER_SECOND, tcov_init, NULL)
  578 
  579 /*
   580  * GCC contains magic to recognize calls to, for instance, execve() and
  581  * puts in calls to this function to preserve the profile counters.
  582  * XXX: Put zinging punchline here.
  583  */
  584 void __bb_fork_func(void);
  585 void
  586 __bb_fork_func(void)
  587 {
  588 }
  589 
  590 #endif
  591 
