The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/libkern/mcount.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*-
    2  * Copyright (c) 1983, 1992, 1993
    3  *      The Regents of the University of California.  All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  * 3. Neither the name of the University nor the names of its contributors
   14  *    may be used to endorse or promote products derived from this software
   15  *    without specific prior written permission.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   27  * SUCH DAMAGE.
   28  *
   29  * $FreeBSD: src/sys/libkern/mcount.c,v 1.16 1999/12/29 04:54:41 peter Exp $
   30  */
   31 
#include <sys/param.h>
#include <sys/gmon.h>
#if defined(_KERNEL) && !defined(_KERNEL_VIRTUAL)
#ifndef GUPROF
#include <sys/systm.h>
#endif
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
/*
 * Assembly-language entry points referenced below only for their
 * addresses: _mcount compares/redirects frompc against these labels
 * to attribute trap, interrupt, and user-mode samples sensibly.
 */
void    bintr(void);
void    btrap(void);
void    eintr(void);
void    user(void);
#endif
   46 
   47 /*
   48  * mcount is called on entry to each function compiled with the profiling
   49  * switch set.  _mcount(), which is declared in a machine-dependent way
   50  * with _MCOUNT_DECL, does the actual work and is either inlined into a
   51  * C routine or called by an assembly stub.  In any case, this magic is
   52  * taken care of by the MCOUNT definition in <machine/profile.h>.
   53  *
   54  * _mcount updates data structures that represent traversals of the
   55  * program's call graph edges.  frompc and selfpc are the return
   56  * address and function address that represents the given call graph edge.
   57  *
   58  * Note: the original BSD code used the same variable (frompcindex) for
   59  * both frompcindex and frompc.  Any reasonable, modern compiler will
   60  * perform this optimization.
   61  */
   62 /* _mcount; may be static, inline, etc */
_MCOUNT_DECL(uintfptr_t frompc, uintfptr_t selfpc)
{
#ifdef GUPROF
        int delta;                      /* cycles elapsed since last cputime() */
#endif
        fptrdiff_t frompci;             /* frompc as an offset from p->lowpc */
        u_short *frompcindex;           /* froms[] hash slot: head of arc chain */
        struct tostruct *top, *prevtop; /* cursors for walking the arc chain */
        struct gmonparam *p;
        long toindex;                   /* index of current arc in p->tos[] */
#if defined(_KERNEL) && !defined(_KERNEL_VIRTUAL)
        MCOUNT_DECL(s)                  /* saved state for MCOUNT_ENTER/EXIT */
#endif

        p = &_gmonparam;
#ifndef GUPROF                  /* XXX */
        /*
         * check that we are profiling
         * and that we aren't recursively invoked.
         */
        if (p->state != GMON_PROF_ON)
                return;
#endif
        /*
         * In the (non-virtual) kernel, MCOUNT_ENTER/MCOUNT_EXIT provide the
         * exclusion; in userland/vkernel the state field itself acts as a
         * busy flag blocking recursive entry (checked above).
         */
#if defined(_KERNEL) && !defined(_KERNEL_VIRTUAL)
        MCOUNT_ENTER(s);
#else
        p->state = GMON_PROF_BUSY;
#endif
        frompci = frompc - p->lowpc;

#if defined(_KERNEL) && !defined(_KERNEL_VIRTUAL)
        /*
         * When we are called from an exception handler, frompci may be
         * for a user address.  Convert such frompci's to the index of
         * user() to merge all user counts.
         *
         * XXX doesn't work properly with vkernel
         */
        if (frompci >= p->textsize) {
                if (frompci + p->lowpc
                    >= (uintfptr_t)(VM_MAX_USER_ADDRESS + UPAGES * PAGE_SIZE))
                        goto done;
                frompci = (uintfptr_t)user - p->lowpc;
                if (frompci >= p->textsize)
                    goto done;
        }
#endif

#ifdef GUPROF
        if (p->state == GMON_PROF_HIRES) {
                /*
                 * Count the time since cputime() was previously called
                 * against `frompc'.  Compensate for overheads.
                 *
                 * cputime() sets its prev_count variable to the count when
                 * it is called.  This in effect starts a counter for
                 * the next period of execution (normally from now until
                 * the next call to mcount() or mexitcount()).  We set
                 * cputime_bias to compensate for our own overhead.
                 *
                 * We use the usual sampling counters since they can be
                 * located efficiently.  4-byte counters are usually
                 * necessary.  gprof will add up the scattered counts
                 * just like it does for statistical profiling.  All
                 * counts are signed so that underflow in the subtractions
                 * doesn't matter much (negative counts are normally
                 * compensated for by larger counts elsewhere).  Underflow
                 * shouldn't occur, but may be caused by slightly wrong
                 * calibrations or from not clearing cputime_bias.
                 */
                delta = cputime() - cputime_bias - p->mcount_pre_overhead;
                cputime_bias = p->mcount_post_overhead;
                KCOUNT(p, frompci) += delta;
                *p->cputime_count += p->cputime_overhead;
                *p->mcount_count += p->mcount_overhead;
        }
#endif /* GUPROF */

#if defined(_KERNEL) && !defined(_KERNEL_VIRTUAL)
        /*
         * When we are called from an exception handler, frompc is faked
         * to be for where the exception occurred.  We've just solidified
         * the count for there.  Now convert frompci to the index of btrap()
         * for trap handlers and bintr() for interrupt handlers to make
         * exceptions appear in the call graph as calls from btrap() and
         * bintr() instead of calls from all over.
         *
         * NOTE(review): this assumes the handlers are laid out in memory as
         * btrap ... bintr ... eintr, so a selfpc in [btrap, eintr) is an
         * exception handler and one in [bintr, eintr) is an interrupt
         * handler -- a linker-layout invariant, not visible here.
         */
        if ((uintfptr_t)selfpc >= (uintfptr_t)btrap
            && (uintfptr_t)selfpc < (uintfptr_t)eintr) {
                if ((uintfptr_t)selfpc >= (uintfptr_t)bintr)
                        frompci = (uintfptr_t)bintr - p->lowpc;
                else
                        frompci = (uintfptr_t)btrap - p->lowpc;
        }
#endif

        /*
         * check that frompc is a reasonable pc value.
         * for example: signal catchers get called from the stack,
         *              not from text space.  too bad.
         */
        if (frompci >= p->textsize)
                goto done;

        /*
         * Hash frompci into the froms[] table; the slot holds the index of
         * the head of the chain of arcs (tostruct entries) leaving frompc.
         * A slot value of 0 means no arc has been recorded yet.
         */
        frompcindex = &p->froms[frompci / (p->hashfraction * sizeof(*p->froms))];
        toindex = *frompcindex;
        if (toindex == 0) {
                /*
                 *      first time traversing this arc
                 */
                toindex = ++p->tos[0].link;     /* tos[0].link = high-water allocator */
                if (toindex >= p->tolimit)
                        /* halt further profiling */
                        goto overflow;

                *frompcindex = toindex;
                top = &p->tos[toindex];
                top->selfpc = selfpc;
                top->count = 1;
                top->link = 0;
                goto done;
        }
        top = &p->tos[toindex];
        if (top->selfpc == selfpc) {
                /*
                 * arc at front of chain; usual case.
                 */
                top->count++;
                goto done;
        }
        /*
         * have to go looking down chain for it.
         * top points to what we are looking at,
         * prevtop points to previous top.
         * we know it is not at the head of the chain.
         */
        for (; /* goto done */; ) {
                if (top->link == 0) {
                        /*
                         * top is end of the chain and none of the chain
                         * had top->selfpc == selfpc.
                         * so we allocate a new tostruct
                         * and link it to the head of the chain.
                         */
                        toindex = ++p->tos[0].link;
                        if (toindex >= p->tolimit)
                                goto overflow;

                        top = &p->tos[toindex];
                        top->selfpc = selfpc;
                        top->count = 1;
                        top->link = *frompcindex;
                        *frompcindex = toindex;
                        goto done;
                }
                /*
                 * otherwise, check the next arc on the chain.
                 */
                prevtop = top;
                top = &p->tos[top->link];
                if (top->selfpc == selfpc) {
                        /*
                         * there it is.
                         * increment its count
                         * move it to the head of the chain.
                         */
                        top->count++;
                        toindex = prevtop->link;
                        prevtop->link = top->link;
                        top->link = *frompcindex;
                        *frompcindex = toindex;
                        goto done;
                }

        }
done:
        /* Normal exit: drop the lock / clear the busy flag. */
#if defined(_KERNEL) && !defined(_KERNEL_VIRTUAL)
        MCOUNT_EXIT(s);
#else
        p->state = GMON_PROF_ON;
#endif
        return;
overflow:
        /* tos[] table full: latch GMON_PROF_ERROR to disable profiling. */
        p->state = GMON_PROF_ERROR;
#if defined(_KERNEL) && !defined(_KERNEL_VIRTUAL)
        MCOUNT_EXIT(s);
#endif
        return;
}
  252 
/*
 * Actual definition of mcount function.  Defined in <machine/profile.h>,
 * which is included by <sys/gmon.h>.  The MCOUNT macro emits the
 * machine-dependent entry stub that recovers frompc/selfpc and invokes
 * the _MCOUNT_DECL body above.
 */
MCOUNT
  259 #ifdef GUPROF
  260 void
  261 mexitcount(uintfptr_t selfpc)
  262 {
  263         struct gmonparam *p;
  264         uintfptr_t selfpcdiff;
  265 
  266         p = &_gmonparam;
  267         selfpcdiff = selfpc - (uintfptr_t)p->lowpc;
  268         if (selfpcdiff < p->textsize) {
  269                 int delta;
  270 
  271                 /*
  272                  * Count the time since cputime() was previously called
  273                  * against `selfpc'.  Compensate for overheads.
  274                  */
  275                 delta = cputime() - cputime_bias - p->mexitcount_pre_overhead;
  276                 cputime_bias = p->mexitcount_post_overhead;
  277                 KCOUNT(p, selfpcdiff) += delta;
  278                 *p->cputime_count += p->cputime_overhead;
  279                 *p->mexitcount_count += p->mexitcount_overhead;
  280         }
  281 }
  282 
  283 void
  284 empty_loop(void)
  285 {
  286         int i;
  287 
  288         for (i = 0; i < CALIB_SCALE; i++)
  289                 ;
  290 }
  291 
/*
 * A function that does nothing, called repeatedly by nullfunc_loop()
 * below; presumably used by the GUPROF calibration to measure the
 * per-call profiling overhead.
 */
void
nullfunc(void)
{
}
  296 
  297 void
  298 nullfunc_loop(void)
  299 {
  300         int i;
  301 
  302         for (i = 0; i < CALIB_SCALE; i++)
  303                 nullfunc();
  304 }
  305 #endif /* GUPROF */

Cache object: 329bdab6b45dc8fa6a1c8abea4f94381


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.