FreeBSD/Linux Kernel Cross Reference
sys/libkern/mcount.c


/*-
 * Copyright (c) 1983, 1992, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/8.4/sys/libkern/mcount.c 199583 2009-11-20 15:27:52Z jhb $");

#include <sys/param.h>
#include <sys/gmon.h>
#ifdef _KERNEL
#ifndef GUPROF
#include <sys/systm.h>
#endif
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#endif

/*
 * mcount is called on entry to each function compiled with the profiling
 * switch set.  _mcount(), which is declared in a machine-dependent way
 * with _MCOUNT_DECL, does the actual work and is either inlined into a
 * C routine or called by an assembly stub.  In any case, this magic is
 * taken care of by the MCOUNT definition in <machine/profile.h>.
 *
 * _mcount updates data structures that represent traversals of the
 * program's call graph edges.  frompc and selfpc are the return
 * address and function address that represent the given call graph edge.
 * (A simplified sketch of the arc table follows the function below.)
 *
 * Note: the original BSD code used the same variable (frompcindex) for
 * both frompcindex and frompc.  Any reasonable, modern compiler will
 * perform this optimization.
 */
_MCOUNT_DECL(frompc, selfpc)    /* _mcount; may be static, inline, etc */
        uintfptr_t frompc, selfpc;
{
#ifdef GUPROF
        int delta;
#endif
        fptrdiff_t frompci;
        u_short *frompcindex;
        struct tostruct *top, *prevtop;
        struct gmonparam *p;
        long toindex;
#ifdef _KERNEL
        MCOUNT_DECL(s)
#endif

        p = &_gmonparam;
#ifndef GUPROF                  /* XXX */
        /*
         * check that we are profiling
         * and that we aren't recursively invoked.
         */
        if (p->state != GMON_PROF_ON)
                return;
#endif
#ifdef _KERNEL
        MCOUNT_ENTER(s);
#else
        p->state = GMON_PROF_BUSY;
#endif

#ifdef _KERNEL
        /*
         * When we are called from an exception handler, frompc may be
         * a user address.  Convert such frompc's to some representation
         * in kernel address space.
         */
        frompc = MCOUNT_FROMPC_USER(frompc);
#endif

        frompci = frompc - p->lowpc;
        if (frompci >= p->textsize)
                goto done;

#ifdef GUPROF
        if (p->state == GMON_PROF_HIRES) {
                /*
                 * Count the time since cputime() was previously called
                 * against `frompc'.  Compensate for overheads.
                 *
                 * cputime() sets its prev_count variable to the count when
                 * it is called.  This in effect starts a counter for
                 * the next period of execution (normally from now until
                 * the next call to mcount() or mexitcount()).  We set
                 * cputime_bias to compensate for our own overhead.
                 *
                 * We use the usual sampling counters since they can be
                 * located efficiently.  4-byte counters are usually
                 * necessary.  gprof will add up the scattered counts
                 * just like it does for statistical profiling.  All
                 * counts are signed so that underflow in the subtractions
                 * doesn't matter much (negative counts are normally
                 * compensated for by larger counts elsewhere).  Underflow
                 * shouldn't occur, but may be caused by slightly wrong
                 * calibrations or from not clearing cputime_bias.
                 */
                delta = cputime() - cputime_bias - p->mcount_pre_overhead;
                cputime_bias = p->mcount_post_overhead;
                KCOUNT(p, frompci) += delta;
                *p->cputime_count += p->cputime_overhead;
                *p->mcount_count += p->mcount_overhead;
        }
#endif /* GUPROF */
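        /*
         * Worked example with made-up numbers (not from the source): if
         * cputime() returns 1000 counts since its previous call,
         * cputime_bias is 40, and p->mcount_pre_overhead is 110, then
         * delta = 1000 - 40 - 110 = 850 counts are charged to frompc's
         * KCOUNT bucket, and the next timing interval starts biased by
         * p->mcount_post_overhead.
         */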

#ifdef _KERNEL
        /*
         * When we are called from an exception handler, frompc is faked
         * to be for where the exception occurred.  We've just solidified
         * the count for there.  Now convert frompci to an index that
         * represents the kind of exception so that interruptions appear
         * in the call graph as calls from that index instead of as calls
         * from all over.
         */
        frompc = MCOUNT_FROMPC_INTR(selfpc);
        if ((frompc - p->lowpc) < p->textsize)
                frompci = frompc - p->lowpc;
#endif

        /*
         * check that frompc is a reasonable pc value.
         * for example: signal catchers get called from the stack,
         *              not from text space.  too bad.
         */
        if (frompci >= p->textsize)
                goto done;

        frompcindex = &p->froms[frompci / (p->hashfraction * sizeof(*p->froms))];
        toindex = *frompcindex;
        if (toindex == 0) {
                /*
                 *      first time traversing this arc
                 */
                toindex = ++p->tos[0].link;
                if (toindex >= p->tolimit)
                        /* halt further profiling */
                        goto overflow;

                *frompcindex = toindex;
                top = &p->tos[toindex];
                top->selfpc = selfpc;
                top->count = 1;
                top->link = 0;
                goto done;
        }
        top = &p->tos[toindex];
        if (top->selfpc == selfpc) {
                /*
                 * arc at front of chain; usual case.
                 */
                top->count++;
                goto done;
        }
        /*
         * have to go looking down chain for it.
         * top points to what we are looking at,
         * prevtop points to previous top.
         * we know it is not at the head of the chain.
         */
        for (; /* goto done */; ) {
                if (top->link == 0) {
                        /*
                         * top is end of the chain and none of the chain
                         * had top->selfpc == selfpc.
                         * so we allocate a new tostruct
                         * and link it to the head of the chain.
                         */
                        toindex = ++p->tos[0].link;
                        if (toindex >= p->tolimit)
                                goto overflow;

                        top = &p->tos[toindex];
                        top->selfpc = selfpc;
                        top->count = 1;
                        top->link = *frompcindex;
                        *frompcindex = toindex;
                        goto done;
                }
                /*
                 * otherwise, check the next arc on the chain.
                 */
                prevtop = top;
                top = &p->tos[top->link];
                if (top->selfpc == selfpc) {
                        /*
                         * there it is.
                         * increment its count
                         * move it to the head of the chain.
                         */
                        top->count++;
                        toindex = prevtop->link;
                        prevtop->link = top->link;
                        top->link = *frompcindex;
                        *frompcindex = toindex;
                        goto done;
                }
        }
done:
#ifdef _KERNEL
        MCOUNT_EXIT(s);
#else
        p->state = GMON_PROF_ON;
#endif
        return;
overflow:
        p->state = GMON_PROF_ERROR;
#ifdef _KERNEL
        MCOUNT_EXIT(s);
#endif
        return;
}
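
/*
 * A minimal user-space sketch of the froms[]/tos[] bookkeeping above.
 * Everything below (record_arc, NFROMS, NTOS, struct arc) is invented
 * for illustration and is not part of FreeBSD; it keeps the same
 * hash-with-chaining idea but omits move-to-front and the error state.
 */
#if 0   /* illustrative sketch only; not compiled */
#define NFROMS  4096    /* one head slot per hashed caller site */
#define NTOS    1024    /* arc pool; tos[0].link doubles as the allocator */

struct arc {
        unsigned long   selfpc;         /* callee address for this arc */
        long            count;          /* traversal count for this arc */
        unsigned short  link;           /* next arc hashing to same slot */
};

static unsigned short froms[NFROMS];    /* caller hash -> first arc index */
static struct arc tos[NTOS];

static void
record_arc(unsigned long frompc, unsigned long selfpc)
{
        unsigned short *head, i;

        head = &froms[(frompc / sizeof(long)) % NFROMS];
        for (i = *head; i != 0; i = tos[i].link)
                if (tos[i].selfpc == selfpc) {
                        tos[i].count++;         /* existing arc: count it */
                        return;
                }
        i = ++tos[0].link;                      /* allocate a new arc */
        if (i >= NTOS)
                return;                         /* table full; the real code
                                                   sets GMON_PROF_ERROR */
        tos[i].selfpc = selfpc;
        tos[i].count = 1;
        tos[i].link = *head;                    /* push onto chain head */
        *head = i;
}
#endif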

/*
 * Actual definition of mcount function.  Defined in <machine/profile.h>,
 * which is included by <sys/gmon.h>.
 */
MCOUNT

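/*
 * A rough C-level rendering of what the machine-dependent MCOUNT stub
 * arranges.  This is a hedged sketch, not any port's actual stub: real
 * ports use an assembly stub (or compiler-specific glue) from
 * <machine/profile.h>, and __builtin_return_address(1) is not reliable
 * on every platform.
 */
#if 0   /* illustrative sketch only; not compiled */
void
mcount(void)
{
        /* frompc: the caller's return address, naming the call site;
           selfpc: an address within the profiled function itself. */
        _mcount((uintfptr_t)__builtin_return_address(1),
            (uintfptr_t)__builtin_return_address(0));
}
#endif
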
#ifdef GUPROF
void
mexitcount(selfpc)
        uintfptr_t selfpc;
{
        struct gmonparam *p;
        uintfptr_t selfpcdiff;

        p = &_gmonparam;
        selfpcdiff = selfpc - (uintfptr_t)p->lowpc;
        if (selfpcdiff < p->textsize) {
                int delta;

                /*
                 * Count the time since cputime() was previously called
                 * against `selfpc'.  Compensate for overheads.
                 */
                delta = cputime() - cputime_bias - p->mexitcount_pre_overhead;
                cputime_bias = p->mexitcount_post_overhead;
                KCOUNT(p, selfpcdiff) += delta;
                *p->cputime_count += p->cputime_overhead;
                *p->mexitcount_count += p->mexitcount_overhead;
        }
}

#ifndef __GNUCLIKE_ASM
#error "This file uses null asms to prevent timing loops being optimized away."
#endif

void
empty_loop()
{
        int i;

        for (i = 0; i < CALIB_SCALE; i++)
                __asm __volatile("");
}

void
nullfunc()
{
        __asm __volatile("");
}

void
nullfunc_loop()
{
        int i;

        for (i = 0; i < CALIB_SCALE; i++)
                nullfunc();
}
#endif /* GUPROF */
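
/*
 * empty_loop(), nullfunc() and nullfunc_loop() exist so the profiling
 * startup code can calibrate the timing overheads that mcount() and
 * mexitcount() subtract above.  The sketch below is invented to show
 * that idea; the real calibration lives in machine-dependent startup
 * code (e.g. kmstartup()), not in this file, and measure_call_overhead
 * is a hypothetical name.
 */
#if 0   /* illustrative sketch only; not compiled */
static int
measure_call_overhead(void)
{
        int t_empty, t_calls;

        (void)cputime();                /* discard: start an interval */
        empty_loop();
        t_empty = cputime();            /* CALIB_SCALE empty iterations */
        nullfunc_loop();
        t_calls = cputime();            /* same loop plus real calls */

        /* Per-call cost of entering and leaving a profiled function. */
        return ((t_calls - t_empty) / CALIB_SCALE);
}
#endif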

This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.