FreeBSD/Linux Kernel Cross Reference
sys/dev/hwpmc/hwpmc_x86.c


/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2005,2008 Joseph Koshy
 * Copyright (c) 2007 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by A. Joseph Koshy under
 * sponsorship from the FreeBSD Foundation and Google, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/pmc.h>
#include <sys/proc.h>
#include <sys/systm.h>

#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/intr_machdep.h>
#include <x86/apicvar.h>
#include <machine/pmc_mdep.h>
#include <machine/md_var.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include "hwpmc_soft.h"

/*
 * Attempt to walk a user call stack using a too-simple algorithm.
 * In the general case we need unwind information associated with
 * the executable to be able to walk the user stack.
 *
 * We are handed a trap frame laid down at the time the PMC interrupt
 * was taken.  If the application is using frame pointers, the saved
 * PC value could be:
 * a. at the beginning of a function before the stack frame is laid
 *    down,
 * b. just before a 'ret', after the stack frame has been taken off,
 * c. somewhere else in the function with a valid stack frame being
 *    present.
 *
 * If the application is not using frame pointers, this algorithm will
 * fail to yield an interesting call chain.
 *
 * TODO: figure out a way to use unwind information.
 */

int
pmc_save_user_callchain(uintptr_t *cc, int nframes, struct trapframe *tf)
{
        int n;
        uint32_t instr;
        uintptr_t fp, oldfp, pc, r, sp;

        KASSERT(TRAPF_USERMODE(tf), ("[x86,%d] Not a user trap frame tf=%p",
            __LINE__, (void *) tf));

        pc = PMC_TRAPFRAME_TO_PC(tf);
        oldfp = fp = PMC_TRAPFRAME_TO_FP(tf);
        sp = PMC_TRAPFRAME_TO_USER_SP(tf);

        *cc++ = pc; n = 1;

        r = fp + sizeof(uintptr_t); /* points to return address */

        if (!PMC_IN_USERSPACE(pc))
                return (n);

        if (copyin((void *) pc, &instr, sizeof(instr)) != 0)
                return (n);

        if (PMC_AT_FUNCTION_PROLOGUE_PUSH_BP(instr) ||
            PMC_AT_FUNCTION_EPILOGUE_RET(instr)) { /* ret */
                if (copyin((void *) sp, &pc, sizeof(pc)) != 0)
                        return (n);
        } else if (PMC_AT_FUNCTION_PROLOGUE_MOV_SP_BP(instr)) {
                sp += sizeof(uintptr_t);
                if (copyin((void *) sp, &pc, sizeof(pc)) != 0)
                        return (n);
        } else if (copyin((void *) r, &pc, sizeof(pc)) != 0 ||
            copyin((void *) fp, &fp, sizeof(fp)) != 0)
                return (n);

        for (; n < nframes;) {
                if (pc == 0 || !PMC_IN_USERSPACE(pc))
                        break;

                *cc++ = pc; n++;

                if (fp < oldfp)
                        break;

                r = fp + sizeof(uintptr_t); /* address of return address */
                oldfp = fp;

                if (copyin((void *) r, &pc, sizeof(pc)) != 0 ||
                    copyin((void *) fp, &fp, sizeof(fp)) != 0)
                        break;
        }

        return (n);
}

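/*
 * Illustrative sketch only: the PMC_AT_FUNCTION_* tests used above are
 * defined in <machine/pmc_mdep.h> and are not reproduced here.  On x86
 * they amount to matching the opcode bytes at the interrupted PC,
 * fetched above as a little-endian uint32_t, against the usual
 * frame-pointer prologue/epilogue encodings.  The hypothetical macros
 * below show the idea for amd64; they are assumptions for illustration,
 * not the driver's actual definitions.
 */
#define SKETCH_PROLOGUE_PUSH_BP(I)      (((I) & 0x000000ff) == 0x55)     /* push %rbp */
#define SKETCH_PROLOGUE_MOV_SP_BP(I)    (((I) & 0x00ffffff) == 0xe58948) /* mov %rsp,%rbp */
#define SKETCH_EPILOGUE_RET(I)          (((I) & 0x000000ff) == 0xc3)     /* ret */
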
/*
 * Walking the kernel call stack.
 *
 * We are handed the trap frame laid down at the time the PMC
 * interrupt was taken.  The saved PC could be:
 * a. in the low-level trap handler, meaning that there isn't a C stack
 *    to traverse,
 * b. at the beginning of a function before the stack frame is laid
 *    down,
 * c. just before a 'ret', after the stack frame has been taken off,
 * d. somewhere else in a function with a valid stack frame being
 *    present.
 *
 * In case (d), the previous frame pointer is at [%ebp]/[%rbp] and
 * the return address is at [%ebp+4]/[%rbp+8].
 *
 * For cases (b) and (c), the return address is at [%esp]/[%rsp] and
 * the frame pointer doesn't need to be changed when going up one
 * level in the stack.
 *
 * For case (a), we check if the PC lies in low-level trap handling
 * code, and if so we terminate our trace.
 */

int __nosanitizeaddress __nosanitizememory
pmc_save_kernel_callchain(uintptr_t *cc, int nframes, struct trapframe *tf)
{
        int n;
        uint32_t instr;
        uintptr_t fp, pc, r, sp, stackstart, stackend;
        struct thread *td;

        KASSERT(TRAPF_USERMODE(tf) == 0,("[x86,%d] not a kernel backtrace",
            __LINE__));

        td = curthread;
        pc = PMC_TRAPFRAME_TO_PC(tf);
        fp = PMC_TRAPFRAME_TO_FP(tf);
        sp = PMC_TRAPFRAME_TO_KERNEL_SP(tf);

        *cc++ = pc;
        r = fp + sizeof(uintptr_t); /* points to return address */

        if (nframes <= 1)
                return (1);

        stackstart = (uintptr_t) td->td_kstack;
        stackend = (uintptr_t) td->td_kstack + td->td_kstack_pages * PAGE_SIZE;

        if (PMC_IN_TRAP_HANDLER(pc) ||
            !PMC_IN_KERNEL(pc) ||
            !PMC_IN_KERNEL_STACK(r, stackstart, stackend) ||
            !PMC_IN_KERNEL_STACK(sp, stackstart, stackend) ||
            !PMC_IN_KERNEL_STACK(fp, stackstart, stackend))
                return (1);

        instr = *(uint32_t *) pc;

        /*
         * Determine whether the interrupted function was in the
         * process of either laying down its stack frame or taking
         * it off.
         *
         * If we haven't started laying down a stack frame, or are
         * just about to return, then our caller's address is at
         * *sp, and we don't have a frame to unwind.
         */
        if (PMC_AT_FUNCTION_PROLOGUE_PUSH_BP(instr) ||
            PMC_AT_FUNCTION_EPILOGUE_RET(instr))
                pc = *(uintptr_t *) sp;
        else if (PMC_AT_FUNCTION_PROLOGUE_MOV_SP_BP(instr)) {
                /*
                 * The code was midway through laying down a frame.
                 * At this point sp[0] has a frame back pointer,
                 * and the caller's address is therefore at sp[1].
                 */
                sp += sizeof(uintptr_t);
                if (!PMC_IN_KERNEL_STACK(sp, stackstart, stackend))
                        return (1);
                pc = *(uintptr_t *) sp;
        } else {
                /*
                 * Not in the function prologue or epilogue.
                 */
                pc = *(uintptr_t *) r;
                fp = *(uintptr_t *) fp;
        }

        for (n = 1; n < nframes; n++) {
                *cc++ = pc;

                if (PMC_IN_TRAP_HANDLER(pc))
                        break;

                r = fp + sizeof(uintptr_t);
                if (!PMC_IN_KERNEL_STACK(fp, stackstart, stackend) ||
                    !PMC_IN_KERNEL_STACK(r, stackstart, stackend))
                        break;
                pc = *(uintptr_t *) r;
                fp = *(uintptr_t *) fp;
        }

        return (n);
}

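/*
 * Illustrative sketch only: the stack-frame layout both walkers above
 * rely on when code is built with frame pointers.  The structure below
 * is a hypothetical illustration (this driver does not define it); it
 * mirrors the comment above: the caller's saved frame pointer sits at
 * [%ebp]/[%rbp] and the return address at [%ebp+4]/[%rbp+8].
 */
struct callchain_frame_sketch {
        uintptr_t       cf_savfp;       /* previous %ebp/%rbp (what "fp" chases) */
        uintptr_t       cf_retaddr;     /* return address (what "r" points at) */
};
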
/*
 * Machine dependent initialization for x86 class platforms.
 */

struct pmc_mdep *
pmc_md_initialize(void)
{
        int i;
        struct pmc_mdep *md;

        /* determine the CPU kind */
        if (cpu_vendor_id == CPU_VENDOR_AMD ||
            cpu_vendor_id == CPU_VENDOR_HYGON)
                md = pmc_amd_initialize();
        else if (cpu_vendor_id == CPU_VENDOR_INTEL)
                md = pmc_intel_initialize();
        else
                return (NULL);

        /* disallow sampling if we do not have an LAPIC */
        if (md != NULL && !lapic_enable_pmc())
                for (i = 0; i < md->pmd_nclass; i++) {
                        if (i == PMC_CLASS_INDEX_SOFT)
                                continue;
                        md->pmd_classdep[i].pcd_caps &= ~PMC_CAP_INTERRUPT;
                }

        return (md);
}

void
pmc_md_finalize(struct pmc_mdep *md)
{

        lapic_disable_pmc();
        if (cpu_vendor_id == CPU_VENDOR_AMD ||
            cpu_vendor_id == CPU_VENDOR_HYGON)
                pmc_amd_finalize(md);
        else if (cpu_vendor_id == CPU_VENDOR_INTEL)
                pmc_intel_finalize(md);
        else
                KASSERT(0, ("[x86,%d] Unknown vendor", __LINE__));
}
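
/*
 * Illustrative sketch only: a minimal, hypothetical example of how a
 * machine-independent caller might pair the two hooks above during
 * module load and unload.  The names below are made up for the example;
 * the real consumer lives elsewhere in hwpmc.
 */
static struct pmc_mdep *example_md;

static int
example_md_load(void)
{

        example_md = pmc_md_initialize();
        if (example_md == NULL)
                return (ENXIO);         /* unsupported CPU vendor */
        return (0);
}

static void
example_md_unload(void)
{

        if (example_md != NULL) {
                pmc_md_finalize(example_md);
                example_md = NULL;
        }
}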
