FreeBSD/Linux Kernel Cross Reference
sys/cddl/dev/dtrace/i386/dtrace_isa.c


    1 /*
    2  * CDDL HEADER START
    3  *
    4  * The contents of this file are subject to the terms of the
    5  * Common Development and Distribution License, Version 1.0 only
    6  * (the "License").  You may not use this file except in compliance
    7  * with the License.
    8  *
    9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   10  * or http://www.opensolaris.org/os/licensing.
   11  * See the License for the specific language governing permissions
   12  * and limitations under the License.
   13  *
   14  * When distributing Covered Code, include this CDDL HEADER in each
   15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
   16  * If applicable, add the following below this CDDL HEADER, with the
   17  * fields enclosed by brackets "[]" replaced with your own identifying
   18  * information: Portions Copyright [yyyy] [name of copyright owner]
   19  *
   20  * CDDL HEADER END
   21  *
   22  * $FreeBSD$
   23  */
   24 /*
   25  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
   26  * Use is subject to license terms.
   27  */
   28 #include <sys/cdefs.h>
   29 
   30 #include <sys/param.h>
   31 #include <sys/systm.h>
   32 #include <sys/kernel.h>
   33 #include <sys/stack.h>
   34 #include <sys/pcpu.h>
   35 
   36 #include <machine/frame.h>
   37 #include <machine/md_var.h>
   38 #include <machine/pcb.h>
   39 #include <machine/stack.h>
   40 
   41 #include <vm/vm.h>
   42 #include <vm/vm_param.h>
   43 #include <vm/pmap.h>
   44 
   45 #include "regset.h"
   46 
   47 extern uintptr_t kernbase;
   48 uintptr_t kernelbase = (uintptr_t) &kernbase;
   49 
   50 uint8_t dtrace_fuword8_nocheck(void *);
   51 uint16_t dtrace_fuword16_nocheck(void *);
   52 uint32_t dtrace_fuword32_nocheck(void *);
   53 uint64_t dtrace_fuword64_nocheck(void *);
   54 
   55 int     dtrace_ustackdepth_max = 2048;
   56 
   57 void
   58 dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes,
   59     uint32_t *intrpc)
   60 {
   61         int depth = 0;
   62         register_t ebp;
   63         struct i386_frame *frame;
   64         vm_offset_t callpc;
   65         pc_t caller = (pc_t) solaris_cpu[curcpu].cpu_dtrace_caller;
   66 
   67         if (intrpc != 0)
   68                 pcstack[depth++] = (pc_t) intrpc;
   69 
   70         aframes++;
   71 
   72         __asm __volatile("movl %%ebp,%0" : "=r" (ebp));
   73 
   74         frame = (struct i386_frame *)ebp;
   75         while (depth < pcstack_limit) {
   76                 if (!kstack_contains(curthread, (vm_offset_t)frame,
   77                     sizeof(*frame)))
   78                         break;
   79 
   80                 callpc = frame->f_retaddr;
   81 
   82                 if (!INKERNEL(callpc))
   83                         break;
   84 
   85                 if (aframes > 0) {
   86                         aframes--;
   87                         if ((aframes == 0) && (caller != 0)) {
   88                                 pcstack[depth++] = caller;
   89                         }
   90                 }
   91                 else {
   92                         pcstack[depth++] = callpc;
   93                 }
   94 
   95                 if (frame->f_frame <= frame)
   96                         break;
   97                 frame = frame->f_frame;
   98         }
   99 
  100         for (; depth < pcstack_limit; depth++) {
  101                 pcstack[depth] = 0;
  102         }
  103 }
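
dtrace_getpcstack() above collects a kernel stack trace by following the chain of saved frame pointers: each struct i386_frame holds the caller's frame pointer in f_frame and the return address the call instruction pushed in f_retaddr, and the walk stops when the chain leaves the kernel stack or stops moving upward. The small userland program below is only a sketch of that same walk, not kernel code; it assumes GCC or Clang on x86 with frame pointers preserved (compile with -O0 -fno-omit-frame-pointer), and the struct fp_frame name is invented for the example.

/*
 * Illustrative only: walk our own frame-pointer chain the way
 * dtrace_getpcstack() walks i386_frame records.  Assumes x86 with
 * frame pointers enabled (compile with -O0 -fno-omit-frame-pointer).
 */
#include <stdio.h>
#include <stdint.h>

struct fp_frame {                       /* mirrors struct i386_frame */
        struct fp_frame *f_frame;       /* saved caller frame pointer */
        uintptr_t        f_retaddr;     /* return address pushed by call */
};

static void
walk(int limit)
{
        struct fp_frame *frame = __builtin_frame_address(0);
        int depth = 0;

        while (frame != NULL && depth < limit) {
                printf("#%d  retaddr=%p\n", depth++,
                    (void *)frame->f_retaddr);
                /* Stop if the chain does not move strictly upward. */
                if (frame->f_frame <= frame)
                        break;
                frame = frame->f_frame;
        }
}

static void a(void) { walk(8); }
static void b(void) { a(); }

int
main(void)
{
        b();
        return (0);
}
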
  104 
  105 static int
  106 dtrace_getustack_common(uint64_t *pcstack, int pcstack_limit, uintptr_t pc,
  107     uintptr_t sp)
  108 {
  109 #ifdef notyet
  110         proc_t *p = curproc;
  111         uintptr_t oldcontext = lwp->lwp_oldcontext; /* XXX signal stack. */
  112         size_t s1, s2;
  113 #endif
  114         uintptr_t oldsp;
  115         volatile uint16_t *flags =
  116             (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
  117         int ret = 0;
  118 
  119         ASSERT(pcstack == NULL || pcstack_limit > 0);
  120         ASSERT(dtrace_ustackdepth_max > 0);
  121 
  122 #ifdef notyet /* XXX signal stack. */
  123         if (p->p_model == DATAMODEL_NATIVE) {
  124                 s1 = sizeof (struct frame) + 2 * sizeof (long);
  125                 s2 = s1 + sizeof (siginfo_t);
  126         } else {
  127                 s1 = sizeof (struct frame32) + 3 * sizeof (int);
  128                 s2 = s1 + sizeof (siginfo32_t);
  129         }
  130 #endif
  131 
  132         while (pc != 0) {
  133                 /*
  134                  * We limit the number of times we can go around this
  135                  * loop to account for a circular stack.
  136                  */
  137                 if (ret++ >= dtrace_ustackdepth_max) {
  138                         *flags |= CPU_DTRACE_BADSTACK;
  139                         cpu_core[curcpu].cpuc_dtrace_illval = sp;
  140                         break;
  141                 }
  142 
  143                 if (pcstack != NULL) {
  144                         *pcstack++ = (uint64_t)pc;
  145                         pcstack_limit--;
  146                         if (pcstack_limit <= 0)
  147                                 break;
  148                 }
  149 
  150                 if (sp == 0)
  151                         break;
  152 
  153                 oldsp = sp;
  154 
  155 #ifdef notyet /* XXX signal stack. */ 
  156                 if (oldcontext == sp + s1 || oldcontext == sp + s2) {
  157                         if (p->p_model == DATAMODEL_NATIVE) {
  158                                 ucontext_t *ucp = (ucontext_t *)oldcontext;
  159                                 greg_t *gregs = ucp->uc_mcontext.gregs;
  160 
  161                                 sp = dtrace_fulword(&gregs[REG_FP]);
  162                                 pc = dtrace_fulword(&gregs[REG_PC]);
  163 
  164                                 oldcontext = dtrace_fulword(&ucp->uc_link);
  165                         } else {
  166                                 ucontext32_t *ucp = (ucontext32_t *)oldcontext;
  167                                 greg32_t *gregs = ucp->uc_mcontext.gregs;
  168 
  169                                 sp = dtrace_fuword32(&gregs[EBP]);
  170                                 pc = dtrace_fuword32(&gregs[EIP]);
  171 
  172                                 oldcontext = dtrace_fuword32(&ucp->uc_link);
  173                         }
  174                 } else {
  175                         if (p->p_model == DATAMODEL_NATIVE) {
  176                                 struct frame *fr = (struct frame *)sp;
  177 
  178                                 pc = dtrace_fulword(&fr->fr_savpc);
  179                                 sp = dtrace_fulword(&fr->fr_savfp);
  180                         } else {
  181                                 struct frame32 *fr = (struct frame32 *)sp;
  182 
  183                                 pc = dtrace_fuword32(&fr->fr_savpc);
  184                                 sp = dtrace_fuword32(&fr->fr_savfp);
  185                         }
  186                 }
  187 #else
  188                 pc = dtrace_fuword32((void *)(sp +
  189                         offsetof(struct i386_frame, f_retaddr)));
  190                 sp = dtrace_fuword32((void *)sp);
  191 #endif /* ! notyet */
  192 
  193                 if (sp == oldsp) {
  194                         *flags |= CPU_DTRACE_BADSTACK;
  195                         cpu_core[curcpu].cpuc_dtrace_illval = sp;
  196                         break;
  197                 }
  198 
  199                 /*
  200                  * This is totally bogus:  if we faulted, we're going to clear
  201                  * the fault and break.  This is to deal with the apparently
  202                  * broken Java stacks on x86.
  203                  */
  204                 if (*flags & CPU_DTRACE_FAULT) {
  205                         *flags &= ~CPU_DTRACE_FAULT;
  206                         break;
  207                 }
  208         }
  209 
  210         return (ret);
  211 }
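
The loop in dtrace_getustack_common() protects itself from corrupt user stacks in two ways: an absolute cap on iterations, dtrace_ustackdepth_max, and a per-step progress test that aborts when the new stack pointer equals the old one. The toy program below is a sketch of those two guards applied to a deliberately circular linked list; it is illustration only, and the names in it (struct node, WALK_MAX, bounded_walk) are invented for the example.

/*
 * Illustrative only: the same two guards dtrace_getustack_common()
 * uses against bad user stacks, an iteration cap plus a check that
 * each step makes progress, applied to a circular linked list.
 */
#include <stdio.h>

struct node {
        struct node *next;
};

#define WALK_MAX 16             /* plays the role of dtrace_ustackdepth_max */

static int
bounded_walk(struct node *n)
{
        int steps = 0;

        while (n != NULL) {
                struct node *old = n;

                if (steps++ >= WALK_MAX) {      /* cap the total work */
                        printf("gave up: too many steps\n");
                        break;
                }
                n = n->next;
                if (n == old) {                 /* no progress: self-loop */
                        printf("gave up: frame points at itself\n");
                        break;
                }
        }
        return (steps);
}

int
main(void)
{
        struct node a, b, c;

        a.next = &b; b.next = &c; c.next = &a;  /* circular on purpose */
        printf("stopped after %d steps\n", bounded_walk(&a));
        return (0);
}
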
  212 
  213 void
  214 dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
  215 {
  216         proc_t *p = curproc;
  217         struct trapframe *tf;
  218         uintptr_t pc, sp, fp;
  219         volatile uint16_t *flags =
  220             (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
  221         int n;
  222 
  223         if (*flags & CPU_DTRACE_FAULT)
  224                 return;
  225 
  226         if (pcstack_limit <= 0)
  227                 return;
  228 
  229         /*
  230          * If there's no user context we still need to zero the stack.
  231          */
  232         if (p == NULL || (tf = curthread->td_frame) == NULL)
  233                 goto zero;
  234 
  235         *pcstack++ = (uint64_t)p->p_pid;
  236         pcstack_limit--;
  237 
  238         if (pcstack_limit <= 0)
  239                 return;
  240 
  241         pc = tf->tf_eip;
  242         fp = tf->tf_ebp;
  243         sp = tf->tf_esp;
  244 
  245         if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
  246                 /*
  247                  * In an entry probe.  The frame pointer has not yet been
  248                  * pushed (that happens in the function prologue).  The
  249                  * best approach is to add the current pc as a missing top
  250                  * of stack and back the pc up to the caller, which is stored
  251                  * at the current stack pointer address since the call 
  252                  * instruction puts it there right before the branch.
  253                  */
  254 
  255                 *pcstack++ = (uint64_t)pc;
  256                 pcstack_limit--;
  257                 if (pcstack_limit <= 0)
  258                         return;
  259 
  260                 pc = dtrace_fuword32((void *) sp);
  261         }
  262 
  263         n = dtrace_getustack_common(pcstack, pcstack_limit, pc, sp);
  264         ASSERT(n >= 0);
  265         ASSERT(n <= pcstack_limit);
  266 
  267         pcstack += n;
  268         pcstack_limit -= n;
  269 
  270 zero:
  271         while (pcstack_limit-- > 0)
  272                 *pcstack++ = 0;
  273 }
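
Both dtrace_getupcstack() and dtrace_getustackdepth() special-case entry probes: the traced function's prologue has not pushed %ebp yet, so the caller's return address is still the word at the user stack pointer, which the code fetches with dtrace_fuword32((void *)sp) after recording the current pc. The fragment below is only a loose userland analogue of recovering where the caller will resume; it uses the compiler builtin __builtin_return_address() instead of a raw stack read, and assumes GCC or Clang.

/*
 * Illustrative only: recover the address the caller will resume at.
 * In a real entry probe the prologue has not run yet, so that address
 * is still the word at the user stack pointer; here the compiler
 * builtin reaches the same value for us.
 */
#include <stdio.h>

__attribute__((noinline)) static void
callee(void)
{
        void *ret = __builtin_return_address(0);

        printf("caller resumes at %p\n", ret);
}

int
main(void)
{
        callee();
        return (0);
}
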
  274 
  275 int
  276 dtrace_getustackdepth(void)
  277 {
  278         proc_t *p = curproc;
  279         struct trapframe *tf;
  280         uintptr_t pc, fp, sp;
  281         int n = 0;
  282 
  283         if (p == NULL || (tf = curthread->td_frame) == NULL)
  284                 return (0);
  285 
  286         if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
  287                 return (-1);
  288 
  289         pc = tf->tf_eip;
  290         fp = tf->tf_ebp;
  291         sp = tf->tf_esp;
  292 
  293         if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
  294                 /*
  295                  * In an entry probe.  The frame pointer has not yet been
  296                  * pushed (that happens in the function prologue).  The
  297                  * best approach is to add the current pc as a missing top
  298                  * of stack and back the pc up to the caller, which is stored
  299                  * at the current stack pointer address since the call 
  300                  * instruction puts it there right before the branch.
  301                  */
  302 
  303                 pc = dtrace_fuword32((void *) sp);
  304                 n++;
  305         }
  306 
  307         n += dtrace_getustack_common(NULL, 0, pc, fp);
  308 
  309         return (n);
  310 }
  311 
  312 void
  313 dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
  314 {
  315         proc_t *p = curproc;
  316         struct trapframe *tf;
  317         uintptr_t pc, sp, fp;
  318         volatile uint16_t *flags =
  319             (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
  320 #ifdef notyet /* XXX signal stack */
  321         uintptr_t oldcontext;
  322         size_t s1, s2;
  323 #endif
  324 
  325         if (*flags & CPU_DTRACE_FAULT)
  326                 return;
  327 
  328         if (pcstack_limit <= 0)
  329                 return;
  330 
  331         /*
  332          * If there's no user context we still need to zero the stack.
  333          */
  334         if (p == NULL || (tf = curthread->td_frame) == NULL)
  335                 goto zero;
  336 
  337         *pcstack++ = (uint64_t)p->p_pid;
  338         pcstack_limit--;
  339 
  340         if (pcstack_limit <= 0)
  341                 return;
  342 
  343         pc = tf->tf_eip;
  344         fp = tf->tf_ebp;
  345         sp = tf->tf_esp;
  346 
  347 #ifdef notyet /* XXX signal stack */
  348         oldcontext = lwp->lwp_oldcontext;
  349 
  350         if (p->p_model == DATAMODEL_NATIVE) {
  351                 s1 = sizeof (struct frame) + 2 * sizeof (long);
  352                 s2 = s1 + sizeof (siginfo_t);
  353         } else {
  354                 s1 = sizeof (struct frame32) + 3 * sizeof (int);
  355                 s2 = s1 + sizeof (siginfo32_t);
  356         }
  357 #endif
  358 
  359         if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
  360                 *pcstack++ = (uint64_t)pc;
  361                 *fpstack++ = 0;
  362                 pcstack_limit--;
  363                 if (pcstack_limit <= 0)
  364                         return;
  365 
  366                 pc = dtrace_fuword32((void *)sp);
  367         }
  368 
  369         while (pc != 0) {
  370                 *pcstack++ = (uint64_t)pc;
  371                 *fpstack++ = fp;
  372                 pcstack_limit--;
  373                 if (pcstack_limit <= 0)
  374                         break;
  375 
  376                 if (fp == 0)
  377                         break;
  378 
  379 #ifdef notyet /* XXX signal stack */
  380                 if (oldcontext == sp + s1 || oldcontext == sp + s2) {
  381                         if (p->p_model == DATAMODEL_NATIVE) {
  382                                 ucontext_t *ucp = (ucontext_t *)oldcontext;
  383                                 greg_t *gregs = ucp->uc_mcontext.gregs;
  384 
  385                                 sp = dtrace_fulword(&gregs[REG_FP]);
  386                                 pc = dtrace_fulword(&gregs[REG_PC]);
  387 
  388                                 oldcontext = dtrace_fulword(&ucp->uc_link);
  389                         } else {
  390                                 ucontext_t *ucp = (ucontext_t *)oldcontext;
  391                                 greg_t *gregs = ucp->uc_mcontext.gregs;
  392 
  393                                 sp = dtrace_fuword32(&gregs[EBP]);
  394                                 pc = dtrace_fuword32(&gregs[EIP]);
  395 
  396                                 oldcontext = dtrace_fuword32(&ucp->uc_link);
  397                         }
  398                 } else
  399 #endif /* XXX */
  400                 {
  401                         pc = dtrace_fuword32((void *)(fp +
  402                                 offsetof(struct i386_frame, f_retaddr)));
  403                         fp = dtrace_fuword32((void *)fp);
  404                 }
  405 
  406                 /*
  407                  * This is totally bogus:  if we faulted, we're going to clear
  408                  * the fault and break.  This is to deal with the apparently
  409                  * broken Java stacks on x86.
  410                  */
  411                 if (*flags & CPU_DTRACE_FAULT) {
  412                         *flags &= ~CPU_DTRACE_FAULT;
  413                         break;
  414                 }
  415         }
  416 
  417 zero:
  418         while (pcstack_limit-- > 0)
  419                 *pcstack++ = 0;
  420 }
  421 
  422 uint64_t
  423 dtrace_getarg(int arg, int aframes)
  424 {
  425         struct trapframe *frame;
  426         struct i386_frame *fp = (struct i386_frame *)dtrace_getfp();
  427         uintptr_t *stack, val;
  428         int i;
  429 
  430         for (i = 1; i <= aframes; i++) {
  431                 fp = fp->f_frame;
  432 
  433                 if (P2ROUNDUP(fp->f_retaddr, 4) ==
  434                     (long)dtrace_invop_callsite) {
  435                         /*
  436                          * If we pass through the invalid op handler, we will
  437                          * use the trap frame pointer that it pushed on the
  438                          * stack as the second argument to dtrace_invop() as
  439                          * the pointer to the stack.  When using this stack, we
  440                          * must skip the third argument to dtrace_invop(),
  441                          * which is included in the i386_frame.
  442                          */
  443                         frame = (struct trapframe *)(((uintptr_t **)&fp[1])[0]);
  444                         /*
  445                          * Skip the three hardware-saved registers and the
  446                          * return address.
  447                          */
  448                         stack = (uintptr_t *)frame->tf_isp + 4;
  449                         goto load;
  450                 }
  451 
  452         }
  453 
  454         /*
  455          * We know that we did not come through a trap to get into
  456          * dtrace_probe() -- the provider simply called dtrace_probe()
  457          * directly.  As this is the case, we need to shift the argument
  458          * that we're looking for:  the probe ID is the first argument to
  459          * dtrace_probe(), so the argument n will actually be found where
  460          * one would expect to find argument (n + 1).
  461          */
  462         arg++;
  463 
  464         stack = (uintptr_t *)fp + 2;
  465 
  466 load:
  467         DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
  468         val = stack[arg];
  469         DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
  470 
  471         return (val);
  472 }
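
In the common, non-trap path dtrace_getarg() reads arguments directly off the i386 stack: fp[0] is the saved %ebp, fp[1] is the return address, so incoming arguments start at fp + 2 (after arg has been bumped by one to skip the probe ID passed to dtrace_probe()). The userland sketch below illustrates that stack layout; it is not kernel code, it assumes a 32-bit cdecl build with frame pointers kept (for example cc -m32 -O0 -fno-omit-frame-pointer), and a compiler is free to arrange things differently.

/*
 * Illustrative only: on 32-bit x86 with the cdecl convention and frame
 * pointers kept, a function's incoming arguments start two words above
 * its saved %ebp, the same "fp + 2" used by dtrace_getarg().
 * Build with: cc -m32 -O0 -fno-omit-frame-pointer arg.c
 */
#include <stdio.h>
#include <stdint.h>

__attribute__((noinline)) static void
show_args(int a, int b)
{
        uintptr_t *fp = __builtin_frame_address(0);

        /* fp[0] = saved %ebp, fp[1] = return address, fp[2..] = args */
        printf("a=%d via stack=%ld, b=%d via stack=%ld\n",
            a, (long)fp[2], b, (long)fp[3]);
}

int
main(void)
{
        show_args(7, 42);
        return (0);
}
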
  473 
  474 int
  475 dtrace_getstackdepth(int aframes)
  476 {
  477         int depth = 0;
  478         struct i386_frame *frame;
  479         vm_offset_t ebp;
  480 
  481         aframes++;
  482         ebp = dtrace_getfp();
  483         frame = (struct i386_frame *)ebp;
  484         depth++;
   485         for (;;) {
  486                 if (!kstack_contains(curthread, (vm_offset_t)frame,
  487                     sizeof(*frame)))
  488                         break;
  489                 depth++;
  490                 if (frame->f_frame <= frame)
  491                         break;
  492                 frame = frame->f_frame;
  493         }
  494         if (depth < aframes)
  495                 return 0;
  496         else
  497                 return depth - aframes;
  498 }
  499 
  500 ulong_t
  501 dtrace_getreg(struct trapframe *rp, uint_t reg)
  502 {
  503         struct pcb *pcb;
  504         int regmap[] = {  /* Order is dependent on reg.d */
  505                 REG_GS,         /* 0  GS */
  506                 REG_FS,         /* 1  FS */
  507                 REG_ES,         /* 2  ES */
  508                 REG_DS,         /* 3  DS */
  509                 REG_RDI,        /* 4  EDI */
  510                 REG_RSI,        /* 5  ESI */
  511                 REG_RBP,        /* 6  EBP, REG_FP */
  512                 REG_RSP,        /* 7  ESP */
  513                 REG_RBX,        /* 8  EBX */
  514                 REG_RDX,        /* 9  EDX, REG_R1 */
  515                 REG_RCX,        /* 10 ECX */
  516                 REG_RAX,        /* 11 EAX, REG_R0 */
  517                 REG_TRAPNO,     /* 12 TRAPNO */
  518                 REG_ERR,        /* 13 ERR */
  519                 REG_RIP,        /* 14 EIP, REG_PC */
  520                 REG_CS,         /* 15 CS */
  521                 REG_RFL,        /* 16 EFL, REG_PS */
  522                 REG_RSP,        /* 17 UESP, REG_SP */
  523                 REG_SS          /* 18 SS */
  524         };
  525 
  526         if (reg > SS) {
  527                 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
  528                 return (0);
  529         }
  530 
  531         if (reg >= sizeof (regmap) / sizeof (int)) {
  532                 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
  533                 return (0);
  534         }
  535 
  536         reg = regmap[reg];
  537 
  538         switch(reg) {
  539         case REG_GS:
  540                 if ((pcb = curthread->td_pcb) == NULL) {
  541                         DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
  542                         return (0);
  543                 }
  544                 return (pcb->pcb_gs);
  545         case REG_FS:
  546                 return (rp->tf_fs);
  547         case REG_ES:
  548                 return (rp->tf_es);
  549         case REG_DS:
  550                 return (rp->tf_ds);
  551         case REG_RDI:
  552                 return (rp->tf_edi);
  553         case REG_RSI:
  554                 return (rp->tf_esi);
  555         case REG_RBP:
  556                 return (rp->tf_ebp);
  557         case REG_RSP:
  558                 return (rp->tf_isp);
  559         case REG_RBX:
  560                 return (rp->tf_ebx);
  561         case REG_RCX:
  562                 return (rp->tf_ecx);
  563         case REG_RAX:
  564                 return (rp->tf_eax);
  565         case REG_TRAPNO:
  566                 return (rp->tf_trapno);
  567         case REG_ERR:
  568                 return (rp->tf_err);
  569         case REG_RIP:
  570                 return (rp->tf_eip);
  571         case REG_CS:
  572                 return (rp->tf_cs);
  573         case REG_RFL:
  574                 return (rp->tf_eflags);
  575 #if 0
  576         case REG_RSP:
  577                 return (rp->tf_esp);
  578 #endif
  579         case REG_SS:
  580                 return (rp->tf_ss);
  581         default:
  582                 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
  583                 return (0);
  584         }
  585 }
  586 
  587 static int
  588 dtrace_copycheck(uintptr_t uaddr, uintptr_t kaddr, size_t size)
  589 {
  590         ASSERT(kaddr >= kernelbase && kaddr + size >= kaddr);
  591 
  592         if (uaddr + size >= kernelbase || uaddr + size < uaddr) {
  593                 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
  594                 cpu_core[curcpu].cpuc_dtrace_illval = uaddr;
  595                 return (0);
  596         }
  597 
  598         return (1);
  599 }
  600 
  601 void
  602 dtrace_copyin(uintptr_t uaddr, uintptr_t kaddr, size_t size,
  603     volatile uint16_t *flags)
  604 {
  605         if (dtrace_copycheck(uaddr, kaddr, size))
  606                 dtrace_copy(uaddr, kaddr, size);
  607 }
  608 
  609 void
  610 dtrace_copyout(uintptr_t kaddr, uintptr_t uaddr, size_t size,
  611     volatile uint16_t *flags)
  612 {
  613         if (dtrace_copycheck(uaddr, kaddr, size))
  614                 dtrace_copy(kaddr, uaddr, size);
  615 }
  616 
  617 void
  618 dtrace_copyinstr(uintptr_t uaddr, uintptr_t kaddr, size_t size,
  619     volatile uint16_t *flags)
  620 {
  621         if (dtrace_copycheck(uaddr, kaddr, size))
  622                 dtrace_copystr(uaddr, kaddr, size, flags);
  623 }
  624 
  625 void
  626 dtrace_copyoutstr(uintptr_t kaddr, uintptr_t uaddr, size_t size,
  627     volatile uint16_t *flags)
  628 {
  629         if (dtrace_copycheck(uaddr, kaddr, size))
  630                 dtrace_copystr(kaddr, uaddr, size, flags);
  631 }
  632 
  633 uint8_t
  634 dtrace_fuword8(void *uaddr)
  635 {
  636         if ((uintptr_t)uaddr >= kernelbase) {
  637                 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
  638                 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
  639                 return (0);
  640         }
  641         return (dtrace_fuword8_nocheck(uaddr));
  642 }
  643 
  644 uint16_t
  645 dtrace_fuword16(void *uaddr)
  646 {
  647         if ((uintptr_t)uaddr >= kernelbase) {
  648                 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
  649                 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
  650                 return (0);
  651         }
  652         return (dtrace_fuword16_nocheck(uaddr));
  653 }
  654 
  655 uint32_t
  656 dtrace_fuword32(void *uaddr)
  657 {
  658         if ((uintptr_t)uaddr >= kernelbase) {
  659                 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
  660                 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
  661                 return (0);
  662         }
  663         return (dtrace_fuword32_nocheck(uaddr));
  664 }
  665 
  666 uint64_t
  667 dtrace_fuword64(void *uaddr)
  668 {
  669         if ((uintptr_t)uaddr >= kernelbase) {
  670                 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
  671                 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
  672                 return (0);
  673         }
  674         return (dtrace_fuword64_nocheck(uaddr));
  675 }
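
dtrace_copycheck() and the dtrace_fuwordN() wrappers all apply the same test before touching user memory: the requested range must end below kernelbase and uaddr + size must not wrap around the address space. The standalone sketch below restates that check for illustration; FAKE_KERNELBASE is a stand-in constant (the traditional i386 KERNBASE value), not something taken from this file.

/*
 * Illustrative only: the address-range test applied by
 * dtrace_copycheck() and the dtrace_fuwordN() wrappers.  The range
 * must end below the kernel base and must not wrap past the top of
 * the address space.
 */
#include <stdio.h>
#include <stdint.h>

#define FAKE_KERNELBASE ((uintptr_t)0xc0000000) /* stand-in value */

static int
user_range_ok(uintptr_t uaddr, size_t size)
{
        if (uaddr + size >= FAKE_KERNELBASE ||  /* reaches kernel space */
            uaddr + size < uaddr)               /* wraps around */
                return (0);
        return (1);
}

int
main(void)
{
        printf("%d\n", user_range_ok(0x1000, 64));              /* 1: fine */
        printf("%d\n", user_range_ok(0xbfffffff, 16));          /* 0: kernel */
        printf("%d\n", user_range_ok((uintptr_t)-8, 64));       /* 0: wraps */
        return (0);
}
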
