FreeBSD/Linux Kernel Cross Reference
sys/amd64/amd64/support.S

    1 /*-
    2  * Copyright (c) 2003 Peter Wemm.
    3  * Copyright (c) 1993 The Regents of the University of California.
    4  * All rights reserved.
    5  *
    6  * Redistribution and use in source and binary forms, with or without
    7  * modification, are permitted provided that the following conditions
    8  * are met:
    9  * 1. Redistributions of source code must retain the above copyright
   10  *    notice, this list of conditions and the following disclaimer.
   11  * 2. Redistributions in binary form must reproduce the above copyright
   12  *    notice, this list of conditions and the following disclaimer in the
   13  *    documentation and/or other materials provided with the distribution.
   14  * 4. Neither the name of the University nor the names of its contributors
   15  *    may be used to endorse or promote products derived from this software
   16  *    without specific prior written permission.
   17  *
   18  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   28  * SUCH DAMAGE.
   29  *
   30  * $FreeBSD: releng/6.2/sys/amd64/amd64/support.S 147569 2005-06-24 00:45:01Z peter $
   31  */
   32 
   33 #include "opt_ddb.h"
   34 
   35 #include <machine/asmacros.h>
   36 #include <machine/intr_machdep.h>
   37 #include <machine/pmap.h>
   38 
   39 #include "assym.s"
   40 
   41         ALIGN_DATA
   42         .globl  intrcnt, eintrcnt
   43 intrcnt:
   44         .space  INTRCNT_COUNT * 8
   45 eintrcnt:
   46 
   47         .globl  intrnames, eintrnames
   48 intrnames:
   49         .space  INTRCNT_COUNT * (MAXCOMLEN + 1)
   50 eintrnames:
   51 
   52         .text
   53 
   54 /*
   55  * bcopy family
   56  * void bzero(void *buf, u_int len)
   57  */
   58 
   59 /* done */
   60 ENTRY(bzero)
   61         movq    %rsi,%rcx
   62         xorq    %rax,%rax
   63         shrq    $3,%rcx
   64         cld
   65         rep
   66         stosq
   67         movq    %rsi,%rcx
   68         andq    $7,%rcx
   69         rep
   70         stosb
   71         ret
   72 
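bzero above splits the length into len/8 quadwords cleared with rep stosq and len%8 trailing bytes cleared with rep stosb. Below is a minimal C sketch of the same split; bzero_sketch is a hypothetical name used only for illustration, not the kernel's implementation, and alignment of buf is glossed over:

    #include <stddef.h>
    #include <stdint.h>

    /*
     * Zero len/8 whole 64-bit words first, then the len%8 leftover
     * bytes, mirroring the stosq/stosb split used by bzero above.
     */
    static void
    bzero_sketch(void *buf, size_t len)
    {
            uint64_t *wp = buf;
            unsigned char *bp;
            size_t i;

            for (i = 0; i < len / 8; i++)
                    *wp++ = 0;
            bp = (unsigned char *)wp;
            for (i = 0; i < len % 8; i++)
                    *bp++ = 0;
    }

The same quadword-then-byte split reappears in bcopy, memcpy and the copyin/copyout routines further down.
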
   73 /* Address: %rdi */
   74 ENTRY(pagezero)
   75         movq    $-PAGE_SIZE,%rdx
   76         subq    %rdx,%rdi
   77         xorq    %rax,%rax
   78 1:
   79         movnti  %rax,(%rdi,%rdx)
   80         movnti  %rax,8(%rdi,%rdx)
   81         movnti  %rax,16(%rdi,%rdx)
   82         movnti  %rax,24(%rdi,%rdx)
   83         addq    $32,%rdx
   84         jne     1b
   85         sfence
   86         ret
   87 
   88 ENTRY(bcmp)
   89         movq    %rdx,%rcx
   90         shrq    $3,%rcx
   91         cld                                     /* compare forwards */
   92         repe
   93         cmpsq
   94         jne     1f
   95 
   96         movq    %rdx,%rcx
   97         andq    $7,%rcx
   98         repe
   99         cmpsb
  100 1:
  101         setne   %al
  102         movsbl  %al,%eax
  103         ret
  104 
  105 /*
  106  * bcopy(src, dst, cnt)
  107  *       rdi, rsi, rdx
  108  *  ws@tools.de     (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
  109  */
  110 ENTRY(bcopy)
  111         xchgq   %rsi,%rdi
  112         movq    %rdx,%rcx
  113 
  114         movq    %rdi,%rax
  115         subq    %rsi,%rax
  116         cmpq    %rcx,%rax                       /* overlapping && src < dst? */
  117         jb      1f
  118 
  119         shrq    $3,%rcx                         /* copy by 64-bit words */
  120         cld                                     /* nope, copy forwards */
  121         rep
  122         movsq
  123         movq    %rdx,%rcx
  124         andq    $7,%rcx                         /* any bytes left? */
  125         rep
  126         movsb
  127         ret
  128 
  129         /* ALIGN_TEXT */
  130 1:
  131         addq    %rcx,%rdi                       /* copy backwards */
  132         addq    %rcx,%rsi
  133         decq    %rdi
  134         decq    %rsi
  135         andq    $7,%rcx                         /* any fractional bytes? */
  136         std
  137         rep
  138         movsb
   139         movq    %rdx,%rcx                       /* copy remainder by 64-bit words */
  140         shrq    $3,%rcx
  141         subq    $7,%rsi
  142         subq    $7,%rdi
  143         rep
  144         movsq
  145         cld
  146         ret
  147 
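The test at the top of bcopy computes dst - src as an unsigned quantity; it is below the count only when the regions overlap with the source at the lower address, the one case that requires a descending copy. A memmove-style C sketch of that dispatch follows; bcopy_sketch is a hypothetical name, not this file's code:

    #include <stddef.h>

    /*
     * The unsigned difference dst - src is smaller than len only when
     * the buffers overlap and src < dst, which needs a backward copy.
     */
    static void
    bcopy_sketch(const void *src, void *dst, size_t len)
    {
            const unsigned char *s = src;
            unsigned char *d = dst;

            if ((size_t)(d - s) >= len) {           /* forward copy is safe */
                    while (len--)
                            *d++ = *s++;
            } else {                                /* overlap, src < dst */
                    d += len;
                    s += len;
                    while (len--)
                            *--d = *--s;
            }
    }
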
  148 /*
  149  * Note: memcpy does not support overlapping copies
  150  */
  151 ENTRY(memcpy)
  152         movq    %rdx,%rcx
  153         shrq    $3,%rcx                         /* copy by 64-bit words */
  154         cld                                     /* copy forwards */
  155         rep
  156         movsq
  157         movq    %rdx,%rcx
  158         andq    $7,%rcx                         /* any bytes left? */
  159         rep
  160         movsb
  161         ret
  162 
  163 /*
  164  * pagecopy(%rdi=from, %rsi=to)
  165  */
  166 ENTRY(pagecopy)
  167         movq    $-PAGE_SIZE,%rax
  168         movq    %rax,%rdx
  169         subq    %rax,%rdi
  170         subq    %rax,%rsi
  171 1:
  172         prefetchnta (%rdi,%rax)
  173         addq    $64,%rax
  174         jne     1b
  175 2:
  176         movq    (%rdi,%rdx),%rax
  177         movnti  %rax,(%rsi,%rdx)
  178         movq    8(%rdi,%rdx),%rax
  179         movnti  %rax,8(%rsi,%rdx)
  180         movq    16(%rdi,%rdx),%rax
  181         movnti  %rax,16(%rsi,%rdx)
  182         movq    24(%rdi,%rdx),%rax
  183         movnti  %rax,24(%rsi,%rdx)
  184         addq    $32,%rdx
  185         jne     2b
  186         sfence
  187         ret
  188 
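pagezero and pagecopy both use movnti, a non-temporal store that bypasses the caches, and finish with sfence to make the weakly ordered stores visible; pagecopy additionally walks the source once with prefetchnta before copying. A rough userland sketch of the pagezero pattern, assuming SSE2 intrinsics from <emmintrin.h> and a 4 KB page; pagezero_sketch is an illustrative name only:

    #include <emmintrin.h>          /* _mm_stream_si64(), _mm_sfence() */

    #define PAGE_SIZE       4096

    /*
     * Zero one page with non-temporal stores (compiled to movnti) and
     * fence afterwards, so the page does not evict useful cache lines.
     */
    static void
    pagezero_sketch(void *page)
    {
            long long *p = page;
            int i;

            for (i = 0; i < PAGE_SIZE / 8; i++)
                    _mm_stream_si64(&p[i], 0);
            _mm_sfence();           /* make the weakly ordered stores visible */
    }
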
  189 /* fillw(pat, base, cnt) */  
  190 /*       %rdi,%rsi, %rdx */
  191 ENTRY(fillw)
  192         movq    %rdi,%rax   
  193         movq    %rsi,%rdi
  194         movq    %rdx,%rcx
  195         cld
  196         rep
  197         stosw
  198         ret
  199 
  200 /*****************************************************************************/
  201 /* copyout and fubyte family                                                 */
  202 /*****************************************************************************/
  203 /*
  204  * Access user memory from inside the kernel. These routines should be
  205  * the only places that do this.
  206  *
  207  * These routines set curpcb->onfault for the time they execute. When a
  208  * protection violation occurs inside the functions, the trap handler
  209  * returns to *curpcb->onfault instead of the function.
  210  */
  211 
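Recovery works because the trap handler, on a fault with pcb_onfault set, resumes execution at that address instead of the faulting instruction, which lets the routine return EFAULT. A hedged sketch of how kernel code normally consumes these primitives; stat_handler and struct kstat are hypothetical, while copyin() and copyout() are the routines defined in this file:

    #include <sys/types.h>
    #include <sys/systm.h>          /* copyin(), copyout() */

    struct kstat {                  /* hypothetical request/reply layout */
            uint64_t packets;
            uint64_t bytes;
    };

    /*
     * Copy a request in from user space and a reply back out; a fault in
     * either direction surfaces as EFAULT from the copy routine.
     */
    static int
    stat_handler(void *ureq, void *ures)
    {
            struct kstat ks;
            int error;

            error = copyin(ureq, &ks, sizeof(ks));          /* user -> kernel */
            if (error != 0)
                    return (error);
            ks.packets++;
            return (copyout(&ks, ures, sizeof(ks)));        /* kernel -> user */
    }
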
  212 /*
  213  * copyout(from_kernel, to_user, len)  - MP SAFE
  214  *         %rdi,        %rsi,    %rdx
  215  */
  216 ENTRY(copyout)
  217         movq    PCPU(CURPCB),%rax
  218         movq    $copyout_fault,PCB_ONFAULT(%rax)
  219         testq   %rdx,%rdx                       /* anything to do? */
  220         jz      done_copyout
  221 
  222         /*
  223          * Check explicitly for non-user addresses.  If 486 write protection
  224          * is being used, this check is essential because we are in kernel
  225          * mode so the h/w does not provide any protection against writing
  226          * kernel addresses.
  227          */
  228 
  229         /*
  230          * First, prevent address wrapping.
  231          */
  232         movq    %rsi,%rax
  233         addq    %rdx,%rax
  234         jc      copyout_fault
  235 /*
  236  * XXX STOP USING VM_MAXUSER_ADDRESS.
  237  * It is an end address, not a max, so every time it is used correctly it
  238  * looks like there is an off by one error, and of course it caused an off
  239  * by one error in several places.
  240  */
  241         movq    $VM_MAXUSER_ADDRESS,%rcx
  242         cmpq    %rcx,%rax
  243         ja      copyout_fault
  244 
  245         xchgq   %rdi, %rsi
  246         /* bcopy(%rsi, %rdi, %rdx) */
  247         movq    %rdx,%rcx
  248 
  249         shrq    $3,%rcx
  250         cld
  251         rep
  252         movsq
  253         movb    %dl,%cl
  254         andb    $7,%cl
  255         rep
  256         movsb
  257 
  258 done_copyout:
  259         xorq    %rax,%rax
  260         movq    PCPU(CURPCB),%rdx
  261         movq    %rax,PCB_ONFAULT(%rdx)
  262         ret
  263 
  264         ALIGN_TEXT
  265 copyout_fault:
  266         movq    PCPU(CURPCB),%rdx
  267         movq    $0,PCB_ONFAULT(%rdx)
  268         movq    $EFAULT,%rax
  269         ret
  270 
  271 /*
  272  * copyin(from_user, to_kernel, len) - MP SAFE
  273  *        %rdi,      %rsi,      %rdx
  274  */
  275 ENTRY(copyin)
  276         movq    PCPU(CURPCB),%rax
  277         movq    $copyin_fault,PCB_ONFAULT(%rax)
  278         testq   %rdx,%rdx                       /* anything to do? */
  279         jz      done_copyin
  280 
  281         /*
  282          * make sure address is valid
  283          */
  284         movq    %rdi,%rax
  285         addq    %rdx,%rax
  286         jc      copyin_fault
  287         movq    $VM_MAXUSER_ADDRESS,%rcx
  288         cmpq    %rcx,%rax
  289         ja      copyin_fault
  290 
  291         xchgq   %rdi, %rsi
  292         movq    %rdx, %rcx
  293         movb    %cl,%al
   294         shrq    $3,%rcx                         /* copy by 64-bit words */
  295         cld
  296         rep
  297         movsq
  298         movb    %al,%cl
  299         andb    $7,%cl                          /* copy remaining bytes */
  300         rep
  301         movsb
  302 
  303 done_copyin:
  304         xorq    %rax,%rax
  305         movq    PCPU(CURPCB),%rdx
  306         movq    %rax,PCB_ONFAULT(%rdx)
  307         ret
  308 
  309         ALIGN_TEXT
  310 copyin_fault:
  311         movq    PCPU(CURPCB),%rdx
  312         movq    $0,PCB_ONFAULT(%rdx)
  313         movq    $EFAULT,%rax
  314         ret
  315 
  316 /*
  317  * casuptr.  Compare and set user pointer.  Returns -1 or the current value.
  318  *        dst = %rdi, old = %rsi, new = %rdx
  319  */
  320 ENTRY(casuptr)
  321         movq    PCPU(CURPCB),%rcx
  322         movq    $fusufault,PCB_ONFAULT(%rcx)
  323 
  324         movq    $VM_MAXUSER_ADDRESS-4,%rax
  325         cmpq    %rax,%rdi                       /* verify address is valid */
  326         ja      fusufault
  327 
  328         movq    %rsi, %rax                      /* old */
  329 #ifdef SMP
  330         lock
  331 #endif
  332         cmpxchgq %rdx, (%rdi)                   /* new = %rdx */
  333 
  334         /*
   335          * The old value is in %rax.  If the store succeeded it will be the
  336          * value we expected (old) from before the store, otherwise it will
  337          * be the current value.
  338          */
  339 
  340         movq    PCPU(CURPCB),%rcx
  341         movq    $fusufault,PCB_ONFAULT(%rcx)
  342         movq    $0,PCB_ONFAULT(%rcx)
  343         ret
  344 
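casuptr is the primitive behind user-space lock words (the umtx code is its main consumer): it returns -1 after a fault, otherwise the value that was in *dst, which equals old exactly when the exchange happened. Note that a legitimate stored value of -1 is indistinguishable from a fault. A small caller sketch, assuming the casuptr prototype from <sys/systm.h>; user_lock_try and LOCK_UNOWNED are hypothetical:

    #include <sys/types.h>
    #include <sys/systm.h>          /* casuptr() */
    #include <sys/errno.h>

    #define LOCK_UNOWNED    ((intptr_t)0)

    /*
     * Try to take a user-space lock word by installing owner_id with a
     * single compare-and-set.
     */
    static int
    user_lock_try(intptr_t *uaddr, intptr_t owner_id)
    {
            intptr_t old;

            old = casuptr(uaddr, LOCK_UNOWNED, owner_id);
            if (old == -1)
                    return (EFAULT);                /* bad user address */
            if (old != LOCK_UNOWNED)
                    return (EBUSY);                 /* someone else owns it */
            return (0);                             /* exchange succeeded */
    }
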
  345 /*
  346  * Fetch (load) a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit
  347  * byte from user memory.  All these functions are MPSAFE.
  348  * addr = %rdi
  349  */
  350 
  351 ALTENTRY(fuword64)
  352 ENTRY(fuword)
  353         movq    PCPU(CURPCB),%rcx
  354         movq    $fusufault,PCB_ONFAULT(%rcx)
  355 
  356         movq    $VM_MAXUSER_ADDRESS-8,%rax
  357         cmpq    %rax,%rdi                       /* verify address is valid */
  358         ja      fusufault
  359 
  360         movq    (%rdi),%rax
  361         movq    $0,PCB_ONFAULT(%rcx)
  362         ret
  363 
  364 ENTRY(fuword32)
  365         movq    PCPU(CURPCB),%rcx
  366         movq    $fusufault,PCB_ONFAULT(%rcx)
  367 
  368         movq    $VM_MAXUSER_ADDRESS-4,%rax
  369         cmpq    %rax,%rdi                       /* verify address is valid */
  370         ja      fusufault
  371 
  372         movl    (%rdi),%eax
  373         movq    $0,PCB_ONFAULT(%rcx)
  374         ret
  375 
  376 /*
  377  * fuswintr() and suswintr() are specialized variants of fuword16() and
  378  * suword16(), respectively.  They are called from the profiling code,
  379  * potentially at interrupt time.  If they fail, that's okay; good things
  380  * will happen later.  They always fail for now, until the trap code is
  381  * able to deal with this.
  382  */
  383 ALTENTRY(suswintr)
  384 ENTRY(fuswintr)
  385         movq    $-1,%rax
  386         ret
  387 
  388 ENTRY(fuword16)
  389         movq    PCPU(CURPCB),%rcx
  390         movq    $fusufault,PCB_ONFAULT(%rcx)
  391 
  392         movq    $VM_MAXUSER_ADDRESS-2,%rax
  393         cmpq    %rax,%rdi
  394         ja      fusufault
  395 
  396         movzwl  (%rdi),%eax
  397         movq    $0,PCB_ONFAULT(%rcx)
  398         ret
  399 
  400 ENTRY(fubyte)
  401         movq    PCPU(CURPCB),%rcx
  402         movq    $fusufault,PCB_ONFAULT(%rcx)
  403 
  404         movq    $VM_MAXUSER_ADDRESS-1,%rax
  405         cmpq    %rax,%rdi
  406         ja      fusufault
  407 
  408         movzbl  (%rdi),%eax
  409         movq    $0,PCB_ONFAULT(%rcx)
  410         ret
  411 
  412         ALIGN_TEXT
  413 fusufault:
  414         movq    PCPU(CURPCB),%rcx
  415         xorq    %rax,%rax
  416         movq    %rax,PCB_ONFAULT(%rcx)
  417         decq    %rax
  418         ret
  419 
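Every fetch routine funnels its faults through fusufault, which clears pcb_onfault and returns -1, so callers cannot tell a fault apart from a stored -1 and should use copyin() when that distinction matters. A brief usage sketch; user_flag_set is a hypothetical caller and the fubyte() prototype is assumed from <sys/systm.h>:

    #include <sys/types.h>
    #include <sys/systm.h>          /* fubyte() */

    /*
     * Peek at a user-supplied flag byte; a fault (-1) is simply treated
     * as "flag not set" here.
     */
    static int
    user_flag_set(const void *uaddr)
    {
            int v;

            v = fubyte(uaddr);      /* 0..255 on success, -1 on fault */
            return (v > 0);
    }
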
  420 /*
  421  * Store a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit byte to
  422  * user memory.  All these functions are MPSAFE.
  423  * addr = %rdi, value = %rsi
  424  */
  425 ALTENTRY(suword64)
  426 ENTRY(suword)
  427         movq    PCPU(CURPCB),%rcx
  428         movq    $fusufault,PCB_ONFAULT(%rcx)
  429 
  430         movq    $VM_MAXUSER_ADDRESS-8,%rax
  431         cmpq    %rax,%rdi                       /* verify address validity */
  432         ja      fusufault
  433 
  434         movq    %rsi,(%rdi)
  435         xorq    %rax,%rax
  436         movq    PCPU(CURPCB),%rcx
  437         movq    %rax,PCB_ONFAULT(%rcx)
  438         ret
  439 
  440 ENTRY(suword32)
  441         movq    PCPU(CURPCB),%rcx
  442         movq    $fusufault,PCB_ONFAULT(%rcx)
  443 
  444         movq    $VM_MAXUSER_ADDRESS-4,%rax
  445         cmpq    %rax,%rdi                       /* verify address validity */
  446         ja      fusufault
  447 
  448         movl    %esi,(%rdi)
  449         xorq    %rax,%rax
  450         movq    PCPU(CURPCB),%rcx
  451         movq    %rax,PCB_ONFAULT(%rcx)
  452         ret
  453 
  454 ENTRY(suword16)
  455         movq    PCPU(CURPCB),%rcx
  456         movq    $fusufault,PCB_ONFAULT(%rcx)
  457 
  458         movq    $VM_MAXUSER_ADDRESS-2,%rax
  459         cmpq    %rax,%rdi                       /* verify address validity */
  460         ja      fusufault
  461 
  462         movw    %si,(%rdi)
  463         xorq    %rax,%rax
  464         movq    PCPU(CURPCB),%rcx               /* restore trashed register */
  465         movq    %rax,PCB_ONFAULT(%rcx)
  466         ret
  467 
  468 ENTRY(subyte)
  469         movq    PCPU(CURPCB),%rcx
  470         movq    $fusufault,PCB_ONFAULT(%rcx)
  471 
  472         movq    $VM_MAXUSER_ADDRESS-1,%rax
  473         cmpq    %rax,%rdi                       /* verify address validity */
  474         ja      fusufault
  475 
  476         movl    %esi, %eax
  477         movb    %al,(%rdi)
  478         xorq    %rax,%rax
  479         movq    PCPU(CURPCB),%rcx               /* restore trashed register */
  480         movq    %rax,PCB_ONFAULT(%rcx)
  481         ret
  482 
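The store side mirrors the fetches: 0 on success, -1 after a fault. A sketch of a caller propagating that as EFAULT; clear_user_flag is hypothetical and the suword32() prototype is assumed from <sys/systm.h>:

    #include <sys/types.h>
    #include <sys/systm.h>          /* suword32() */
    #include <sys/errno.h>

    /*
     * Clear a 32-bit flag that the process polls in user space; -1 from
     * suword32() means the page was unmapped or not writable.
     */
    static int
    clear_user_flag(void *uaddr)
    {
            if (suword32(uaddr, 0) == -1)
                    return (EFAULT);
            return (0);
    }
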
  483 /*
  484  * copyinstr(from, to, maxlen, int *lencopied) - MP SAFE
  485  *           %rdi, %rsi, %rdx, %rcx
  486  *
   487  *      Copy a string from 'from' to 'to', stopping when a 0 character is
   488  *      reached.  Return ENAMETOOLONG if the string is longer than maxlen,
   489  *      and EFAULT on protection violations.  If lencopied is non-NULL,
   490  *      return the actual length in *lencopied.
  491  */
  492 ENTRY(copyinstr)
  493         movq    %rdx, %r8                       /* %r8 = maxlen */
  494         movq    %rcx, %r9                       /* %r9 = *len */
  495         xchgq   %rdi, %rsi                      /* %rdi = from, %rsi = to */
  496         movq    PCPU(CURPCB),%rcx
  497         movq    $cpystrflt,PCB_ONFAULT(%rcx)
  498 
  499         movq    $VM_MAXUSER_ADDRESS,%rax
  500 
  501         /* make sure 'from' is within bounds */
  502         subq    %rsi,%rax
  503         jbe     cpystrflt
  504 
  505         /* restrict maxlen to <= VM_MAXUSER_ADDRESS-from */
  506         cmpq    %rdx,%rax
  507         jae     1f
  508         movq    %rax,%rdx
  509         movq    %rax,%r8
  510 1:
  511         incq    %rdx
  512         cld
  513 
  514 2:
  515         decq    %rdx
  516         jz      3f
  517 
  518         lodsb
  519         stosb
  520         orb     %al,%al
  521         jnz     2b
  522 
  523         /* Success -- 0 byte reached */
  524         decq    %rdx
  525         xorq    %rax,%rax
  526         jmp     cpystrflt_x
  527 3:
  528         /* rdx is zero - return ENAMETOOLONG or EFAULT */
  529         movq    $VM_MAXUSER_ADDRESS,%rax
  530         cmpq    %rax,%rsi
  531         jae     cpystrflt
  532 4:
  533         movq    $ENAMETOOLONG,%rax
  534         jmp     cpystrflt_x
  535 
  536 cpystrflt:
  537         movq    $EFAULT,%rax
  538 
  539 cpystrflt_x:
  540         /* set *lencopied and return %eax */
  541         movq    PCPU(CURPCB),%rcx
  542         movq    $0,PCB_ONFAULT(%rcx)
  543 
  544         testq   %r9,%r9
  545         jz      1f
  546         subq    %rdx,%r8
  547         movq    %r8,(%r9)
  548 1:
  549         ret
  550 
  551 
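copyinstr is what the system-call layer uses to pull NUL-terminated strings such as path names out of user space with an explicit length cap. A hedged sketch of a typical caller; fetch_path is a hypothetical helper and MAXPATHLEN is the usual cap from <sys/param.h>:

    #include <sys/param.h>          /* MAXPATHLEN */
    #include <sys/systm.h>          /* copyinstr(), printf() */
    #include <sys/errno.h>

    /*
     * Copy a user-supplied path into a caller-provided kernel buffer of
     * at least MAXPATHLEN bytes; ENAMETOOLONG and EFAULT are passed
     * through from copyinstr().
     */
    static int
    fetch_path(const char *upath, char *kpath, size_t *lenp)
    {
            int error;

            error = copyinstr(upath, kpath, MAXPATHLEN, lenp);
            if (error == ENAMETOOLONG)
                    printf("path longer than %d bytes\n", MAXPATHLEN);
            return (error);
    }
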
  552 /*
  553  * copystr(from, to, maxlen, int *lencopied) - MP SAFE
  554  *         %rdi, %rsi, %rdx, %rcx
  555  */
  556 ENTRY(copystr)
  557         movq    %rdx, %r8                       /* %r8 = maxlen */
  558 
  559         xchgq   %rdi, %rsi
  560         incq    %rdx
  561         cld
  562 1:
  563         decq    %rdx
  564         jz      4f
  565         lodsb
  566         stosb
  567         orb     %al,%al
  568         jnz     1b
  569 
  570         /* Success -- 0 byte reached */
  571         decq    %rdx
  572         xorq    %rax,%rax
  573         jmp     6f
  574 4:
  575         /* rdx is zero -- return ENAMETOOLONG */
  576         movq    $ENAMETOOLONG,%rax
  577 
  578 6:
  579 
  580         testq   %rcx, %rcx
  581         jz      7f
  582         /* set *lencopied and return %rax */
  583         subq    %rdx, %r8
  584         movq    %r8, (%rcx)
  585 7:
  586         ret
  587 
  588 /*
  589  * Handling of special amd64 registers and descriptor tables etc
  590  * %rdi
  591  */
  592 /* void lgdt(struct region_descriptor *rdp); */
  593 ENTRY(lgdt)
  594         /* reload the descriptor table */
  595         lgdt    (%rdi)
  596 
  597         /* flush the prefetch q */
  598         jmp     1f
  599         nop
  600 1:
  601         movl    $KDSEL, %eax
  602         movl    %eax,%ds
  603         movl    %eax,%es
  604         movl    %eax,%fs        /* Beware, use wrmsr to set 64 bit base */
  605         movl    %eax,%gs
  606         movl    %eax,%ss
  607 
  608         /* reload code selector by turning return into intersegmental return */
  609         popq    %rax
  610         pushq   $KCSEL
  611         pushq   %rax
  612         MEXITCOUNT
  613         lretq
  614 
  615 /*****************************************************************************/
   616 /* setjmp, longjmp                                                           */
  617 /*****************************************************************************/
  618 
  619 ENTRY(setjmp)
  620         movq    %rbx,0(%rdi)                    /* save rbx */
  621         movq    %rsp,8(%rdi)                    /* save rsp */
  622         movq    %rbp,16(%rdi)                   /* save rbp */
  623         movq    %r12,24(%rdi)                   /* save r12 */
  624         movq    %r13,32(%rdi)                   /* save r13 */
  625         movq    %r14,40(%rdi)                   /* save r14 */
  626         movq    %r15,48(%rdi)                   /* save r15 */
  627         movq    0(%rsp),%rdx                    /* get rta */
  628         movq    %rdx,56(%rdi)                   /* save rip */
  629         xorl    %eax,%eax                       /* return(0); */
  630         ret
  631 
  632 ENTRY(longjmp)
  633         movq    0(%rdi),%rbx                    /* restore rbx */
  634         movq    8(%rdi),%rsp                    /* restore rsp */
  635         movq    16(%rdi),%rbp                   /* restore rbp */
  636         movq    24(%rdi),%r12                   /* restore r12 */
  637         movq    32(%rdi),%r13                   /* restore r13 */
  638         movq    40(%rdi),%r14                   /* restore r14 */
  639         movq    48(%rdi),%r15                   /* restore r15 */
  640         movq    56(%rdi),%rdx                   /* get rta */
  641         movq    %rdx,0(%rsp)                    /* put in return frame */
  642         xorl    %eax,%eax                       /* return(1); */
  643         incl    %eax
  644         ret
  645 
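These are the kernel's minimal setjmp/longjmp: setjmp records the callee-saved registers, the stack pointer and the return address in the eight-quadword buffer and returns 0, and longjmp reinstates them so the same setjmp call appears to return 1 (the kernel debugger uses this to recover from faults while examining memory). The control flow is the same as the standard C interface, sketched here with the libc setjmp/longjmp purely for illustration:

    #include <setjmp.h>
    #include <stdio.h>

    static jmp_buf env;

    static void
    risky_operation(int fail)
    {
            if (fail)
                    longjmp(env, 1);        /* unwind back to the setjmp */
            printf("operation completed\n");
    }

    int
    main(void)
    {
            if (setjmp(env) == 0)           /* direct return: 0 */
                    risky_operation(1);
            else                            /* re-entered via longjmp */
                    printf("recovered from failure\n");
            return (0);
    }
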
  646 /*
  647  * Support for BB-profiling (gcc -a).  The kernbb program will extract
  648  * the data from the kernel.
  649  */
  650 
  651         .data
  652         ALIGN_DATA
  653         .globl bbhead
  654 bbhead:
  655         .quad 0
  656 
  657         .text
  658 NON_GPROF_ENTRY(__bb_init_func)
  659         movq    $1,(%rdi)
  660         movq    bbhead,%rax
  661         movq    %rax,32(%rdi)
  662         movq    %rdi,bbhead
  663         NON_GPROF_RET
