FreeBSD/Linux Kernel Cross Reference
sys/amd64/amd64/support.S


/*-
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1993 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: releng/5.4/sys/amd64/amd64/support.S 145335 2005-04-20 19:11:07Z cvs2svn $
 */

#include "opt_ddb.h"

#include <machine/asmacros.h>
#include <machine/intr_machdep.h>
#include <machine/pmap.h>

#include "assym.s"

        ALIGN_DATA
        .globl  intrcnt, eintrcnt
intrcnt:
        .space  INTRCNT_COUNT * 8
eintrcnt:

        .globl  intrnames, eintrnames
intrnames:
        .space  INTRCNT_COUNT * (MAXCOMLEN + 1)
eintrnames:

        .text

/*
 * bcopy family
 * void bzero(void *buf, u_int len)
 */

/* done */
ENTRY(bzero)
        movq    %rsi,%rcx
        xorq    %rax,%rax
        shrq    $3,%rcx
        cld
        rep
        stosq
        movq    %rsi,%rcx
        andq    $7,%rcx
        rep
        stosb
        ret

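/*
 * Illustrative sketch (not part of the original source): bzero() above
 * clears the buffer as 64-bit stores plus a byte-sized tail, roughly the
 * following C, with buf arriving in %rdi and len in %rsi:
 *
 *	void bzero(void *buf, size_t len)
 *	{
 *		uint64_t *wp = buf;
 *		unsigned char *cp;
 *		size_t i;
 *
 *		for (i = 0; i < len / 8; i++)	// rep; stosq
 *			*wp++ = 0;
 *		cp = (unsigned char *)wp;
 *		for (i = 0; i < len % 8; i++)	// rep; stosb
 *			cp[i] = 0;
 *	}
 */
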
/* Address: %rdi */
ENTRY(pagezero)
        movq    $-PAGE_SIZE,%rdx
        subq    %rdx,%rdi
        xorq    %rax,%rax
1:
        movnti  %rax,(%rdi,%rdx)
        movnti  %rax,8(%rdi,%rdx)
        movnti  %rax,16(%rdi,%rdx)
        movnti  %rax,24(%rdi,%rdx)
        addq    $32,%rdx
        jne     1b
        sfence
        ret

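/*
 * Illustrative sketch (not part of the original source): pagezero() uses
 * non-temporal stores (movnti) so a freshly zeroed page does not displace
 * useful cache lines, then orders them with sfence.  A user-level C
 * illustration of the same pattern, assuming SSE2 intrinsics and an
 * 8-byte-aligned page of PAGE_SIZE bytes:
 *
 *	#include <emmintrin.h>
 *
 *	void pagezero_sketch(void *page)
 *	{
 *		long long *p = page;
 *		int i;
 *
 *		for (i = 0; i < PAGE_SIZE / 8; i++)
 *			_mm_stream_si64(p + i, 0);	// movnti: bypass the cache
 *		_mm_sfence();				// fence the streaming stores
 *	}
 */
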
ENTRY(bcmp)
        xorq    %rax,%rax

        movq    %rdx,%rcx
        shrq    $3,%rcx
        cld                                     /* compare forwards */
        repe
        cmpsq
        jne     1f

        movq    %rdx,%rcx
        andq    $7,%rcx
        repe
        cmpsb
        je      2f
1:
        incq    %rax
2:
        ret

/*
 * bcopy(src, dst, cnt)
 *       rdi, rsi, rdx
 *  ws@tools.de     (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
 */
ENTRY(bcopy)
        xchgq   %rsi,%rdi
        movq    %rdx,%rcx

        movq    %rdi,%rax
        subq    %rsi,%rax
        cmpq    %rcx,%rax                       /* overlapping && src < dst? */
        jb      1f

        shrq    $3,%rcx                         /* copy by 64-bit words */
        cld                                     /* nope, copy forwards */
        rep
        movsq
        movq    %rdx,%rcx
        andq    $7,%rcx                         /* any bytes left? */
        rep
        movsb
        ret

        /* ALIGN_TEXT */
1:
        addq    %rcx,%rdi                       /* copy backwards */
        addq    %rcx,%rsi
        decq    %rdi
        decq    %rsi
        andq    $7,%rcx                         /* any fractional bytes? */
        std
        rep
        movsb
        movq    %rdx,%rcx                       /* copy remainder by 64-bit words */
        shrq    $3,%rcx
        subq    $7,%rsi
        subq    $7,%rdi
        rep
        movsq
        cld
        ret

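/*
 * Illustrative sketch (not part of the original source): the single
 * unsigned comparison above ("cmpq %rcx,%rax; jb 1f") decides between the
 * forward and the backward copy.  In C terms, with copy_forwards() and
 * copy_backwards() as hypothetical helpers standing in for the two paths:
 *
 *	// After the xchg, %rdi = dst and %rsi = src.  A forward copy is
 *	// only unsafe when dst lands inside [src, src + cnt); because the
 *	// subtraction is unsigned, dst < src also falls through to the
 *	// forward case.
 *	if ((uintptr_t)dst - (uintptr_t)src < cnt)
 *		copy_backwards(dst, src, cnt);	// overlapping, dst above src
 *	else
 *		copy_forwards(dst, src, cnt);	// rep movsq / movsb
 */
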
/*
 * Note: memcpy does not support overlapping copies
 */
ENTRY(memcpy)
        movq    %rdx,%rcx
        shrq    $3,%rcx                         /* copy by 64-bit words */
        cld                                     /* copy forwards */
        rep
        movsq
        movq    %rdx,%rcx
        andq    $7,%rcx                         /* any bytes left? */
        rep
        movsb
        ret

/*
 * pagecopy(%rdi=from, %rsi=to)
 */
ENTRY(pagecopy)
        movq    $-PAGE_SIZE,%rax
        movq    %rax,%rdx
        subq    %rax,%rdi
        subq    %rax,%rsi
1:
        prefetchnta (%rdi,%rax)
        addq    $64,%rax
        jne     1b
2:
        movq    (%rdi,%rdx),%rax
        movnti  %rax,(%rsi,%rdx)
        movq    8(%rdi,%rdx),%rax
        movnti  %rax,8(%rsi,%rdx)
        movq    16(%rdi,%rdx),%rax
        movnti  %rax,16(%rsi,%rdx)
        movq    24(%rdi,%rdx),%rax
        movnti  %rax,24(%rsi,%rdx)
        addq    $32,%rdx
        jne     2b
        sfence
        ret

/* fillw(pat, base, cnt) */
/*       %rdi,%rsi, %rdx */
ENTRY(fillw)
        movq    %rdi,%rax
        movq    %rsi,%rdi
        movq    %rdx,%rcx
        cld
        rep
        stosw
        ret

/*****************************************************************************/
/* copyout and fubyte family                                                 */
/*****************************************************************************/
/*
 * Access user memory from inside the kernel. These routines and possibly
 * the math- and DOS emulators should be the only places that do this.
 *
 * We have to access the memory with user's permissions, so use a segment
 * selector with RPL 3. For writes to user space we have to additionally
 * check the PTE for write permission, because the 386 does not check
 * write permissions when we are executing with EPL 0. The 486 does check
 * this if the WP bit is set in CR0, so we can use a simpler version here.
 *
 * These routines set curpcb->onfault for the time they execute. When a
 * protection violation occurs inside the functions, the trap handler
 * returns to *curpcb->onfault instead of the function.
 */

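/*
 * Illustrative sketch (not part of the original source): the onfault
 * mechanism described above, in C-like pseudocode.  Each routine arms
 * curpcb->pcb_onfault with a recovery label before touching user memory;
 * if the access faults, trap() resumes execution at that label instead of
 * panicking, and the label's code returns EFAULT (or -1 for the
 * fuword()/suword() family):
 *
 *	pcb = PCPU_GET(curpcb);
 *	pcb->pcb_onfault = fault_handler;	// arm recovery
 *	... dereference the user address ...
 *	pcb->pcb_onfault = NULL;		// success: disarm
 *	return (0);
 *
 *    fault_handler:				// reached only via trap()
 *	pcb->pcb_onfault = NULL;
 *	return (EFAULT);
 */
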
/*
 * copyout(from_kernel, to_user, len)  - MP SAFE
 *         %rdi,        %rsi,    %rdx
 */
ENTRY(copyout)
        movq    PCPU(CURPCB),%rax
        movq    $copyout_fault,PCB_ONFAULT(%rax)
        testq   %rdx,%rdx                       /* anything to do? */
        jz      done_copyout

        /*
         * Check explicitly for non-user addresses.  If 486 write protection
         * is being used, this check is essential because we are in kernel
         * mode so the h/w does not provide any protection against writing
         * kernel addresses.
         */

        /*
         * First, prevent address wrapping.
         */
        movq    %rsi,%rax
        addq    %rdx,%rax
        jc      copyout_fault
/*
 * XXX STOP USING VM_MAXUSER_ADDRESS.
 * It is an end address, not a max, so every time it is used correctly it
 * looks like there is an off by one error, and of course it caused an off
 * by one error in several places.
 */
        movq    $VM_MAXUSER_ADDRESS,%rcx
        cmpq    %rcx,%rax
        ja      copyout_fault

        xchgq   %rdi, %rsi
        /* bcopy(%rsi, %rdi, %rdx) */
        movq    %rdx,%rcx

        shrq    $3,%rcx
        cld
        rep
        movsq
        movb    %dl,%cl
        andb    $7,%cl
        rep
        movsb

done_copyout:
        xorq    %rax,%rax
        movq    PCPU(CURPCB),%rdx
        movq    %rax,PCB_ONFAULT(%rdx)
        ret

        ALIGN_TEXT
copyout_fault:
        movq    PCPU(CURPCB),%rdx
        movq    $0,PCB_ONFAULT(%rdx)
        movq    $EFAULT,%rax
        ret

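/*
 * Illustrative sketch (not part of the original source): a typical caller
 * of copyout() above, copying a kernel structure out to a user-supplied
 * buffer.  The handler and structure names are hypothetical:
 *
 *	int
 *	mydev_getstats(struct mydev_stats *ks, void *ubuf, size_t len)
 *	{
 *		if (len > sizeof(*ks))
 *			len = sizeof(*ks);
 *		return (copyout(ks, ubuf, len));  // 0 on success, EFAULT on a bad pointer
 *	}
 */
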
/*
 * copyin(from_user, to_kernel, len) - MP SAFE
 *        %rdi,      %rsi,      %rdx
 */
ENTRY(copyin)
        movq    PCPU(CURPCB),%rax
        movq    $copyin_fault,PCB_ONFAULT(%rax)
        testq   %rdx,%rdx                       /* anything to do? */
        jz      done_copyin

        /*
         * make sure address is valid
         */
        movq    %rdi,%rax
        addq    %rdx,%rax
        jc      copyin_fault
        movq    $VM_MAXUSER_ADDRESS,%rcx
        cmpq    %rcx,%rax
        ja      copyin_fault

        xchgq   %rdi, %rsi
        movq    %rdx, %rcx
        movb    %cl,%al
        shrq    $3,%rcx                         /* copy by 64-bit words */
        cld
        rep
        movsq
        movb    %al,%cl
        andb    $7,%cl                          /* copy remaining bytes */
        rep
        movsb

done_copyin:
        xorq    %rax,%rax
        movq    PCPU(CURPCB),%rdx
        movq    %rax,PCB_ONFAULT(%rdx)
        ret

        ALIGN_TEXT
copyin_fault:
        movq    PCPU(CURPCB),%rdx
        movq    $0,PCB_ONFAULT(%rdx)
        movq    $EFAULT,%rax
        ret

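/*
 * Illustrative sketch (not part of the original source): a typical caller
 * of copyin() above, pulling an argument structure in from user space
 * before using it.  The structure and pointer names are hypothetical:
 *
 *	struct mydev_args a;
 *	int error;
 *
 *	error = copyin(user_argp, &a, sizeof(a));
 *	if (error != 0)
 *		return (error);		// EFAULT: bad or unmapped user pointer
 *	// 'a' is now a kernel-private copy and cannot change under us
 */
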
/*
 * casuptr.  Compare and set user pointer.  Returns -1 or the current value.
 *        dst = %rdi, old = %rsi, new = %rdx
 */
ENTRY(casuptr)
        movq    PCPU(CURPCB),%rcx
        movq    $fusufault,PCB_ONFAULT(%rcx)

        movq    $VM_MAXUSER_ADDRESS-4,%rax
        cmpq    %rax,%rdi                       /* verify address is valid */
        ja      fusufault

        movq    %rsi, %rax                      /* old */
#ifdef SMP
        lock
#endif
        cmpxchgq %rdx, (%rdi)                   /* new = %rdx */

        /*
         * The old value is in %rax.  If the store succeeded it will be the
         * value we expected (old) from before the store, otherwise it will
         * be the current value.
         */

        movq    PCPU(CURPCB),%rcx
        movq    $fusufault,PCB_ONFAULT(%rcx)
        movq    $0,PCB_ONFAULT(%rcx)
        ret

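/*
 * Illustrative sketch (not part of the original source): how a caller
 * might use casuptr() above to atomically claim a word in user memory
 * (much as the umtx code does).  Note that -1 is also a legal stored
 * value, so a -1 return is ambiguous between "faulted" and "the word
 * held -1"; the names below are hypothetical:
 *
 *	intptr_t old;
 *
 *	old = casuptr(uaddr, UNOWNED, owner_id);
 *	if (old == -1)
 *		return (EFAULT);	// (or the word really held -1)
 *	if (old == UNOWNED)
 *		;			// the compare-and-set took effect
 *	else
 *		;			// lost the race; 'old' is the current value
 */
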
/*
 * Fetch (load) a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit
 * byte from user memory.  All these functions are MPSAFE.
 * addr = %rdi
 */

ALTENTRY(fuword64)
ENTRY(fuword)
        movq    PCPU(CURPCB),%rcx
        movq    $fusufault,PCB_ONFAULT(%rcx)

        movq    $VM_MAXUSER_ADDRESS-8,%rax
        cmpq    %rax,%rdi                       /* verify address is valid */
        ja      fusufault

        movq    (%rdi),%rax
        movq    $0,PCB_ONFAULT(%rcx)
        ret

ENTRY(fuword32)
        movq    PCPU(CURPCB),%rcx
        movq    $fusufault,PCB_ONFAULT(%rcx)

        movq    $VM_MAXUSER_ADDRESS-4,%rax
        cmpq    %rax,%rdi                       /* verify address is valid */
        ja      fusufault

        movl    (%rdi),%eax
        movq    $0,PCB_ONFAULT(%rcx)
        ret

/*
 * fuswintr() and suswintr() are specialized variants of fuword16() and
 * suword16(), respectively.  They are called from the profiling code,
 * potentially at interrupt time.  If they fail, that's okay; good things
 * will happen later.  They always fail for now, until the trap code is
 * able to deal with this.
 */
ALTENTRY(suswintr)
ENTRY(fuswintr)
        movq    $-1,%rax
        ret

ENTRY(fuword16)
        movq    PCPU(CURPCB),%rcx
        movq    $fusufault,PCB_ONFAULT(%rcx)

        movq    $VM_MAXUSER_ADDRESS-2,%rax
        cmpq    %rax,%rdi
        ja      fusufault

        movzwl  (%rdi),%eax
        movq    $0,PCB_ONFAULT(%rcx)
        ret

ENTRY(fubyte)
        movq    PCPU(CURPCB),%rcx
        movq    $fusufault,PCB_ONFAULT(%rcx)

        movq    $VM_MAXUSER_ADDRESS-1,%rax
        cmpq    %rax,%rdi
        ja      fusufault

        movzbl  (%rdi),%eax
        movq    $0,PCB_ONFAULT(%rcx)
        ret

        ALIGN_TEXT
fusufault:
        movq    PCPU(CURPCB),%rcx
        xorq    %rax,%rax
        movq    %rax,PCB_ONFAULT(%rcx)
        decq    %rax
        ret

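/*
 * Illustrative sketch (not part of the original source): the fuword()
 * family returns the fetched value, or -1 if the user address faulted or
 * was out of range.  A -1 return is therefore ambiguous with a stored
 * value of -1, which callers must tolerate:
 *
 *	long v;
 *
 *	v = fuword(uaddr);
 *	if (v == -1)
 *		;	// either the access faulted or the word really held -1
 */
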
/*
 * Store a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit byte to
 * user memory.  All these functions are MPSAFE.
 * addr = %rdi, value = %rsi
 */
ALTENTRY(suword64)
ENTRY(suword)
        movq    PCPU(CURPCB),%rcx
        movq    $fusufault,PCB_ONFAULT(%rcx)

        movq    $VM_MAXUSER_ADDRESS-8,%rax
        cmpq    %rax,%rdi                       /* verify address validity */
        ja      fusufault

        movq    %rsi,(%rdi)
        xorq    %rax,%rax
        movq    PCPU(CURPCB),%rcx
        movq    %rax,PCB_ONFAULT(%rcx)
        ret

ENTRY(suword32)
        movq    PCPU(CURPCB),%rcx
        movq    $fusufault,PCB_ONFAULT(%rcx)

        movq    $VM_MAXUSER_ADDRESS-4,%rax
        cmpq    %rax,%rdi                       /* verify address validity */
        ja      fusufault

        movl    %esi,(%rdi)
        xorq    %rax,%rax
        movq    PCPU(CURPCB),%rcx
        movq    %rax,PCB_ONFAULT(%rcx)
        ret

ENTRY(suword16)
        movq    PCPU(CURPCB),%rcx
        movq    $fusufault,PCB_ONFAULT(%rcx)

        movq    $VM_MAXUSER_ADDRESS-2,%rax
        cmpq    %rax,%rdi                       /* verify address validity */
        ja      fusufault

        movw    %si,(%rdi)
        xorq    %rax,%rax
        movq    PCPU(CURPCB),%rcx               /* restore trashed register */
        movq    %rax,PCB_ONFAULT(%rcx)
        ret

ENTRY(subyte)
        movq    PCPU(CURPCB),%rcx
        movq    $fusufault,PCB_ONFAULT(%rcx)

        movq    $VM_MAXUSER_ADDRESS-1,%rax
        cmpq    %rax,%rdi                       /* verify address validity */
        ja      fusufault

        movl    %esi, %eax
        movb    %al,(%rdi)
        xorq    %rax,%rax
        movq    PCPU(CURPCB),%rcx               /* restore trashed register */
        movq    %rax,PCB_ONFAULT(%rcx)
        ret

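/*
 * Illustrative sketch (not part of the original source): the suword()
 * family returns 0 on success and -1 if the store faulted, so callers
 * usually just map -1 to EFAULT:
 *
 *	if (suword(uaddr, value) == -1)
 *		return (EFAULT);
 */
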
/*
 * copyinstr(from, to, maxlen, int *lencopied) - MP SAFE
 *           %rdi, %rsi, %rdx, %rcx
 *
 *      copy a string from 'from' to 'to', stopping when a 0 byte is reached.
 *      return ENAMETOOLONG if the string is longer than maxlen, and
 *      EFAULT on protection violations. If lencopied is non-NULL,
 *      return the actual length in *lencopied.
 */
ENTRY(copyinstr)
        movq    %rdx, %r8                       /* %r8 = maxlen */
        movq    %rcx, %r9                       /* %r9 = *len */
        xchgq   %rdi, %rsi                      /* %rsi = from, %rdi = to */
        movq    PCPU(CURPCB),%rcx
        movq    $cpystrflt,PCB_ONFAULT(%rcx)

        movq    $VM_MAXUSER_ADDRESS,%rax

        /* make sure 'from' is within bounds */
        subq    %rsi,%rax
        jbe     cpystrflt

        /* restrict maxlen to <= VM_MAXUSER_ADDRESS-from */
        cmpq    %rdx,%rax
        jae     1f
        movq    %rax,%rdx
        movq    %rax,%r8
1:
        incq    %rdx
        cld

2:
        decq    %rdx
        jz      3f

        lodsb
        stosb
        orb     %al,%al
        jnz     2b

        /* Success -- 0 byte reached */
        decq    %rdx
        xorq    %rax,%rax
        jmp     cpystrflt_x
3:
        /* rdx is zero - return ENAMETOOLONG or EFAULT */
        movq    $VM_MAXUSER_ADDRESS,%rax
        cmpq    %rax,%rsi
        jae     cpystrflt
4:
        movq    $ENAMETOOLONG,%rax
        jmp     cpystrflt_x

cpystrflt:
        movq    $EFAULT,%rax

cpystrflt_x:
        /* set *lencopied and return %rax */
        movq    PCPU(CURPCB),%rcx
        movq    $0,PCB_ONFAULT(%rcx)

        testq   %r9,%r9
        jz      1f
        subq    %rdx,%r8
        movq    %r8,(%r9)
1:
        ret


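/*
 * Illustrative sketch (not part of the original source): the classic
 * caller of copyinstr() above is path-name handling, where the length
 * actually copied (including the terminating NUL) is reported through
 * the last argument.  Buffer handling here is simplified:
 *
 *	char path[MAXPATHLEN];
 *	size_t done;
 *	int error;
 *
 *	error = copyinstr(user_path, path, sizeof(path), &done);
 *	if (error != 0)
 *		return (error);	// EFAULT or ENAMETOOLONG
 *	// 'path' now holds a NUL-terminated kernel copy, 'done' bytes long
 */
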
/*
 * copystr(from, to, maxlen, int *lencopied) - MP SAFE
 *         %rdi, %rsi, %rdx, %rcx
 */
ENTRY(copystr)
        movq    %rdx, %r8                       /* %r8 = maxlen */

        xchgq   %rdi, %rsi
        incq    %rdx
        cld
1:
        decq    %rdx
        jz      4f
        lodsb
        stosb
        orb     %al,%al
        jnz     1b

        /* Success -- 0 byte reached */
        decq    %rdx
        xorq    %rax,%rax
        jmp     6f
4:
        /* rdx is zero -- return ENAMETOOLONG */
        movq    $ENAMETOOLONG,%rax

6:

        testq   %rcx, %rcx
        jz      7f
        /* set *lencopied and return %rax */
        subq    %rdx, %r8
        movq    %r8, (%rcx)
7:
        ret

/*
 * Handling of special 386 registers and descriptor tables etc
 * %rdi
 */
/* void lgdt(struct region_descriptor *rdp); */
ENTRY(lgdt)
        /* reload the descriptor table */
        lgdt    (%rdi)

        /* flush the prefetch q */
        jmp     1f
        nop
1:
        movl    $KDSEL, %eax
        movl    %eax,%ds
        movl    %eax,%es
        movl    %eax,%fs        /* Beware, use wrmsr to set 64 bit base */
        movl    %eax,%gs
        movl    %eax,%ss

        /* reload code selector by turning return into intersegmental return */
        popq    %rax
        pushq   $KCSEL
        pushq   %rax
        MEXITCOUNT
        lretq

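/*
 * Illustrative sketch (not part of the original source): lgdt() expects a
 * pseudo-descriptor holding a 16-bit limit and a 64-bit linear base, as
 * consumed by the LGDT instruction.  A caller builds one roughly like this
 * (the field and symbol names are a sketch of the machine-dependent setup
 * code, not a definitive interface):
 *
 *	struct region_descriptor r_gdt;	// { 16-bit limit; 64-bit base } packed
 *
 *	r_gdt.rd_limit = sizeof(gdt) - 1;	// extent of the GDT in bytes, minus one
 *	r_gdt.rd_base = (long)gdt;		// linear address of the table
 *	lgdt(&r_gdt);
 */
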
/*****************************************************************************/
/* setjump, longjump                                                         */
/*****************************************************************************/

ENTRY(setjmp)
        movq    %rbx,0(%rdi)                    /* save rbx */
        movq    %rsp,8(%rdi)                    /* save rsp */
        movq    %rbp,16(%rdi)                   /* save rbp */
        movq    %r12,24(%rdi)                   /* save r12 */
        movq    %r13,32(%rdi)                   /* save r13 */
        movq    %r14,40(%rdi)                   /* save r14 */
        movq    %r15,48(%rdi)                   /* save r15 */
        movq    0(%rsp),%rdx                    /* get rta */
        movq    %rdx,56(%rdi)                   /* save rip */
        xorl    %eax,%eax                       /* return(0); */
        ret

ENTRY(longjmp)
        movq    0(%rdi),%rbx                    /* restore rbx */
        movq    8(%rdi),%rsp                    /* restore rsp */
        movq    16(%rdi),%rbp                   /* restore rbp */
        movq    24(%rdi),%r12                   /* restore r12 */
        movq    32(%rdi),%r13                   /* restore r13 */
        movq    40(%rdi),%r14                   /* restore r14 */
        movq    48(%rdi),%r15                   /* restore r15 */
        movq    56(%rdi),%rdx                   /* get rta */
        movq    %rdx,0(%rsp)                    /* put in return frame */
        xorl    %eax,%eax                       /* return(1); */
        incl    %eax
        ret

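/*
 * Illustrative sketch (not part of the original source): these are the
 * in-kernel setjmp()/longjmp(), which save only the callee-saved
 * registers, %rsp, and the return address.  The usual pattern looks like:
 *
 *	jmp_buf jb;
 *
 *	if (setjmp(jb) == 0) {
 *		...	// normal path; a later longjmp() on jb unwinds to the branch below
 *	} else {
 *		...	// resumed after a longjmp(); setjmp() appears to return 1
 *	}
 */
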
/*
 * Support for BB-profiling (gcc -a).  The kernbb program will extract
 * the data from the kernel.
 */

        .data
        ALIGN_DATA
        .globl bbhead
bbhead:
        .quad 0

        .text
NON_GPROF_ENTRY(__bb_init_func)
        movq    $1,(%rdi)
        movq    bbhead,%rax
        movq    %rax,32(%rdi)
        movq    %rdi,bbhead
        NON_GPROF_RET
