The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/amd64/amd64/support.S

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*-
    2  * Copyright (c) 2003 Peter Wemm.
    3  * Copyright (c) 1993 The Regents of the University of California.
    4  * All rights reserved.
    5  *
    6  * Redistribution and use in source and binary forms, with or without
    7  * modification, are permitted provided that the following conditions
    8  * are met:
    9  * 1. Redistributions of source code must retain the above copyright
   10  *    notice, this list of conditions and the following disclaimer.
   11  * 2. Redistributions in binary form must reproduce the above copyright
   12  *    notice, this list of conditions and the following disclaimer in the
   13  *    documentation and/or other materials provided with the distribution.
   14  * 4. Neither the name of the University nor the names of its contributors
   15  *    may be used to endorse or promote products derived from this software
   16  *    without specific prior written permission.
   17  *
   18  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   28  * SUCH DAMAGE.
   29  *
   30  * $FreeBSD$
   31  */
   32 
   33 #include "opt_ddb.h"
   34 
   35 #include <machine/asmacros.h>
   36 #include <machine/intr_machdep.h>
   37 #include <machine/pmap.h>
   38 
   39 #include "assym.s"
   40 
/*
 * Interrupt statistics storage: one 64-bit counter per interrupt source,
 * and a parallel array of fixed-width name slots.
 */
    41         ALIGN_DATA
    42         .globl  intrcnt, eintrcnt
    43 intrcnt:
    44         .space  INTRCNT_COUNT * 8               /* 8 bytes per counter */
    45 eintrcnt:
    46 
    47         .globl  intrnames, eintrnames
    48 intrnames:
    49         .space  INTRCNT_COUNT * (MAXCOMLEN + 1) /* one name slot each */
    50 eintrnames:
   51 
   52         .text
   53 
    54 /*
    55  * bcopy family
    56  * void bzero(void *buf, u_int len)
    57  */
    58 
    59 /* done */
    60 ENTRY(bzero)
    61         movq    %rsi,%rcx               /* %rcx = byte count */
    62         xorl    %eax,%eax               /* fill value: zero */
    63         shrq    $3,%rcx                 /* number of whole 8-byte words */
    64         cld                             /* store forwards */
    65         rep
    66         stosq                           /* zero 8 bytes at a time */
    67         movq    %rsi,%rcx
    68         andq    $7,%rcx                 /* 0-7 remaining tail bytes */
    69         rep
    70         stosb
    71         ret
    72 END(bzero)
   73         
    74 /* Address: %rdi */
    75 ENTRY(pagezero)
    76         movq    $-PAGE_SIZE,%rdx        /* %rdx runs -PAGE_SIZE .. 0 */
    77         subq    %rdx,%rdi               /* point %rdi just past the page */
    78         xorl    %eax,%eax               /* store value: zero */
    79 1:
    80         movnti  %rax,(%rdi,%rdx)        /* non-temporal stores avoid */
    81         movnti  %rax,8(%rdi,%rdx)       /* polluting the cache with a */
    82         movnti  %rax,16(%rdi,%rdx)      /* page that is all zeroes */
    83         movnti  %rax,24(%rdi,%rdx)
    84         addq    $32,%rdx                /* 32 bytes per iteration */
    85         jne     1b
    86         sfence                          /* order the movnti stores */
    87         ret
    88 END(pagezero)
   89 
/* int bcmp(const void *b1 = %rdi, const void *b2 = %rsi, size_t len = %rdx) */
    90 ENTRY(bcmp)
    91         movq    %rdx,%rcx
    92         shrq    $3,%rcx                 /* compare by 8-byte words */
    93         cld                                     /* compare forwards */
    94         repe
    95         cmpsq
    96         jne     1f                      /* mismatch found in word pass */
    97 
    98         movq    %rdx,%rcx
    99         andq    $7,%rcx                 /* compare 0-7 tail bytes */
   100         repe
   101         cmpsb
   102 1:
   103         setne   %al                     /* 0 if equal, 1 if different */
   104         movsbl  %al,%eax
   105         ret
   106 END(bcmp)
  107 
   108 /*
   109  * bcopy(src, dst, cnt)
   110  *       rdi, rsi, rdx
   111  *  ws@tools.de     (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
   112  */
   113 ENTRY(bcopy)
   114         xchgq   %rsi,%rdi               /* movs convention: src=%rsi, dst=%rdi */
   115         movq    %rdx,%rcx
   116 
   117         movq    %rdi,%rax
   118         subq    %rsi,%rax               /* %rax = dst - src */
   119         cmpq    %rcx,%rax                       /* overlapping && src < dst? */
   120         jb      1f
   121 
   122         shrq    $3,%rcx                         /* copy by 64-bit words */
   123         cld                                     /* nope, copy forwards */
   124         rep
   125         movsq
   126         movq    %rdx,%rcx
   127         andq    $7,%rcx                         /* any bytes left? */
   128         rep
   129         movsb
   130         ret
   131 
   132         /* ALIGN_TEXT */
   133 1:
   134         addq    %rcx,%rdi                       /* copy backwards */
   135         addq    %rcx,%rsi
   136         decq    %rdi                    /* start at last byte of each */
   137         decq    %rsi
   138         andq    $7,%rcx                         /* any fractional bytes? */
   139         std                             /* string ops run backwards */
   140         rep
   141         movsb
   142         movq    %rdx,%rcx                       /* copy remainder by 64-bit words */
   143         shrq    $3,%rcx
   144         subq    $7,%rsi                 /* step back to start of last word */
   145         subq    $7,%rdi
   146         rep
   147         movsq
   148         cld                             /* restore direction flag */
   149         ret
   150 END(bcopy)
  151         
   152 /*
   153  * Note: memcpy does not support overlapping copies
   154  * memcpy(dst = %rdi, src = %rsi, len = %rdx) -- always copies forwards
   155  */
   155 ENTRY(memcpy)
   156         movq    %rdx,%rcx
   157         shrq    $3,%rcx                         /* copy by 64-bit words */
   158         cld                                     /* copy forwards */
   159         rep
   160         movsq
   161         movq    %rdx,%rcx
   162         andq    $7,%rcx                         /* any bytes left? */
   163         rep
   164         movsb
   165         ret
   166 END(memcpy)
  167 
   168 /*
   169  * pagecopy(%rdi=from, %rsi=to)
   170  */
   171 ENTRY(pagecopy)
   172         movq    $-PAGE_SIZE,%rax        /* negative index: -PAGE_SIZE..0 */
   173         movq    %rax,%rdx               /* second copy for the copy loop */
   174         subq    %rax,%rdi               /* point just past source page */
   175         subq    %rax,%rsi               /* point just past dest page */
   176 1:
   177         prefetchnta (%rdi,%rax)         /* pre-touch source, one 64-byte */
   178         addq    $64,%rax                /* cache line per iteration */
   179         jne     1b
   180 2:
   181         movq    (%rdi,%rdx),%rax        /* copy 32 bytes per iteration, */
   182         movnti  %rax,(%rsi,%rdx)        /* non-temporal stores keep the */
   183         movq    8(%rdi,%rdx),%rax       /* destination out of the cache */
   184         movnti  %rax,8(%rsi,%rdx)
   185         movq    16(%rdi,%rdx),%rax
   186         movnti  %rax,16(%rsi,%rdx)
   187         movq    24(%rdi,%rdx),%rax
   188         movnti  %rax,24(%rsi,%rdx)
   189         addq    $32,%rdx
   190         jne     2b
   191         sfence                          /* order the movnti stores */
   192         ret
   193 END(pagecopy)
  194 
   195 /* fillw(pat, base, cnt) */  
   196 /*       %rdi,%rsi, %rdx */
   197 ENTRY(fillw)
   198         movq    %rdi,%rax               /* %ax = 16-bit fill pattern */
   199         movq    %rsi,%rdi               /* %rdi = destination base */
   200         movq    %rdx,%rcx               /* %rcx = count of 16-bit words */
   201         cld                             /* store forwards */
   202         rep
   203         stosw                           /* store %ax cnt times */
   204         ret
   205 END(fillw)
  206 
  207 /*****************************************************************************/
  208 /* copyout and fubyte family                                                 */
  209 /*****************************************************************************/
  210 /*
  211  * Access user memory from inside the kernel. These routines should be
  212  * the only places that do this.
  213  *
  214  * These routines set curpcb->onfault for the time they execute. When a
  215  * protection violation occurs inside the functions, the trap handler
  216  * returns to *curpcb->onfault instead of the function.
  217  */
  218 
   219 /*
   220  * copyout(from_kernel, to_user, len)  - MP SAFE
   221  *         %rdi,        %rsi,    %rdx
   222  */
   223 ENTRY(copyout)
   224         movq    PCPU(CURPCB),%rax
   225         movq    $copyout_fault,PCB_ONFAULT(%rax)        /* arm fault handler */
   226         testq   %rdx,%rdx                       /* anything to do? */
   227         jz      done_copyout
   228 
   229         /*
   230          * Check explicitly for non-user addresses.  If 486 write protection
   231          * is being used, this check is essential because we are in kernel
   232          * mode so the h/w does not provide any protection against writing
   233          * kernel addresses.
   234          */
   235 
   236         /*
   237          * First, prevent address wrapping.
   238          */
   239         movq    %rsi,%rax
   240         addq    %rdx,%rax               /* %rax = end of user range */
   241         jc      copyout_fault           /* wrapped past 2^64 */
   242 /*
   243  * XXX STOP USING VM_MAXUSER_ADDRESS.
   244  * It is an end address, not a max, so every time it is used correctly it
   245  * looks like there is an off by one error, and of course it caused an off
   246  * by one error in several places.
   247  */
   248         movq    $VM_MAXUSER_ADDRESS,%rcx
   249         cmpq    %rcx,%rax
   250         ja      copyout_fault           /* range extends past user limit */
   251 
   252         xchgq   %rdi,%rsi
   253         /* bcopy(%rsi, %rdi, %rdx) */
   254         movq    %rdx,%rcx
   255 
   256         shrq    $3,%rcx                 /* copy by 8-byte words */
   257         cld
   258         rep
   259         movsq
   260         movb    %dl,%cl
   261         andb    $7,%cl                  /* 0-7 tail bytes (len mod 8) */
   262         rep
   263         movsb
   264 
   265 done_copyout:
   266         xorl    %eax,%eax               /* success: return 0 */
   267         movq    PCPU(CURPCB),%rdx
   268         movq    %rax,PCB_ONFAULT(%rdx)  /* disarm fault handler */
   269         ret
   270 
   271         ALIGN_TEXT
   272 copyout_fault:
   273         movq    PCPU(CURPCB),%rdx
   274         movq    $0,PCB_ONFAULT(%rdx)    /* disarm fault handler */
   275         movq    $EFAULT,%rax            /* failure: return EFAULT */
   276         ret
   277 END(copyout)
  278 
   279 /*
   280  * copyin(from_user, to_kernel, len) - MP SAFE
   281  *        %rdi,      %rsi,      %rdx
   282  */
   283 ENTRY(copyin)
   284         movq    PCPU(CURPCB),%rax
   285         movq    $copyin_fault,PCB_ONFAULT(%rax)         /* arm fault handler */
   286         testq   %rdx,%rdx                       /* anything to do? */
   287         jz      done_copyin
   288 
   289         /*
   290          * make sure address is valid
   291          */
   292         movq    %rdi,%rax
   293         addq    %rdx,%rax               /* %rax = end of user range */
   294         jc      copyin_fault            /* wrapped past 2^64 */
   295         movq    $VM_MAXUSER_ADDRESS,%rcx
   296         cmpq    %rcx,%rax
   297         ja      copyin_fault            /* range extends past user limit */
   298 
   299         xchgq   %rdi,%rsi
   300         movq    %rdx,%rcx
   301         movb    %cl,%al                 /* save low byte of len for tail */
   302         shrq    $3,%rcx                         /* copy longword-wise */
   303         cld
   304         rep
   305         movsq
   306         movb    %al,%cl
   307         andb    $7,%cl                          /* copy remaining bytes */
   308         rep
   309         movsb
   310 
   311 done_copyin:
   312         xorl    %eax,%eax               /* success: return 0 */
   313         movq    PCPU(CURPCB),%rdx
   314         movq    %rax,PCB_ONFAULT(%rdx)  /* disarm fault handler */
   315         ret
   316 
   317         ALIGN_TEXT
   318 copyin_fault:
   319         movq    PCPU(CURPCB),%rdx
   320         movq    $0,PCB_ONFAULT(%rdx)    /* disarm fault handler */
   321         movq    $EFAULT,%rax            /* failure: return EFAULT */
   322         ret
   323 END(copyin)
  324 
   325 /*
   326  * casuword32.  Compare and set user integer.  Returns -1 or the current value.
   327  *        dst = %rdi, old = %rsi, new = %rdx
   328  */
   329 ENTRY(casuword32)
   330         movq    PCPU(CURPCB),%rcx
   331         movq    $fusufault,PCB_ONFAULT(%rcx)    /* fault -> return -1 */
   332 
   333         movq    $VM_MAXUSER_ADDRESS-4,%rax      /* -4: 4-byte access must
   334                                                    end at/below the limit */
   334         cmpq    %rax,%rdi                       /* verify address is valid */
   335         ja      fusufault
   336 
   337         movl    %esi,%eax                       /* old */
   338 #ifdef SMP
   339         lock
   340 #endif
   341         cmpxchgl %edx,(%rdi)                    /* new = %edx */
   342 
   343         /*
   344          * The old value is in %eax.  If the store succeeded it will be the
   345          * value we expected (old) from before the store, otherwise it will
   346          * be the current value.
   347          */
   348 
   349         movq    PCPU(CURPCB),%rcx
   350         movq    $0,PCB_ONFAULT(%rcx)            /* disarm fault handler */
   351         ret
   352 END(casuword32)
  353 
  354 /*
  355  * casuword.  Compare and set user word.  Returns -1 or the current value.
  356  *        dst = %rdi, old = %rsi, new = %rdx
  357  */
  358 ENTRY(casuword)
  359         movq    PCPU(CURPCB),%rcx
  360         movq    $fusufault,PCB_ONFAULT(%rcx)
  361 
  362         movq    $VM_MAXUSER_ADDRESS-4,%rax
  363         cmpq    %rax,%rdi                       /* verify address is valid */
  364         ja      fusufault
  365 
  366         movq    %rsi,%rax                       /* old */
  367 #ifdef SMP
  368         lock
  369 #endif
  370         cmpxchgq %rdx,(%rdi)                    /* new = %rdx */
  371 
  372         /*
  373          * The old value is in %eax.  If the store succeeded it will be the
  374          * value we expected (old) from before the store, otherwise it will
  375          * be the current value.
  376          */
  377 
  378         movq    PCPU(CURPCB),%rcx
  379         movq    $fusufault,PCB_ONFAULT(%rcx)
  380         movq    $0,PCB_ONFAULT(%rcx)
  381         ret
  382 END(casuword)
  383 
   384 /*
   385  * Fetch (load) a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit
   386  * byte from user memory.  All these functions are MPSAFE.
   387  * addr = %rdi
   388  * Returns the fetched value, or -1 via fusufault on a bad address.
   389  */
   390 ALTENTRY(fuword64)
   391 ENTRY(fuword)
   392         movq    PCPU(CURPCB),%rcx
   393         movq    $fusufault,PCB_ONFAULT(%rcx)    /* fault -> return -1 */
   394 
   395         movq    $VM_MAXUSER_ADDRESS-8,%rax      /* 8-byte access bound */
   396         cmpq    %rax,%rdi                       /* verify address is valid */
   397         ja      fusufault
   398 
   399         movq    (%rdi),%rax             /* fetch the user word */
   400         movq    $0,PCB_ONFAULT(%rcx)    /* disarm fault handler */
   401         ret
   402 END(fuword64)   
   403 END(fuword)
  404 
/* Fetch a 32-bit word from user address %rdi; -1 on fault. */
   405 ENTRY(fuword32)
   406         movq    PCPU(CURPCB),%rcx
   407         movq    $fusufault,PCB_ONFAULT(%rcx)    /* fault -> return -1 */
   408 
   409         movq    $VM_MAXUSER_ADDRESS-4,%rax      /* 4-byte access bound */
   410         cmpq    %rax,%rdi                       /* verify address is valid */
   411         ja      fusufault
   412 
   413         movl    (%rdi),%eax             /* fetch; zero-extends into %rax */
   414         movq    $0,PCB_ONFAULT(%rcx)    /* disarm fault handler */
   415         ret
   416 END(fuword32)
  417 
   418 /*
   419  * fuswintr() and suswintr() are specialized variants of fuword16() and
   420  * suword16(), respectively.  They are called from the profiling code,
   421  * potentially at interrupt time.  If they fail, that's okay; good things
   422  * will happen later.  They always fail for now, until the trap code is
   423  * able to deal with this.
   424  */
   425 ALTENTRY(suswintr)
   426 ENTRY(fuswintr)
   427         movq    $-1,%rax                /* unconditional failure */
   428         ret
   429 END(suswintr)
   430 END(fuswintr)
  431 
/* Fetch a 16-bit word from user address %rdi; -1 on fault. */
   432 ENTRY(fuword16)
   433         movq    PCPU(CURPCB),%rcx
   434         movq    $fusufault,PCB_ONFAULT(%rcx)    /* fault -> return -1 */
   435 
   436         movq    $VM_MAXUSER_ADDRESS-2,%rax      /* 2-byte access bound */
   437         cmpq    %rax,%rdi
   438         ja      fusufault
   439 
   440         movzwl  (%rdi),%eax             /* fetch, zero-extended */
   441         movq    $0,PCB_ONFAULT(%rcx)    /* disarm fault handler */
   442         ret
   443 END(fuword16)
  444 
/* Fetch a byte from user address %rdi; -1 on fault. */
   445 ENTRY(fubyte)
   446         movq    PCPU(CURPCB),%rcx
   447         movq    $fusufault,PCB_ONFAULT(%rcx)    /* fault -> return -1 */
   448 
   449         movq    $VM_MAXUSER_ADDRESS-1,%rax      /* 1-byte access bound */
   450         cmpq    %rax,%rdi
   451         ja      fusufault
   452 
   453         movzbl  (%rdi),%eax             /* fetch, zero-extended */
   454         movq    $0,PCB_ONFAULT(%rcx)    /* disarm fault handler */
   455         ret
   456 END(fubyte)
  457 
        /*
         * Common fault landing pad for the fu*()/su*()/casuword*() routines:
         * disarm the onfault handler and return -1.
         */
   458         ALIGN_TEXT
   459 fusufault:
   460         movq    PCPU(CURPCB),%rcx
   461         xorl    %eax,%eax
   462         movq    %rax,PCB_ONFAULT(%rcx)  /* disarm fault handler */
   463         decq    %rax                    /* return -1 */
   464         ret
  465 
   466 /*
   467  * Store a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit byte to
   468  * user memory.  All these functions are MPSAFE.
   469  * addr = %rdi, value = %rsi
   470  * Returns 0 on success, -1 via fusufault on a bad address.
   471  */
   471 ALTENTRY(suword64)
   472 ENTRY(suword)
   473         movq    PCPU(CURPCB),%rcx
   474         movq    $fusufault,PCB_ONFAULT(%rcx)    /* fault -> return -1 */
   475 
   476         movq    $VM_MAXUSER_ADDRESS-8,%rax      /* 8-byte access bound */
   477         cmpq    %rax,%rdi                       /* verify address validity */
   478         ja      fusufault
   479 
   480         movq    %rsi,(%rdi)             /* store the word */
   481         xorl    %eax,%eax               /* success: return 0 */
   482         movq    PCPU(CURPCB),%rcx
   483         movq    %rax,PCB_ONFAULT(%rcx)  /* disarm fault handler */
   484         ret
   485 END(suword64)
   486 END(suword)
  487 
/* Store 32-bit value %esi to user address %rdi; 0 on success, -1 on fault. */
   488 ENTRY(suword32)
   489         movq    PCPU(CURPCB),%rcx
   490         movq    $fusufault,PCB_ONFAULT(%rcx)    /* fault -> return -1 */
   491 
   492         movq    $VM_MAXUSER_ADDRESS-4,%rax      /* 4-byte access bound */
   493         cmpq    %rax,%rdi                       /* verify address validity */
   494         ja      fusufault
   495 
   496         movl    %esi,(%rdi)             /* store the word */
   497         xorl    %eax,%eax               /* success: return 0 */
   498         movq    PCPU(CURPCB),%rcx
   499         movq    %rax,PCB_ONFAULT(%rcx)  /* disarm fault handler */
   500         ret
   501 END(suword32)
  502 
/* Store 16-bit value %si to user address %rdi; 0 on success, -1 on fault. */
   503 ENTRY(suword16)
   504         movq    PCPU(CURPCB),%rcx
   505         movq    $fusufault,PCB_ONFAULT(%rcx)    /* fault -> return -1 */
   506 
   507         movq    $VM_MAXUSER_ADDRESS-2,%rax      /* 2-byte access bound */
   508         cmpq    %rax,%rdi                       /* verify address validity */
   509         ja      fusufault
   510 
   511         movw    %si,(%rdi)              /* store the halfword */
   512         xorl    %eax,%eax               /* success: return 0 */
   513         movq    PCPU(CURPCB),%rcx               /* restore trashed register */
   514         movq    %rax,PCB_ONFAULT(%rcx)  /* disarm fault handler */
   515         ret
   516 END(suword16)
  517 
/* Store byte (low 8 bits of %esi) to user address %rdi; 0 ok, -1 on fault. */
   518 ENTRY(subyte)
   519         movq    PCPU(CURPCB),%rcx
   520         movq    $fusufault,PCB_ONFAULT(%rcx)    /* fault -> return -1 */
   521 
   522         movq    $VM_MAXUSER_ADDRESS-1,%rax      /* 1-byte access bound */
   523         cmpq    %rax,%rdi                       /* verify address validity */
   524         ja      fusufault
   525 
   526         movl    %esi,%eax
   527         movb    %al,(%rdi)              /* store the byte */
   528         xorl    %eax,%eax               /* success: return 0 */
   529         movq    PCPU(CURPCB),%rcx               /* restore trashed register */
   530         movq    %rax,PCB_ONFAULT(%rcx)  /* disarm fault handler */
   531         ret
   532 END(subyte)
  533 
   534 /*
   535  * copyinstr(from, to, maxlen, int *lencopied) - MP SAFE
   536  *           %rdi, %rsi, %rdx, %rcx
   537  *
   538  *      copy a string from from to to, stop when a 0 character is reached.
   539  *      return ENAMETOOLONG if string is longer than maxlen, and
   540  *      EFAULT on protection violations. If lencopied is non-zero,
   541  *      return the actual length in *lencopied.
   542  */
   543 ENTRY(copyinstr)
   544         movq    %rdx,%r8                        /* %r8 = maxlen */
   545         movq    %rcx,%r9                        /* %r9 = *len */
   546         xchgq   %rdi,%rsi                       /* %rdi = to, %rsi = from */
   547         movq    PCPU(CURPCB),%rcx
   548         movq    $cpystrflt,PCB_ONFAULT(%rcx)    /* arm fault handler */
   549 
   550         movq    $VM_MAXUSER_ADDRESS,%rax
   551 
   552         /* make sure 'from' is within bounds */
   553         subq    %rsi,%rax               /* %rax = bytes until user limit */
   554         jbe     cpystrflt
   555 
   556         /* restrict maxlen to <= VM_MAXUSER_ADDRESS-from */
   557         cmpq    %rdx,%rax
   558         jae     1f
   559         movq    %rax,%rdx               /* clamp working count */
   560         movq    %rax,%r8                /* and the saved maxlen */
   561 1:
   562         incq    %rdx                    /* pre-bias for leading decq */
   563         cld
   564 
   565 2:
   566         decq    %rdx
   567         jz      3f                      /* count exhausted */
   568 
   569         lodsb                           /* byte from user (%rsi) */
   570         stosb                           /* to kernel buffer (%rdi) */
   571         orb     %al,%al                 /* NUL terminator copied? */
   572         jnz     2b
   573 
   574         /* Success -- 0 byte reached */
   575         decq    %rdx
   576         xorl    %eax,%eax               /* return 0 */
   577         jmp     cpystrflt_x
   578 3:
   579         /* rdx is zero - return ENAMETOOLONG or EFAULT */
   580         movq    $VM_MAXUSER_ADDRESS,%rax
   581         cmpq    %rax,%rsi               /* stopped at the user limit? */
   582         jae     cpystrflt
   583 4:
   584         movq    $ENAMETOOLONG,%rax
   585         jmp     cpystrflt_x
   586 
   587 cpystrflt:
   588         movq    $EFAULT,%rax
   589 
   590 cpystrflt_x:
   591         /* set *lencopied and return %eax */
   592         movq    PCPU(CURPCB),%rcx
   593         movq    $0,PCB_ONFAULT(%rcx)    /* disarm fault handler */
   594 
   595         testq   %r9,%r9                 /* lencopied requested? */
   596         jz      1f
   597         subq    %rdx,%r8                /* bytes copied = maxlen - left */
   598         movq    %r8,(%r9)
   599 1:
   600         ret
   601 END(copyinstr)
  602 
   603 /*
   604  * copystr(from, to, maxlen, int *lencopied) - MP SAFE
   605  *         %rdi, %rsi, %rdx, %rcx
   606  * Kernel-to-kernel string copy; no user-address checks or fault handler.
   607  */
   607 ENTRY(copystr)
   608         movq    %rdx,%r8                        /* %r8 = maxlen */
   609 
   610         xchgq   %rdi,%rsi               /* movs convention: src=%rsi */
   611         incq    %rdx                    /* pre-bias for leading decq */
   612         cld
   613 1:
   614         decq    %rdx
   615         jz      4f                      /* maxlen exhausted */
   616         lodsb
   617         stosb
   618         orb     %al,%al                 /* NUL terminator copied? */
   619         jnz     1b
   620 
   621         /* Success -- 0 byte reached */
   622         decq    %rdx
   623         xorl    %eax,%eax               /* return 0 */
   624         jmp     6f
   625 4:
   626         /* rdx is zero -- return ENAMETOOLONG */
   627         movq    $ENAMETOOLONG,%rax
   628 
   629 6:
   630 
   631         testq   %rcx,%rcx               /* lencopied requested? */
   632         jz      7f
   633         /* set *lencopied and return %rax */
   634         subq    %rdx,%r8                /* bytes copied = maxlen - left */
   635         movq    %r8,(%rcx)
   636 7:
   637         ret
   638 END(copystr)
  639 
   640 /*
   641  * Handling of special amd64 registers and descriptor tables etc
   642  * %rdi
   643  */
   644 /* void lgdt(struct region_descriptor *rdp); */
   645 ENTRY(lgdt)
   646         /* reload the descriptor table */
   647         lgdt    (%rdi)
   648 
   649         /* flush the prefetch q */
   650         jmp     1f
   651         nop
   652 1:
   653         movl    $KDSEL,%eax             /* kernel data selector */
   654         movl    %eax,%ds
   655         movl    %eax,%es
   656         movl    %eax,%fs        /* Beware, use wrmsr to set 64 bit base */
   657         movl    %eax,%gs
   658         movl    %eax,%ss
   659 
   660         /* reload code selector by turning return into intersegmental return */
   661         popq    %rax                    /* return address */
   662         pushq   $KCSEL                  /* kernel code selector */
   663         pushq   %rax
   664         MEXITCOUNT
   665         lretq                           /* far return reloads %cs */
   666 END(lgdt)
  667 
  668 /*****************************************************************************/
  669 /* setjump, longjump                                                         */
  670 /*****************************************************************************/
  671 
/*
 * In-kernel setjmp: save the callee-saved registers, stack pointer and
 * return address into the buffer at %rdi; returns 0 (longjmp returns 1).
 */
   672 ENTRY(setjmp)
   673         movq    %rbx,0(%rdi)                    /* save rbx */
   674         movq    %rsp,8(%rdi)                    /* save rsp */
   675         movq    %rbp,16(%rdi)                   /* save rbp */
   676         movq    %r12,24(%rdi)                   /* save r12 */
   677         movq    %r13,32(%rdi)                   /* save r13 */
   678         movq    %r14,40(%rdi)                   /* save r14 */
   679         movq    %r15,48(%rdi)                   /* save r15 */
   680         movq    0(%rsp),%rdx                    /* get rta */
   681         movq    %rdx,56(%rdi)                   /* save rip */
   682         xorl    %eax,%eax                       /* return(0); */
   683         ret
   684 END(setjmp)
  685 
/*
 * In-kernel longjmp: restore the context saved by setjmp from the buffer
 * at %rdi and return 1 at the saved call site.
 */
   686 ENTRY(longjmp)
   687         movq    0(%rdi),%rbx                    /* restore rbx */
   688         movq    8(%rdi),%rsp                    /* restore rsp */
   689         movq    16(%rdi),%rbp                   /* restore rbp */
   690         movq    24(%rdi),%r12                   /* restore r12 */
   691         movq    32(%rdi),%r13                   /* restore r13 */
   692         movq    40(%rdi),%r14                   /* restore r14 */
   693         movq    48(%rdi),%r15                   /* restore r15 */
   694         movq    56(%rdi),%rdx                   /* get rta */
   695         movq    %rdx,0(%rsp)                    /* put in return frame */
   696         xorl    %eax,%eax                       /* return(1); */
   697         incl    %eax
   698         ret
   699 END(longjmp)
  700 
   701 /*
   702  * Support for BB-profiling (gcc -a).  The kernbb program will extract
   703  * the data from the kernel.
   704  */
   705 
   706         .data
   707         ALIGN_DATA
   708         .globl bbhead
   709 bbhead:
   710         .quad 0                         /* head of the bb record list */
   711 
   712         .text
   713 NON_GPROF_ENTRY(__bb_init_func)
   714         movq    $1,(%rdi)               /* mark this record initialized */
   715         movq    bbhead,%rax             /* link record into bbhead list */
   716         movq    %rax,32(%rdi)
   717         movq    %rdi,bbhead
   718         NON_GPROF_RET
  719 
  720 /*
  721  * Support for reading MSRs in the safe manner.
  722  */
  723 ENTRY(rdmsr_safe)
  724 /* int rdmsr_safe(u_int msr, uint64_t *data) */
  725         movq    PCPU(CURPCB),%r8
  726         movq    $msr_onfault,PCB_ONFAULT(%r8)
  727         movl    %edi,%ecx
  728         rdmsr                   /* Read MSR pointed by %ecx. Returns
  729                                    hi byte in edx, lo in %eax */
  730         salq    $32,%rdx        /* sign-shift %rdx left */
  731         cltq                    /* sign-extend %eax -> %rax */
  732         orq     %rdx,%rax
  733         movq    %rax,(%rsi)
  734         xorq    %rax,%rax
  735         movq    %rax,PCB_ONFAULT(%r8)
  736         ret
  737 
   738 /*
   739  * Support for writing MSRs in the safe manner.
   740  */
   741 ENTRY(wrmsr_safe)
   742 /* int wrmsr_safe(u_int msr, uint64_t data) */
   743         movq    PCPU(CURPCB),%r8
   744         movq    $msr_onfault,PCB_ONFAULT(%r8)   /* GP fault -> msr_onfault */
   745         movl    %edi,%ecx
   746         movl    %esi,%eax               /* low 32 bits of data */
   747         sarq    $32,%rsi
   748         movl    %esi,%edx               /* high 32 bits of data */
   749         wrmsr                   /* Write MSR pointed by %ecx. Accepts
   750                                    high 32 bits in %edx, low in %eax. */
   751         xorq    %rax,%rax               /* return 0 (success) */
   752         movq    %rax,PCB_ONFAULT(%r8)   /* disarm fault handler */
   753         ret
  754 
   755 /*
   756  * MSR operations fault handler
   757  * Reached via PCB_ONFAULT if rdmsr/wrmsr faults; %r8 = curpcb.
   758  */
   758         ALIGN_TEXT
   759 msr_onfault:
   760         movq    $0,PCB_ONFAULT(%r8)     /* disarm fault handler */
   761         movl    $EFAULT,%eax            /* report failure */
   762         ret

Cache object: ff63c51bc8e541cec737ca9222a5396e


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.