The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/i386/i386/support.s

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*-
    2  * Copyright (c) 1993 The Regents of the University of California.
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  * 4. Neither the name of the University nor the names of its contributors
   14  *    may be used to endorse or promote products derived from this software
   15  *    without specific prior written permission.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   27  * SUCH DAMAGE.
   28  *
   29  * $FreeBSD: releng/11.1/sys/i386/i386/support.s 330908 2018-03-14 04:00:00Z gordon $
   30  */
   31 
   32 #include <machine/asmacros.h>
   33 #include <machine/cputypes.h>
   34 #include <machine/pmap.h>
   35 #include <machine/specialreg.h>
   36 
   37 #include "assym.s"
   38 
   39 #define IDXSHIFT        10
   40 
   41         .text
   42 
/*
 * bcopy family
 * void bzero(void *buf, u_int len)
 *
 * Zero len bytes at buf: bulk-clear with 32-bit string stores, then
 * finish the 0-3 trailing bytes with byte stores.
 */
ENTRY(bzero)
	pushl	%edi
	movl	8(%esp),%edi		/* buf (arg offsets include saved %edi) */
	movl	12(%esp),%ecx		/* len */
	xorl	%eax,%eax		/* value to store: 0 */
	shrl	$2,%ecx			/* whole 32-bit words to clear */
	cld				/* string ops ascend */
	rep
	stosl
	movl	12(%esp),%ecx		/* reload len ... */
	andl	$3,%ecx			/* ... for the 0-3 byte tail */
	rep
	stosb
	popl	%edi
	ret
END(bzero)
   63 
/*
 * void sse2_pagezero(void *page)
 *
 * Zero one 4096-byte page using non-temporal (movnti) stores so the
 * cleared page does not displace useful data from the caches.  The
 * sfence makes the weakly-ordered NT stores visible before return.
 */
ENTRY(sse2_pagezero)
	pushl	%ebx
	movl	8(%esp),%ecx		/* ecx = store cursor, starts at page */
	movl	%ecx,%eax
	addl	$4096,%eax		/* eax = end of page */
	xor	%ebx,%ebx		/* value to store: 0 */
1:
	movnti	%ebx,(%ecx)		/* non-temporal 32-bit store */
	addl	$4,%ecx
	cmpl	%ecx,%eax
	jne	1b
	sfence				/* order the NT stores before returning */
	popl	%ebx
	ret
END(sse2_pagezero)
   79 
/*
 * void i686_pagezero(void *page)
 *
 * Zero a 4096-byte page, but first scan with "repe scasl" for runs of
 * dwords that are already zero and rewrite only the regions holding
 * non-zero data, reducing write traffic on mostly-zero pages.
 */
ENTRY(i686_pagezero)
	pushl	%edi
	pushl	%ebx

	movl	12(%esp),%edi		/* edi = page */
	movl	$1024,%ecx		/* 1024 dwords = 4096 bytes */
	cld

	ALIGN_TEXT
1:
	xorl	%eax,%eax
	repe
	scasl				/* skip dwords that are already zero */
	jnz	2f			/* found a non-zero dword */

	popl	%ebx			/* rest of page was zero: done */
	popl	%edi
	ret

	ALIGN_TEXT

2:
	incl	%ecx			/* scasl consumed the mismatching dword; */
	subl	$4,%edi			/* back up so it gets rewritten too */

	movl	%ecx,%edx		/* edx = dwords remaining in the page */
	cmpl	$16,%ecx

	jge	3f			/* >= 16 left: just zero 'ecx' dwords */

	/*
	 * Fewer than 16 dwords remain: zero only up to the end of the
	 * current 64-byte cache line (16 dwords) and rescan from there.
	 */
	movl	%edi,%ebx
	andl	$0x3f,%ebx		/* byte offset within the cache line */
	shrl	%ebx
	shrl	%ebx			/* convert to a dword offset */
	movl	$16,%ecx
	subl	%ebx,%ecx		/* dwords to the end of the line */

3:
	subl	%ecx,%edx		/* edx = dwords left after this write */
	rep
	stosl				/* zero 'ecx' dwords */

	movl	%edx,%ecx
	testl	%edx,%edx
	jnz	1b			/* resume scanning the remainder */

	popl	%ebx
	popl	%edi
	ret
END(i686_pagezero)
  130 
/* fillw(pat, base, cnt) */
/*
 * Store 'cnt' copies of the 16-bit pattern 'pat' starting at 'base'.
 */
ENTRY(fillw)
	pushl	%edi
	movl	8(%esp),%eax		/* pat (low 16 bits used by stosw) */
	movl	12(%esp),%edi		/* base */
	movl	16(%esp),%ecx		/* cnt */
	cld				/* fill upward */
	rep
	stosw
	popl	%edi
	ret
END(fillw)
  143 
/*
 * void bcopyb(const void *src, void *dst, size_t cnt)
 *
 * Byte-wise copy that tolerates overlap: the unsigned test of
 * (dst - src) < cnt detects the one dangerous case (src < dst inside
 * the source region), in which the copy runs backwards so source
 * bytes are read before they are overwritten.
 */
ENTRY(bcopyb)
	pushl	%esi
	pushl	%edi
	movl	12(%esp),%esi		/* src */
	movl	16(%esp),%edi		/* dst */
	movl	20(%esp),%ecx		/* cnt */
	movl	%edi,%eax
	subl	%esi,%eax
	cmpl	%ecx,%eax			/* overlapping && src < dst? */
	jb	1f
	cld					/* nope, copy forwards */
	rep
	movsb
	popl	%edi
	popl	%esi
	ret

	ALIGN_TEXT
1:
	addl	%ecx,%edi			/* copy backwards. */
	addl	%ecx,%esi
	decl	%edi				/* start at the last byte */
	decl	%esi
	std					/* string ops descend */
	rep
	movsb
	popl	%edi
	popl	%esi
	cld					/* restore the expected direction */
	ret
END(bcopyb)
  175 
/*
 * bcopy(src, dst, cnt)
 *  ws@tools.de     (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
 *
 * Overlap-safe copy done with 32-bit words where possible.  The
 * unsigned compare of (dst - src) against cnt detects the dangerous
 * overlap case (src < dst < src + cnt) with a single test.
 */
ENTRY(bcopy)
	pushl	%ebp
	movl	%esp,%ebp
	pushl	%esi
	pushl	%edi
	movl	8(%ebp),%esi		/* src */
	movl	12(%ebp),%edi		/* dst */
	movl	16(%ebp),%ecx		/* cnt */

	movl	%edi,%eax
	subl	%esi,%eax
	cmpl	%ecx,%eax			/* overlapping && src < dst? */
	jb	1f

	shrl	$2,%ecx				/* copy by 32-bit words */
	cld					/* nope, copy forwards */
	rep
	movsl
	movl	16(%ebp),%ecx
	andl	$3,%ecx				/* any bytes left? */
	rep
	movsb
	popl	%edi
	popl	%esi
	popl	%ebp
	ret

	ALIGN_TEXT
1:
	/*
	 * Backwards copy: do the 0-3 trailing bytes first, then the
	 * remaining whole words, all descending.
	 */
	addl	%ecx,%edi			/* copy backwards */
	addl	%ecx,%esi
	decl	%edi
	decl	%esi
	andl	$3,%ecx				/* any fractional bytes? */
	std
	rep
	movsb
	movl	16(%ebp),%ecx			/* copy remainder by 32-bit words */
	shrl	$2,%ecx
	subl	$3,%esi				/* step back to the last whole word */
	subl	$3,%edi
	rep
	movsl
	popl	%edi
	popl	%esi
	cld					/* restore the expected direction */
	popl	%ebp
	ret
END(bcopy)
  229 
/*
 * Note: memcpy does not support overlapping copies
 *
 * void *memcpy(void *dst, const void *src, size_t cnt)
 * Returns dst (held in %eax) per the C library contract.
 */
ENTRY(memcpy)
	pushl	%edi
	pushl	%esi
	movl	12(%esp),%edi		/* dst */
	movl	16(%esp),%esi		/* src */
	movl	20(%esp),%ecx		/* cnt */
	movl	%edi,%eax		/* return value: dst */
	shrl	$2,%ecx			/* copy by 32-bit words */
	cld				/* copy forwards */
	rep
	movsl
	movl	20(%esp),%ecx
	andl	$3,%ecx			/* any bytes left? */
	rep
	movsb
	popl	%esi
	popl	%edi
	ret
END(memcpy)
  252 
  253 /*****************************************************************************/
  254 /* copyout and fubyte family                                                 */
  255 /*****************************************************************************/
  256 /*
  257  * Access user memory from inside the kernel. These routines and possibly
  258  * the math- and DOS emulators should be the only places that do this.
  259  *
  260  * We have to access the memory with user's permissions, so use a segment
  261  * selector with RPL 3. For writes to user space we have to additionally
  262  * check the PTE for write permission, because the 386 does not check
  263  * write permissions when we are executing with EPL 0. The 486 does check
  264  * this if the WP bit is set in CR0, so we can use a simpler version here.
  265  *
  266  * These routines set curpcb->pcb_onfault for the time they execute. When a
  267  * protection violation occurs inside the functions, the trap handler
  268  * returns to *curpcb->pcb_onfault instead of the function.
  269  */
  270 
/*
 * copyout(from_kernel, to_user, len)  - MP SAFE
 *
 * Copy len bytes from a kernel buffer to user space.  Returns 0 on
 * success or EFAULT if the destination range is outside user space or
 * an access faults (the trap handler resumes at copyout_fault via
 * curpcb->pcb_onfault).
 */
ENTRY(copyout)
	movl	PCPU(CURPCB),%eax
	movl	$copyout_fault,PCB_ONFAULT(%eax)	/* arm fault recovery */
	pushl	%esi
	pushl	%edi
	pushl	%ebx
	movl	16(%esp),%esi		/* from_kernel */
	movl	20(%esp),%edi		/* to_user */
	movl	24(%esp),%ebx		/* len */
	testl	%ebx,%ebx			/* anything to do? */
	jz	done_copyout

	/*
	 * Check explicitly for non-user addresses.  If 486 write protection
	 * is being used, this check is essential because we are in kernel
	 * mode so the h/w does not provide any protection against writing
	 * kernel addresses.
	 */

	/*
	 * First, prevent address wrapping.
	 */
	movl	%edi,%eax
	addl	%ebx,%eax
	jc	copyout_fault
/*
 * XXX STOP USING VM_MAXUSER_ADDRESS.
 * It is an end address, not a max, so every time it is used correctly it
 * looks like there is an off by one error, and of course it caused an off
 * by one error in several places.
 */
	cmpl	$VM_MAXUSER_ADDRESS,%eax
	ja	copyout_fault

	/* bcopy(%esi, %edi, %ebx) */
	movl	%ebx,%ecx

	shrl	$2,%ecx			/* whole 32-bit words first */
	cld
	rep
	movsl
	movb	%bl,%cl
	andb	$3,%cl			/* then the 0-3 trailing bytes */
	rep
	movsb

done_copyout:
	popl	%ebx
	popl	%edi
	popl	%esi
	xorl	%eax,%eax		/* success */
	movl	PCPU(CURPCB),%edx
	movl	%eax,PCB_ONFAULT(%edx)	/* disarm fault recovery */
	ret
END(copyout)

	ALIGN_TEXT
copyout_fault:
	/* Trap handler lands here on a protection fault during copyout. */
	popl	%ebx
	popl	%edi
	popl	%esi
	movl	PCPU(CURPCB),%edx
	movl	$0,PCB_ONFAULT(%edx)
	movl	$EFAULT,%eax
	ret
  339 
/*
 * copyin(from_user, to_kernel, len) - MP SAFE
 *
 * Copy len bytes from user space into a kernel buffer.  Returns 0 on
 * success or EFAULT on a bad or faulting user address (the trap
 * handler resumes at copyin_fault via curpcb->pcb_onfault).
 */
ENTRY(copyin)
	movl	PCPU(CURPCB),%eax
	movl	$copyin_fault,PCB_ONFAULT(%eax)	/* arm fault recovery */
	pushl	%esi
	pushl	%edi
	movl	12(%esp),%esi			/* caddr_t from */
	movl	16(%esp),%edi			/* caddr_t to */
	movl	20(%esp),%ecx			/* size_t  len */

	/*
	 * make sure address is valid: no wrap, and the whole source
	 * range lies below the end of the user address space
	 */
	movl	%esi,%edx
	addl	%ecx,%edx
	jc	copyin_fault
	cmpl	$VM_MAXUSER_ADDRESS,%edx
	ja	copyin_fault

	movb	%cl,%al				/* save len's low bits for the tail */
	shrl	$2,%ecx				/* copy longword-wise */
	cld
	rep
	movsl
	movb	%al,%cl
	andb	$3,%cl				/* copy remaining bytes */
	rep
	movsb

	popl	%edi
	popl	%esi
	xorl	%eax,%eax			/* success */
	movl	PCPU(CURPCB),%edx
	movl	%eax,PCB_ONFAULT(%edx)		/* disarm fault recovery */
	ret
END(copyin)

	ALIGN_TEXT
copyin_fault:
	/* Trap handler lands here on a protection fault during copyin. */
	popl	%edi
	popl	%esi
	movl	PCPU(CURPCB),%edx
	movl	$0,PCB_ONFAULT(%edx)
	movl	$EFAULT,%eax
	ret
  387 
/*
 * casueword.  Compare and set user word.  Returns -1 on fault,
 * 0 on non-faulting access.  The current value is in *oldp.
 *
 * Args: 4(%esp) = dst (user address), 8(%esp) = expected old value,
 * 12(%esp) = oldp (kernel pointer receiving the observed value),
 * 16(%esp) = new value.
 */
ALTENTRY(casueword32)
ENTRY(casueword)
	movl	PCPU(CURPCB),%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)	/* arm fault recovery */
	movl	4(%esp),%edx			/* dst */
	movl	8(%esp),%eax			/* old */
	movl	16(%esp),%ecx			/* new */

	cmpl	$VM_MAXUSER_ADDRESS-4,%edx	/* verify address is valid */
	ja	fusufault

#ifdef SMP
	lock					/* make the CAS atomic across CPUs */
#endif
	cmpxchgl %ecx,(%edx)			/* Compare and set. */

	/*
	 * The old value is in %eax.  If the store succeeded it will be the
	 * value we expected (old) from before the store, otherwise it will
	 * be the current value.
	 */

	movl	PCPU(CURPCB),%ecx
	movl	$0,PCB_ONFAULT(%ecx)		/* disarm fault recovery */
	movl	12(%esp),%edx			/* oldp */
	movl	%eax,(%edx)			/* report the observed value */
	xorl	%eax,%eax			/* non-faulting access: return 0 */
	ret
END(casueword32)
END(casueword)
  422 
/*
 * Fetch (load) a 32-bit word, a 16-bit word, or an 8-bit byte from user
 * memory.
 *
 * fueword/fueword32: args are 4(%esp) = user address, 8(%esp) = kernel
 * pointer receiving the word.  Returns 0 on success (word stored
 * through the pointer) or -1 on fault via fusufault.
 */
ALTENTRY(fueword32)
ENTRY(fueword)
	movl	PCPU(CURPCB),%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)	/* arm fault recovery */
	movl	4(%esp),%edx			/* from */

	cmpl	$VM_MAXUSER_ADDRESS-4,%edx	/* verify address is valid */
	ja	fusufault

	movl	(%edx),%eax			/* fetch; may fault */
	movl	$0,PCB_ONFAULT(%ecx)		/* disarm fault recovery */
	movl	8(%esp),%edx			/* result pointer */
	movl	%eax,(%edx)
	xorl	%eax,%eax			/* success */
	ret
END(fueword32)
END(fueword)
  445 
/*
 * fuswintr() and suswintr() are specialized variants of fuword16() and
 * suword16(), respectively.  They are called from the profiling code,
 * potentially at interrupt time.  If they fail, that's okay; good things
 * will happen later.  They always fail for now, until the trap code is
 * able to deal with this.
 */
ALTENTRY(suswintr)
ENTRY(fuswintr)
	movl	$-1,%eax		/* unconditional failure, by design */
	ret
END(suswintr)
END(fuswintr)
  459 
/*
 * Fetch a 16-bit word from user memory; returns the zero-extended
 * word, or -1 on fault (via fusufault).
 */
ENTRY(fuword16)
	movl	PCPU(CURPCB),%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)	/* arm fault recovery */
	movl	4(%esp),%edx

	cmpl	$VM_MAXUSER_ADDRESS-2,%edx	/* whole word must fit below limit */
	ja	fusufault

	movzwl	(%edx),%eax			/* fetch; may fault */
	movl	$0,PCB_ONFAULT(%ecx)		/* disarm fault recovery */
	ret
END(fuword16)
  472 
/*
 * Fetch an 8-bit byte from user memory; returns the zero-extended
 * byte, or -1 on fault (via fusufault).
 */
ENTRY(fubyte)
	movl	PCPU(CURPCB),%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)	/* arm fault recovery */
	movl	4(%esp),%edx

	cmpl	$VM_MAXUSER_ADDRESS-1,%edx	/* byte must lie below the limit */
	ja	fusufault

	movzbl	(%edx),%eax			/* fetch; may fault */
	movl	$0,PCB_ONFAULT(%ecx)		/* disarm fault recovery */
	ret
END(fubyte)
  485 
	ALIGN_TEXT
/*
 * Common fault recovery for the fuword/fubyte/suword/subyte family:
 * the trap handler resumes here via pcb_onfault.  Clears pcb_onfault
 * and returns -1 to the original caller.
 */
fusufault:
	movl	PCPU(CURPCB),%ecx
	xorl	%eax,%eax
	movl	%eax,PCB_ONFAULT(%ecx)	/* disarm fault recovery */
	decl	%eax			/* return -1 */
	ret
  493 
/*
 * Store a 32-bit word, a 16-bit word, or an 8-bit byte to user memory.
 * All these functions are MPSAFE.
 *
 * suword/suword32: 4(%esp) = user address, 8(%esp) = word to store.
 * Returns 0 on success, -1 on fault (via fusufault).
 */
ALTENTRY(suword32)
ENTRY(suword)
	movl	PCPU(CURPCB),%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)	/* arm fault recovery */
	movl	4(%esp),%edx

	cmpl	$VM_MAXUSER_ADDRESS-4,%edx	/* verify address validity */
	ja	fusufault

	movl	8(%esp),%eax
	movl	%eax,(%edx)			/* store; may fault */
	xorl	%eax,%eax			/* success */
	movl	PCPU(CURPCB),%ecx
	movl	%eax,PCB_ONFAULT(%ecx)		/* disarm fault recovery */
	ret
END(suword32)
END(suword)
  516 
/*
 * Store a 16-bit word to user memory.  Returns 0 on success, -1 on
 * fault (via fusufault).
 */
ENTRY(suword16)
	movl	PCPU(CURPCB),%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)	/* arm fault recovery */
	movl	4(%esp),%edx

	cmpl	$VM_MAXUSER_ADDRESS-2,%edx	/* verify address validity */
	ja	fusufault

	movw	8(%esp),%ax
	movw	%ax,(%edx)			/* store; may fault */
	xorl	%eax,%eax			/* success */
	movl	PCPU(CURPCB),%ecx		/* restore trashed register */
	movl	%eax,PCB_ONFAULT(%ecx)		/* disarm fault recovery */
	ret
END(suword16)
  532 
/*
 * Store an 8-bit byte to user memory.  Returns 0 on success, -1 on
 * fault (via fusufault).
 */
ENTRY(subyte)
	movl	PCPU(CURPCB),%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)	/* arm fault recovery */
	movl	4(%esp),%edx

	cmpl	$VM_MAXUSER_ADDRESS-1,%edx	/* verify address validity */
	ja	fusufault

	movb	8(%esp),%al
	movb	%al,(%edx)			/* store; may fault */
	xorl	%eax,%eax			/* success */
	movl	PCPU(CURPCB),%ecx		/* restore trashed register */
	movl	%eax,PCB_ONFAULT(%ecx)		/* disarm fault recovery */
	ret
END(subyte)
  548 
/*
 * copyinstr(from, to, maxlen, int *lencopied) - MP SAFE
 *
 *	copy a string from 'from' (user space) to 'to' (kernel), stop
 *	when a 0 character is reached.  return ENAMETOOLONG if string
 *	is longer than maxlen, and EFAULT on protection violations.
 *	If lencopied is non-zero, return the actual length in
 *	*lencopied; on success that length includes the terminating
 *	NUL byte.
 */
ENTRY(copyinstr)
	pushl	%esi
	pushl	%edi
	movl	PCPU(CURPCB),%ecx
	movl	$cpystrflt,PCB_ONFAULT(%ecx)	/* arm fault recovery */

	movl	12(%esp),%esi			/* %esi = from */
	movl	16(%esp),%edi			/* %edi = to */
	movl	20(%esp),%edx			/* %edx = maxlen */

	movl	$VM_MAXUSER_ADDRESS,%eax

	/* make sure 'from' is within bounds */
	subl	%esi,%eax
	jbe	cpystrflt

	/* restrict maxlen to <= VM_MAXUSER_ADDRESS-from */
	cmpl	%edx,%eax
	jae	1f
	movl	%eax,%edx
	movl	%eax,20(%esp)			/* clamp the saved maxlen too */
1:
	incl	%edx				/* pre-bias for the leading decl */
	cld

2:
	decl	%edx
	jz	3f				/* ran out of room */

	lodsb					/* may fault (user source) */
	stosb
	orb	%al,%al
	jnz	2b

	/* Success -- 0 byte reached */
	decl	%edx				/* adjust residual for length calc */
	xorl	%eax,%eax
	jmp	cpystrflt_x
3:
	/* edx is zero - return ENAMETOOLONG or EFAULT */
	cmpl	$VM_MAXUSER_ADDRESS,%esi
	jae	cpystrflt			/* stopped at the address limit */
4:
	movl	$ENAMETOOLONG,%eax
	jmp	cpystrflt_x

cpystrflt:
	movl	$EFAULT,%eax

cpystrflt_x:
	/* set *lencopied and return %eax */
	movl	PCPU(CURPCB),%ecx
	movl	$0,PCB_ONFAULT(%ecx)		/* disarm fault recovery */
	movl	20(%esp),%ecx
	subl	%edx,%ecx			/* copied = maxlen - residual */
	movl	24(%esp),%edx			/* lencopied */
	testl	%edx,%edx
	jz	1f
	movl	%ecx,(%edx)
1:
	popl	%edi
	popl	%esi
	ret
END(copyinstr)
  621 
/*
 * copystr(from, to, maxlen, int *lencopied) - MP SAFE
 *
 * Kernel-to-kernel counterpart of copyinstr(): no user-address checks
 * and no fault recovery.  Returns 0 or ENAMETOOLONG; on success the
 * length stored in *lencopied includes the terminating NUL byte.
 */
ENTRY(copystr)
	pushl	%esi
	pushl	%edi

	movl	12(%esp),%esi			/* %esi = from */
	movl	16(%esp),%edi			/* %edi = to */
	movl	20(%esp),%edx			/* %edx = maxlen */
	incl	%edx				/* pre-bias for the leading decl */
	cld
1:
	decl	%edx
	jz	4f				/* ran out of room */
	lodsb
	stosb
	orb	%al,%al
	jnz	1b

	/* Success -- 0 byte reached */
	decl	%edx				/* adjust residual for length calc */
	xorl	%eax,%eax
	jmp	6f
4:
	/* edx is zero -- return ENAMETOOLONG */
	movl	$ENAMETOOLONG,%eax

6:
	/* set *lencopied and return %eax */
	movl	20(%esp),%ecx
	subl	%edx,%ecx			/* copied = maxlen - residual */
	movl	24(%esp),%edx
	testl	%edx,%edx
	jz	7f
	movl	%ecx,(%edx)
7:
	popl	%edi
	popl	%esi
	ret
END(copystr)
  663 
/*
 * int bcmp(const void *b1, const void *b2, size_t len)
 *
 * Compare two byte strings; returns 0 if identical, 1 otherwise
 * (callers may only rely on the zero / non-zero distinction).
 */
ENTRY(bcmp)
	pushl	%edi
	pushl	%esi
	movl	12(%esp),%edi
	movl	16(%esp),%esi
	movl	20(%esp),%edx		/* edx = len, kept for the tail */

	movl	%edx,%ecx
	shrl	$2,%ecx			/* compare whole 32-bit words first */
	cld					/* compare forwards */
	repe
	cmpsl
	jne	1f

	movl	%edx,%ecx
	andl	$3,%ecx			/* then the 0-3 trailing bytes */
	repe
	cmpsb
1:
	setne	%al				/* ZF clear -> a mismatch was seen */
	movsbl	%al,%eax			/* widen 0/1 to a full int */
	popl	%esi
	popl	%edi
	ret
END(bcmp)
  689 
/*
 * Handling of special 386 registers and descriptor tables etc
 */
/* void lgdt(struct region_descriptor *rdp); */
/*
 * Load a new GDT and force every segment register (including %cs, via
 * a far return) to reload its descriptor from the new table.
 */
ENTRY(lgdt)
	/* reload the descriptor table */
	movl	4(%esp),%eax
	lgdt	(%eax)

	/* flush the prefetch q */
	jmp	1f
	nop
1:
	/* reload "stale" selectors */
	movl	$KDSEL,%eax
	movl	%eax,%ds
	movl	%eax,%es
	movl	%eax,%gs
	movl	%eax,%ss
	movl	$KPSEL,%eax
	movl	%eax,%fs

	/* reload code selector by turning return into intersegmental return */
	movl	(%esp),%eax
	pushl	%eax			/* stack now: eip, KCSEL for lret */
	movl	$KCSEL,4(%esp)
	MEXITCOUNT
	lret				/* far return also reloads %cs */
END(lgdt)
  719 
/* ssdtosd(*ssdp,*sdp) */
/*
 * Convert a machine-independent "soft" segment descriptor (*ssdp)
 * into the packed hardware segment-descriptor layout at *sdp.
 * Pure bit shuffling between the three input words and the two
 * output words; %ebx is the only callee-saved register used and it
 * is preserved.
 */
ENTRY(ssdtosd)
	pushl	%ebx
	movl	8(%esp),%ecx		/* ecx = ssdp (input) */
	movl	8(%ecx),%ebx		/* third input word */
	shll	$16,%ebx
	movl	(%ecx),%edx		/* first input word */
	roll	$16,%edx
	movb	%dh,%bl
	movb	%dl,%bh
	rorl	$8,%ebx
	movl	4(%ecx),%eax		/* second input word */
	movw	%ax,%dx
	andl	$0xf0000,%eax		/* keep bits 16-19 only */
	orl	%eax,%ebx
	movl	12(%esp),%ecx		/* ecx = sdp (output) */
	movl	%edx,(%ecx)		/* low half of the descriptor */
	movl	%ebx,4(%ecx)		/* high half of the descriptor */
	popl	%ebx
	ret
END(ssdtosd)
  741 
/* void reset_dbregs() */
/*
 * Clear the x86 debug registers: disable all hardware breakpoints via
 * %dr7 first, then zero the address registers and the %dr6 status.
 */
ENTRY(reset_dbregs)
	movl	$0,%eax
	movl	%eax,%dr7	/* disable all breakpoints first */
	movl	%eax,%dr0
	movl	%eax,%dr1
	movl	%eax,%dr2
	movl	%eax,%dr3
	movl	%eax,%dr6	/* clear the debug status register */
	ret
END(reset_dbregs)
  753 
/*****************************************************************************/
/* setjump, longjump                                                         */
/*****************************************************************************/

/*
 * int setjmp(jmp_buf buf)
 *
 * Save the callee-saved register context and return address into
 * 'buf'.  Returns 0 from the direct call; a later longjmp() resumes
 * here returning 1.
 */
ENTRY(setjmp)
	movl	4(%esp),%eax
	movl	%ebx,(%eax)			/* save ebx */
	movl	%esp,4(%eax)			/* save esp */
	movl	%ebp,8(%eax)			/* save ebp */
	movl	%esi,12(%eax)			/* save esi */
	movl	%edi,16(%eax)			/* save edi */
	movl	(%esp),%edx			/* get rta */
	movl	%edx,20(%eax)			/* save eip */
	xorl	%eax,%eax			/* return(0); */
	ret
END(setjmp)
  770 
/*
 * void longjmp(jmp_buf buf)
 *
 * Restore the context saved by setjmp() and resume at the saved
 * return address; the resumed setjmp() call returns 1.
 */
ENTRY(longjmp)
	movl	4(%esp),%eax
	movl	(%eax),%ebx			/* restore ebx */
	movl	4(%eax),%esp			/* restore esp */
	movl	8(%eax),%ebp			/* restore ebp */
	movl	12(%eax),%esi			/* restore esi */
	movl	16(%eax),%edi			/* restore edi */
	movl	20(%eax),%edx			/* get rta */
	movl	%edx,(%esp)			/* put in return frame */
	xorl	%eax,%eax			/* return(1); */
	incl	%eax
	ret
END(longjmp)
  784 
/*
 * Support for reading MSRs in the safe manner.
 *
 * Returns 0 with the 64-bit MSR value stored in *data, or EFAULT (via
 * msr_onfault) if the rdmsr instruction faults.
 */
ENTRY(rdmsr_safe)
/* int rdmsr_safe(u_int msr, uint64_t *data) */
	movl	PCPU(CURPCB),%ecx
	movl	$msr_onfault,PCB_ONFAULT(%ecx)	/* arm fault recovery */

	movl	4(%esp),%ecx			/* msr number */
	rdmsr					/* edx:eax = value; may fault */
	movl	8(%esp),%ecx			/* data */
	movl	%eax,(%ecx)			/* low 32 bits */
	movl	%edx,4(%ecx)			/* high 32 bits */
	xorl	%eax,%eax			/* success */

	movl	PCPU(CURPCB),%ecx
	movl	%eax,PCB_ONFAULT(%ecx)		/* disarm fault recovery */

	ret
  804 
/*
 * Support for writing MSRs in the safe manner.
 *
 * Returns 0 on success, or EFAULT (via msr_onfault) if the wrmsr
 * instruction faults.
 */
ENTRY(wrmsr_safe)
/* int wrmsr_safe(u_int msr, uint64_t data) */
	movl	PCPU(CURPCB),%ecx
	movl	$msr_onfault,PCB_ONFAULT(%ecx)	/* arm fault recovery */

	movl	4(%esp),%ecx			/* msr number */
	movl	8(%esp),%eax			/* data, low 32 bits */
	movl	12(%esp),%edx			/* data, high 32 bits */
	wrmsr					/* may fault */
	xorl	%eax,%eax			/* success */

	movl	PCPU(CURPCB),%ecx
	movl	%eax,PCB_ONFAULT(%ecx)		/* disarm fault recovery */

	ret
  823 
/*
 * MSR operations fault handler
 *
 * Reached via pcb_onfault when rdmsr/wrmsr faults (e.g. on a
 * non-existent MSR).  Returns EFAULT to the rdmsr_safe / wrmsr_safe
 * caller.
 */
	ALIGN_TEXT
msr_onfault:
	movl	PCPU(CURPCB),%ecx
	movl	$0,PCB_ONFAULT(%ecx)	/* disarm fault recovery */
	movl	$EFAULT,%eax
	ret
  833 
/*
 * Speculation-control (IBRS) hook invoked on kernel entry.
 * No-op in this version: the body is a bare ret.
 */
ENTRY(handle_ibrs_entry)
	ret
END(handle_ibrs_entry)
  837 
/*
 * Speculation-control (IBRS) hook invoked on kernel exit.
 * No-op in this version: the body is a bare ret.
 */
ENTRY(handle_ibrs_exit)
	ret
END(handle_ibrs_exit)

Cache object: 15a1ab1fd37f2c61aab60e03ced721bb


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.