FreeBSD/Linux Kernel Cross Reference
sys/i386/i386/support.s


/*-
 * Copyright (c) 1993 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <machine/asmacros.h>
#include <machine/cputypes.h>
#include <machine/pmap.h>
#include <machine/specialreg.h>

#include "assym.inc"

#define IDXSHIFT        10

        .text

/*
 * bcopy family
 * void bzero(void *buf, u_int len)
 */
ENTRY(bzero)
        pushl   %edi
        movl    8(%esp),%edi
        movl    12(%esp),%ecx
        xorl    %eax,%eax
        shrl    $2,%ecx
        rep
        stosl
        movl    12(%esp),%ecx
        andl    $3,%ecx
        rep
        stosb
        popl    %edi
        ret
END(bzero)
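
/*
 * Illustrative sketch (editorial addition, not part of the kernel source):
 * the C-level behaviour that the bzero entry above implements, assuming the
 * void bzero(void *buf, u_int len) contract stated in the comment.  The
 * word loop corresponds to "rep stosl", the tail loop to "rep stosb".
 *
 *      void
 *      bzero_sketch(void *buf, unsigned int len)
 *      {
 *              uint32_t *wp = buf;
 *              unsigned char *bp;
 *              unsigned int i;
 *
 *              for (i = 0; i < len / 4; i++)           // rep stosl
 *                      *wp++ = 0;
 *              bp = (unsigned char *)wp;
 *              for (i = 0; i < len % 4; i++)           // rep stosb
 *                      *bp++ = 0;
 *      }
 */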

ENTRY(sse2_pagezero)
        pushl   %ebx
        movl    8(%esp),%ecx
        movl    %ecx,%eax
        addl    $4096,%eax
        xor     %ebx,%ebx
        jmp     1f
        /*
         * The loop takes 14 bytes.  Ensure that it doesn't cross a 16-byte
         * cache line.
         */
        .p2align 4,0x90
1:
        movnti  %ebx,(%ecx)
        movnti  %ebx,4(%ecx)
        addl    $8,%ecx
        cmpl    %ecx,%eax
        jne     1b
        sfence
        popl    %ebx
        ret
END(sse2_pagezero)
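
/*
 * Illustrative sketch (editorial addition, not part of the kernel source):
 * what sse2_pagezero does at the C level, assuming a 4096-byte page-aligned
 * argument and the SSE2 intrinsics from <emmintrin.h>.  MOVNTI is a
 * non-temporal store, so the page is zeroed without pulling its cache lines
 * into the data cache; the trailing SFENCE orders the weakly-ordered stores.
 *
 *      #include <emmintrin.h>
 *
 *      void
 *      sse2_pagezero_sketch(void *addr)
 *      {
 *              int *p = addr;
 *              int *end = p + 4096 / sizeof(int);
 *
 *              while (p < end) {
 *                      _mm_stream_si32(p, 0);          // movnti
 *                      _mm_stream_si32(p + 1, 0);      // movnti
 *                      p += 2;
 *              }
 *              _mm_sfence();                           // sfence
 *      }
 */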

ENTRY(i686_pagezero)
        pushl   %edi
        pushl   %ebx

        movl    12(%esp),%edi
        movl    $1024,%ecx

        ALIGN_TEXT
1:
        xorl    %eax,%eax
        repe
        scasl
        jnz     2f

        popl    %ebx
        popl    %edi
        ret

        ALIGN_TEXT

2:
        incl    %ecx
        subl    $4,%edi

        movl    %ecx,%edx
        cmpl    $16,%ecx

        jge     3f

        movl    %edi,%ebx
        andl    $0x3f,%ebx
        shrl    %ebx
        shrl    %ebx
        movl    $16,%ecx
        subl    %ebx,%ecx

3:
        subl    %ecx,%edx
        rep
        stosl

        movl    %edx,%ecx
        testl   %edx,%edx
        jnz     1b

        popl    %ebx
        popl    %edi
        ret
END(i686_pagezero)
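
/*
 * Illustrative sketch (editorial addition, not part of the kernel source):
 * i686_pagezero first scans the page ("repe scasl") and returns without a
 * single store if every word is already zero, so already-clean pages are
 * not dirtied in the cache; only once a non-zero word is found does it
 * start writing.  (The assembly also trims some store runs to 64-byte
 * chunks near the end of the page, which this sketch glosses over.)
 *
 *      void
 *      i686_pagezero_sketch(void *addr)
 *      {
 *              uint32_t *p = addr;
 *              unsigned int i = 0;
 *
 *              while (i < 4096 / 4 && p[i] == 0)       // repe scasl
 *                      i++;
 *              for (; i < 4096 / 4; i++)               // rep stosl
 *                      p[i] = 0;
 *      }
 */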

/* fillw(pat, base, cnt) */
ENTRY(fillw)
        pushl   %edi
        movl    8(%esp),%eax
        movl    12(%esp),%edi
        movl    16(%esp),%ecx
        rep
        stosw
        popl    %edi
        ret
END(fillw)
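
/*
 * Illustrative sketch (editorial addition, not part of the kernel source):
 * fillw stores the low 16 bits of "pat" into "cnt" consecutive 16-bit words
 * starting at "base", i.e. a single "rep stosw".
 *
 *      void
 *      fillw_sketch(int pat, void *base, size_t cnt)
 *      {
 *              uint16_t *p = base;
 *
 *              while (cnt-- != 0)                      // rep stosw
 *                      *p++ = (uint16_t)pat;
 *      }
 */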

/*
 * memmove(dst, src, cnt) (return dst)
 * bcopy(src, dst, cnt)
 *  ws@tools.de     (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
 */
ENTRY(bcopy)
        movl    4(%esp),%eax
        movl    8(%esp),%edx
        movl    %eax,8(%esp)
        movl    %edx,4(%esp)
        MEXITCOUNT
        jmp     memmove
END(bcopy)

ENTRY(memmove)
        pushl   %ebp
        movl    %esp,%ebp
        pushl   %esi
        pushl   %edi
        movl    8(%ebp),%edi
        movl    12(%ebp),%esi
1:
        movl    16(%ebp),%ecx

        movl    %edi,%eax
        subl    %esi,%eax
        cmpl    %ecx,%eax                       /* overlapping && src < dst? */
        jb      1f

        shrl    $2,%ecx                         /* copy by 32-bit words */
        rep
        movsl
        movl    16(%ebp),%ecx
        andl    $3,%ecx                         /* any bytes left? */
        rep
        movsb
        popl    %edi
        popl    %esi
        movl    8(%ebp),%eax                    /* return dst for memmove */
        popl    %ebp
        ret

        ALIGN_TEXT
1:
        addl    %ecx,%edi                       /* copy backwards */
        addl    %ecx,%esi
        decl    %edi
        decl    %esi
        andl    $3,%ecx                         /* any fractional bytes? */
        std
        rep
        movsb
        movl    16(%ebp),%ecx                   /* copy remainder by 32-bit words */
        shrl    $2,%ecx
        subl    $3,%esi
        subl    $3,%edi
        rep
        movsl
        popl    %edi
        popl    %esi
        cld
        movl    8(%ebp),%eax                    /* return dst for memmove */
        popl    %ebp
        ret
END(memmove)
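
/*
 * Illustrative sketch (editorial addition, not part of the kernel source)
 * of the overlap test used above: a forward copy is safe unless the
 * destination starts inside the source region (src < dst < src + cnt), in
 * which case the copy must run backwards so that bytes are read before
 * they are overwritten.  The single unsigned comparison "(dst - src) < cnt"
 * covers exactly that case.  The sketch copies byte-by-byte for clarity;
 * the assembly copies by 32-bit words where it can.
 *
 *      void *
 *      memmove_sketch(void *dst, const void *src, size_t cnt)
 *      {
 *              unsigned char *d = dst;
 *              const unsigned char *s = src;
 *
 *              if ((uintptr_t)d - (uintptr_t)s >= cnt) {
 *                      while (cnt-- != 0)              // forward copy
 *                              *d++ = *s++;
 *              } else {
 *                      d += cnt;
 *                      s += cnt;
 *                      while (cnt-- != 0)              // backward copy
 *                              *--d = *--s;
 *              }
 *              return (dst);
 *      }
 */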

/*
 * Note: memcpy does not support overlapping copies
 */
ENTRY(memcpy)
        pushl   %edi
        pushl   %esi
        movl    12(%esp),%edi
        movl    16(%esp),%esi
        movl    20(%esp),%ecx
        movl    %edi,%eax
        shrl    $2,%ecx                         /* copy by 32-bit words */
        rep
        movsl
        movl    20(%esp),%ecx
        andl    $3,%ecx                         /* any bytes left? */
        rep
        movsb
        popl    %esi
        popl    %edi
        ret
END(memcpy)

ENTRY(bcmp)
        pushl   %edi
        pushl   %esi
        movl    12(%esp),%edi
        movl    16(%esp),%esi
        movl    20(%esp),%edx

        movl    %edx,%ecx
        shrl    $2,%ecx
        repe
        cmpsl
        jne     1f

        movl    %edx,%ecx
        andl    $3,%ecx
        repe
        cmpsb
1:
        setne   %al
        movsbl  %al,%eax
        popl    %esi
        popl    %edi
        ret
END(bcmp)
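
/*
 * Illustrative sketch (editorial addition, not part of the kernel source):
 * bcmp only reports whether the two buffers differ (0 means identical,
 * non-zero means different); unlike memcmp it makes no promise about the
 * sign or magnitude of the result.
 *
 *      int
 *      bcmp_sketch(const void *b1, const void *b2, size_t len)
 *      {
 *              const unsigned char *p1 = b1, *p2 = b2;
 *
 *              while (len-- != 0)
 *                      if (*p1++ != *p2++)
 *                              return (1);
 *              return (0);
 *      }
 */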

/*
 * Handling of special 386 registers, descriptor tables, etc.
 */
/* void lgdt(struct region_descriptor *rdp); */
ENTRY(lgdt)
        /* reload the descriptor table */
        movl    4(%esp),%eax
        lgdt    (%eax)

        /* flush the prefetch queue */
        jmp     1f
        nop
1:
        /* reload "stale" selectors */
        movl    $KDSEL,%eax
        movl    %eax,%ds
        movl    %eax,%es
        movl    %eax,%gs
        movl    %eax,%ss
        movl    $KPSEL,%eax
        movl    %eax,%fs

        /* reload code selector by turning return into inter-segment return */
        movl    (%esp),%eax
        pushl   %eax
        movl    $KCSEL,4(%esp)
        MEXITCOUNT
        lret
END(lgdt)
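
/*
 * Editor's note: %cs cannot be loaded with a plain "mov", so lgdt() above
 * rewrites its own return: the saved return EIP is pushed a second time,
 * KCSEL is written over the slot where the old return address sat, and
 * "lret" then pops both EIP and CS, resuming in the caller with the new
 * code selector in force.
 */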

/* ssdtosd(*ssdp,*sdp) */
ENTRY(ssdtosd)
        pushl   %ebx
        movl    8(%esp),%ecx
        movl    8(%ecx),%ebx
        shll    $16,%ebx
        movl    (%ecx),%edx
        roll    $16,%edx
        movb    %dh,%bl
        movb    %dl,%bh
        rorl    $8,%ebx
        movl    4(%ecx),%eax
        movw    %ax,%dx
        andl    $0xf0000,%eax
        orl     %eax,%ebx
        movl    12(%esp),%ecx
        movl    %edx,(%ecx)
        movl    %ebx,4(%ecx)
        popl    %ebx
        ret
END(ssdtosd)
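
/*
 * Editor's note: ssdtosd() packs the flat base/limit/attribute fields of a
 * software segment descriptor (struct soft_segment_descriptor) into the
 * split bit layout of a hardware descriptor (struct segment_descriptor),
 * which scatters the base and limit across both 32-bit descriptor words;
 * hence the shift/rotate shuffling above.
 */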

/* void reset_dbregs() */
ENTRY(reset_dbregs)
        movl    $0,%eax
        movl    %eax,%dr7       /* disable all breakpoints first */
        movl    %eax,%dr0
        movl    %eax,%dr1
        movl    %eax,%dr2
        movl    %eax,%dr3
        movl    %eax,%dr6
        ret
END(reset_dbregs)

/*****************************************************************************/
/* setjmp, longjmp                                                           */
/*****************************************************************************/

ENTRY(setjmp)
        movl    4(%esp),%eax
        movl    %ebx,(%eax)                     /* save ebx */
        movl    %esp,4(%eax)                    /* save esp */
        movl    %ebp,8(%eax)                    /* save ebp */
        movl    %esi,12(%eax)                   /* save esi */
        movl    %edi,16(%eax)                   /* save edi */
        movl    (%esp),%edx                     /* get rta */
        movl    %edx,20(%eax)                   /* save eip */
        xorl    %eax,%eax                       /* return(0); */
        ret
END(setjmp)

ENTRY(longjmp)
        movl    4(%esp),%eax
        movl    (%eax),%ebx                     /* restore ebx */
        movl    4(%eax),%esp                    /* restore esp */
        movl    8(%eax),%ebp                    /* restore ebp */
        movl    12(%eax),%esi                   /* restore esi */
        movl    16(%eax),%edi                   /* restore edi */
        movl    20(%eax),%edx                   /* get rta */
        movl    %edx,(%esp)                     /* put in return frame */
        xorl    %eax,%eax                       /* return(1); */
        incl    %eax
        ret
END(longjmp)
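
/*
 * Illustrative sketch (editorial addition, not part of the kernel source):
 * typical use of the kernel setjmp()/longjmp() pair above, with "env"
 * standing for a caller-supplied save area large enough for the six 32-bit
 * values stored by setjmp (ebx, esp, ebp, esi, edi and the return EIP).
 * setjmp() returns 0 when it saves the context and 1 when control comes
 * back via longjmp(); risky_operation() and recover() are made-up names.
 *
 *      if (setjmp(env) == 0) {
 *              // direct return: context has been saved in env
 *              risky_operation();      // may eventually call longjmp(env)
 *      } else {
 *              // reached only when longjmp(env) unwinds back here
 *              recover();
 *      }
 */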

/*
 * Support for reading MSRs safely: instead of panicking on #GP, return an
 * error.
 */
ENTRY(rdmsr_safe)
/* int rdmsr_safe(u_int msr, uint64_t *data) */
        movl    PCPU(CURPCB),%ecx
        movl    $msr_onfault,PCB_ONFAULT(%ecx)

        movl    4(%esp),%ecx
        rdmsr
        movl    8(%esp),%ecx
        movl    %eax,(%ecx)
        movl    %edx,4(%ecx)
        xorl    %eax,%eax

        movl    PCPU(CURPCB),%ecx
        movl    %eax,PCB_ONFAULT(%ecx)

        ret

/*
 * Support for writing MSRs safely: instead of panicking on #GP, return an
 * error.
 */
ENTRY(wrmsr_safe)
/* int wrmsr_safe(u_int msr, uint64_t data) */
        movl    PCPU(CURPCB),%ecx
        movl    $msr_onfault,PCB_ONFAULT(%ecx)

        movl    4(%esp),%ecx
        movl    8(%esp),%eax
        movl    12(%esp),%edx
        wrmsr
        xorl    %eax,%eax

        movl    PCPU(CURPCB),%ecx
        movl    %eax,PCB_ONFAULT(%ecx)

        ret

/*
 * MSR operations fault handler
 */
        ALIGN_TEXT
msr_onfault:
        movl    PCPU(CURPCB),%ecx
        movl    $0,PCB_ONFAULT(%ecx)
        movl    $EFAULT,%eax
        ret

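/*
 * Illustrative sketch (editorial addition, not part of the kernel source):
 * how a caller uses the "safe" MSR accessors above.  While pcb_onfault
 * points at msr_onfault, a #GP raised by RDMSR/WRMSR is redirected to
 * msr_onfault by the trap handler, so the access returns EFAULT instead of
 * panicking.  SOME_FEATURE_BIT is a made-up name.
 *
 *      uint64_t val;
 *
 *      if (rdmsr_safe(msr, &val) != 0)
 *              return (ENXIO);         // MSR not implemented on this CPU
 *      val |= SOME_FEATURE_BIT;
 *      if (wrmsr_safe(msr, val) != 0)
 *              return (ENXIO);
 */
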
        .altmacro
        .macro  rsb_seq_label l
rsb_seq_\l:
        .endm
        .macro  rsb_call_label l
        call    rsb_seq_\l
        .endm
        .macro  rsb_seq count
        ll=1
        .rept   \count
        rsb_call_label  %(ll)
        nop
        rsb_seq_label %(ll)
        addl    $4,%esp
        ll=ll+1
        .endr
        .endm

ENTRY(rsb_flush)
        rsb_seq 32
        ret

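/*
 * Editor's note: the rsb_seq macros and rsb_flush above stuff the CPU's
 * Return Stack Buffer by executing 32 calls whose return addresses are
 * immediately discarded ("addl $4,%esp").  Each call pushes a benign
 * prediction entry, so later returns cannot be speculatively steered by
 * stale, possibly attacker-primed RSB entries.
 */
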
ENTRY(handle_ibrs_entry)
        cmpb    $0,hw_ibrs_ibpb_active
        je      1f
        movl    $MSR_IA32_SPEC_CTRL,%ecx
        rdmsr
        orl     $(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP),%eax
        orl     $(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP)>>32,%edx
        wrmsr
        movb    $1,PCPU(IBPB_SET)
        /*
         * i386 does not implement SMEP.
         */
1:      jmp     rsb_flush
END(handle_ibrs_entry)

ENTRY(handle_ibrs_exit)
        cmpb    $0,PCPU(IBPB_SET)
        je      1f
        movl    $MSR_IA32_SPEC_CTRL,%ecx
        rdmsr
        andl    $~(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP),%eax
        andl    $~((IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP)>>32),%edx
        wrmsr
        movb    $0,PCPU(IBPB_SET)
1:      ret
END(handle_ibrs_exit)
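
/*
 * Editor's note: handle_ibrs_entry and handle_ibrs_exit bracket kernel
 * entry and exit when hw_ibrs_ibpb_active is non-zero.  On entry the IBRS
 * and STIBP bits are set in MSR_IA32_SPEC_CTRL, restricting indirect
 * branch speculation while in the kernel, and PCPU(IBPB_SET) records that
 * the MSR was written; on exit the same bits are cleared again.  The entry
 * path then tail-jumps to rsb_flush.
 */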
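/*
 * Editor's note: the mds_handler_* routines below are alternative
 * implementations of the MDS (Microarchitectural Data Sampling)
 * mitigation; one of them is selected according to the CPU model and run
 * before returning to user mode.  mds_handler_void is used when no
 * mitigation is needed or enabled, mds_handler_verw relies on the
 * microcode-assisted VERW flush, and the remaining variants overwrite the
 * affected CPU buffers with model-specific sequences of non-temporal
 * stores, cache-line flushes and fences over the per-CPU scratch buffers
 * PCPU(MDS_BUF) and PCPU(MDS_BUF64).  The vector register used as the zero
 * source is saved in PCPU(MDS_TMP) and restored, and CR0.TS is temporarily
 * cleared so the SSE/AVX instructions do not fault.
 */
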
ENTRY(mds_handler_void)
        ret
END(mds_handler_void)

ENTRY(mds_handler_verw)
        subl    $4, %esp
        movw    %ds, (%esp)
        verw    (%esp)
        addl    $4, %esp
        ret
END(mds_handler_verw)

ENTRY(mds_handler_ivb)
        movl    %cr0, %eax
        testb   $CR0_TS, %al
        je      1f
        clts
1:      movl    PCPU(MDS_BUF), %edx
        movdqa  %xmm0, PCPU(MDS_TMP)
        pxor    %xmm0, %xmm0

        lfence
        orpd    (%edx), %xmm0
        orpd    (%edx), %xmm0
        mfence
        movl    $40, %ecx
        addl    $16, %edx
2:      movntdq %xmm0, (%edx)
        addl    $16, %edx
        decl    %ecx
        jnz     2b
        mfence

        movdqa  PCPU(MDS_TMP),%xmm0
        testb   $CR0_TS, %al
        je      3f
        movl    %eax, %cr0
3:      ret
END(mds_handler_ivb)

ENTRY(mds_handler_bdw)
        movl    %cr0, %eax
        testb   $CR0_TS, %al
        je      1f
        clts
1:      movl    PCPU(MDS_BUF), %ebx
        movdqa  %xmm0, PCPU(MDS_TMP)
        pxor    %xmm0, %xmm0

        movl    %ebx, %edi
        movl    %ebx, %esi
        movl    $40, %ecx
2:      movntdq %xmm0, (%ebx)
        addl    $16, %ebx
        decl    %ecx
        jnz     2b
        mfence
        movl    $1536, %ecx
        rep; movsb
        lfence

        movdqa  PCPU(MDS_TMP),%xmm0
        testb   $CR0_TS, %al
        je      3f
        movl    %eax, %cr0
3:      ret
END(mds_handler_bdw)

ENTRY(mds_handler_skl_sse)
        movl    %cr0, %eax
        testb   $CR0_TS, %al
        je      1f
        clts
1:      movl    PCPU(MDS_BUF), %edi
        movl    PCPU(MDS_BUF64), %edx
        movdqa  %xmm0, PCPU(MDS_TMP)
        pxor    %xmm0, %xmm0

        lfence
        orpd    (%edx), %xmm0
        orpd    (%edx), %xmm0
        xorl    %eax, %eax
2:      clflushopt      5376(%edi, %eax, 8)
        addl    $8, %eax
        cmpl    $8 * 12, %eax
        jb      2b
        sfence
        movl    $6144, %ecx
        xorl    %eax, %eax
        rep; stosb
        mfence

        movdqa  PCPU(MDS_TMP), %xmm0
        testb   $CR0_TS, %al
        je      3f
        movl    %eax, %cr0
3:      ret
END(mds_handler_skl_sse)

ENTRY(mds_handler_skl_avx)
        movl    %cr0, %eax
        testb   $CR0_TS, %al
        je      1f
        clts
1:      movl    PCPU(MDS_BUF), %edi
        movl    PCPU(MDS_BUF64), %edx
        vmovdqa %ymm0, PCPU(MDS_TMP)
        vpxor   %ymm0, %ymm0, %ymm0

        lfence
        vorpd   (%edx), %ymm0, %ymm0
        vorpd   (%edx), %ymm0, %ymm0
        xorl    %eax, %eax
2:      clflushopt      5376(%edi, %eax, 8)
        addl    $8, %eax
        cmpl    $8 * 12, %eax
        jb      2b
        sfence
        movl    $6144, %ecx
        xorl    %eax, %eax
        rep; stosb
        mfence

        vmovdqa PCPU(MDS_TMP), %ymm0
        testb   $CR0_TS, %al
        je      3f
        movl    %eax, %cr0
3:      ret
END(mds_handler_skl_avx)

ENTRY(mds_handler_skl_avx512)
        movl    %cr0, %eax
        testb   $CR0_TS, %al
        je      1f
        clts
1:      movl    PCPU(MDS_BUF), %edi
        movl    PCPU(MDS_BUF64), %edx
        vmovdqa64       %zmm0, PCPU(MDS_TMP)
        vpxord  %zmm0, %zmm0, %zmm0

        lfence
        vorpd   (%edx), %zmm0, %zmm0
        vorpd   (%edx), %zmm0, %zmm0
        xorl    %eax, %eax
2:      clflushopt      5376(%edi, %eax, 8)
        addl    $8, %eax
        cmpl    $8 * 12, %eax
        jb      2b
        sfence
        movl    $6144, %ecx
        xorl    %eax, %eax
        rep; stosb
        mfence

        vmovdqa64       PCPU(MDS_TMP), %zmm0
        testb   $CR0_TS, %al
        je      3f
        movl    %eax, %cr0
3:      ret
END(mds_handler_skl_avx512)

ENTRY(mds_handler_silvermont)
        movl    %cr0, %eax
        testb   $CR0_TS, %al
        je      1f
        clts
1:      movl    PCPU(MDS_BUF), %edx
        movdqa  %xmm0, PCPU(MDS_TMP)
        pxor    %xmm0, %xmm0

        movl    $16, %ecx
2:      movntdq %xmm0, (%edx)
        addl    $16, %edx
        decl    %ecx
        jnz     2b
        mfence

        movdqa  PCPU(MDS_TMP),%xmm0
        testb   $CR0_TS, %al
        je      3f
        movl    %eax, %cr0
3:      ret
END(mds_handler_silvermont)
