FreeBSD/Linux Kernel Cross Reference
sys/amd64/vmm/intel/vmx_support.S


/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 NetApp, Inc.
 * Copyright (c) 2013 Neel Natu <neel@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <machine/asmacros.h>
#include <machine/specialreg.h>

#include "vmx_assym.h"

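/*
 * Editorial note: the VMXCTX_* symbols used below are byte offsets into
 * the C-side 'struct vmxctx', generated into "vmx_assym.h" at build time
 * by the genassym mechanism.  A minimal sketch of how one such offset is
 * produced, assuming a field named 'guest_rdi' (see intel/vmx.h and
 * vmx_genassym.c for the authoritative definitions):
 *
 *      ASSYM(VMXCTX_GUEST_RDI, offsetof(struct vmxctx, guest_rdi));
 *
 * The assembler then sees VMXCTX_GUEST_RDI as a plain integer constant.
 */
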
#ifdef SMP
#define LK      lock ;
#else
#define LK
#endif

/* Be friendly to DTrace FBT's prologue/epilogue pattern matching */
#define VENTER  push %rbp ; mov %rsp,%rbp
#define VLEAVE  pop %rbp

/*
 * Save the guest context.  Assumes that %rsp points to the 'vmxctx'
 * (hardware restores %rsp from the VMCS host-state area on VM-exit)
 * and leaves a pointer to it in %rdi for VMX_HOST_RESTORE.
 */
#define VMX_GUEST_SAVE                                                  \
        movq    %rdi,VMXCTX_GUEST_RDI(%rsp);                            \
        movq    %rsi,VMXCTX_GUEST_RSI(%rsp);                            \
        movq    %rdx,VMXCTX_GUEST_RDX(%rsp);                            \
        movq    %rcx,VMXCTX_GUEST_RCX(%rsp);                            \
        movq    %r8,VMXCTX_GUEST_R8(%rsp);                              \
        movq    %r9,VMXCTX_GUEST_R9(%rsp);                              \
        movq    %rax,VMXCTX_GUEST_RAX(%rsp);                            \
        movq    %rbx,VMXCTX_GUEST_RBX(%rsp);                            \
        movq    %rbp,VMXCTX_GUEST_RBP(%rsp);                            \
        movq    %r10,VMXCTX_GUEST_R10(%rsp);                            \
        movq    %r11,VMXCTX_GUEST_R11(%rsp);                            \
        movq    %r12,VMXCTX_GUEST_R12(%rsp);                            \
        movq    %r13,VMXCTX_GUEST_R13(%rsp);                            \
        movq    %r14,VMXCTX_GUEST_R14(%rsp);                            \
        movq    %r15,VMXCTX_GUEST_R15(%rsp);                            \
        movq    %cr2,%rdi;                                              \
        movq    %rdi,VMXCTX_GUEST_CR2(%rsp);                            \
        movq    %rsp,%rdi;

/*
 * Assumes that %rdi holds a pointer to the 'vmxctx'.
 *
 * On "return" all registers are updated to reflect guest state.  The two
 * exceptions are %rip and %rsp.  These registers are atomically switched
 * by hardware from the guest area of the vmcs.
 *
 * We modify %rsp to point to the 'vmxctx' so we can use it to restore
 * host context in case of an error with 'vmlaunch' or 'vmresume'.
 */
#define VMX_GUEST_RESTORE                                               \
        movq    %rdi,%rsp;                                              \
        movq    VMXCTX_GUEST_CR2(%rdi),%rsi;                            \
        movq    %rsi,%cr2;                                              \
        movq    VMXCTX_GUEST_RSI(%rdi),%rsi;                            \
        movq    VMXCTX_GUEST_RDX(%rdi),%rdx;                            \
        movq    VMXCTX_GUEST_RCX(%rdi),%rcx;                            \
        movq    VMXCTX_GUEST_R8(%rdi),%r8;                              \
        movq    VMXCTX_GUEST_R9(%rdi),%r9;                              \
        movq    VMXCTX_GUEST_RAX(%rdi),%rax;                            \
        movq    VMXCTX_GUEST_RBX(%rdi),%rbx;                            \
        movq    VMXCTX_GUEST_RBP(%rdi),%rbp;                            \
        movq    VMXCTX_GUEST_R10(%rdi),%r10;                            \
        movq    VMXCTX_GUEST_R11(%rdi),%r11;                            \
        movq    VMXCTX_GUEST_R12(%rdi),%r12;                            \
        movq    VMXCTX_GUEST_R13(%rdi),%r13;                            \
        movq    VMXCTX_GUEST_R14(%rdi),%r14;                            \
        movq    VMXCTX_GUEST_R15(%rdi),%r15;                            \
        movq    VMXCTX_GUEST_RDI(%rdi),%rdi; /* restore %rdi last */

/*
 * Zero the remaining caller-saved registers that still hold guest
 * contents so they can't be misused (the callee-saved registers are
 * reloaded from the host save area by VMX_HOST_RESTORE).
 */
#define VMX_GUEST_CLOBBER                                               \
        xor     %rax, %rax;                                             \
        xor     %rcx, %rcx;                                             \
        xor     %rdx, %rdx;                                             \
        xor     %rsi, %rsi;                                             \
        xor     %r8, %r8;                                               \
        xor     %r9, %r9;                                               \
        xor     %r10, %r10;                                             \
        xor     %r11, %r11;

/*
 * Save and restore the host context.
 *
 * Assumes that %rdi holds a pointer to the 'vmxctx'.
 */
#define VMX_HOST_SAVE                                                   \
        movq    %r15, VMXCTX_HOST_R15(%rdi);                            \
        movq    %r14, VMXCTX_HOST_R14(%rdi);                            \
        movq    %r13, VMXCTX_HOST_R13(%rdi);                            \
        movq    %r12, VMXCTX_HOST_R12(%rdi);                            \
        movq    %rbp, VMXCTX_HOST_RBP(%rdi);                            \
        movq    %rsp, VMXCTX_HOST_RSP(%rdi);                            \
        movq    %rbx, VMXCTX_HOST_RBX(%rdi);                            \

#define VMX_HOST_RESTORE                                                \
        movq    VMXCTX_HOST_R15(%rdi), %r15;                            \
        movq    VMXCTX_HOST_R14(%rdi), %r14;                            \
        movq    VMXCTX_HOST_R13(%rdi), %r13;                            \
        movq    VMXCTX_HOST_R12(%rdi), %r12;                            \
        movq    VMXCTX_HOST_RBP(%rdi), %rbp;                            \
        movq    VMXCTX_HOST_RSP(%rdi), %rsp;                            \
        movq    VMXCTX_HOST_RBX(%rdi), %rbx;                            \

/*
 * vmx_enter_guest(struct vmxctx *vmxctx, struct vmx *vmx, int launched)
 * %rdi: pointer to the 'vmxctx'
 * %rsi: pointer to the 'vmx'
 * %edx: launch state of the VMCS
 * Interrupts must be disabled on entry.
 */
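/*
 * Editorial note: a hedged sketch of how the C side might drive this
 * entry point under the contract above; only the prototype and the
 * VMX_* return values below are taken from this file:
 *
 *      int vmx_enter_guest(struct vmxctx *vmxctx, struct vmx *vmx,
 *          int launched);
 *
 *      disable_intr();
 *      rc = vmx_enter_guest(vmxctx, vmx, launched);
 *      enable_intr();
 *      if (rc == VMX_GUEST_VMEXIT)
 *              handle the exit reason recorded in the VMCS;
 *      else
 *              rc is VMX_VMLAUNCH_ERROR or VMX_VMRESUME_ERROR.
 */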
ENTRY(vmx_enter_guest)
        VENTER
        /*
         * Save host state before doing anything else.
         */
        VMX_HOST_SAVE

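        /*
         * L1TF mitigation: when the software L1D flush is enabled (no
         * suitable hardware flush available), flush the L1D cache before
         * entering the guest.  The 'launched' argument is stashed in %r8d
         * first because the flush_l1d_sw call may clobber %edx.
         */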
guest_restore:
        movl    %edx, %r8d
        cmpb    $0, guest_l1d_flush_sw(%rip)
        je      after_l1d
        call    flush_l1d_sw
after_l1d:
        cmpl    $0, %r8d
        je      do_launch
        VMX_GUEST_RESTORE
        vmresume
        /*
         * In the common case 'vmresume' returns back to the host through
         * 'vmx_exit_guest' with %rsp pointing to 'vmxctx'.
         *
         * If there is an error we return VMX_VMRESUME_ERROR to the caller.
         */
        movq    %rsp, %rdi              /* point %rdi back to 'vmxctx' */
        movl    $VMX_VMRESUME_ERROR, %eax
        jmp     decode_inst_error

do_launch:
        VMX_GUEST_RESTORE
        vmlaunch
        /*
         * In the common case 'vmlaunch' returns back to the host through
         * 'vmx_exit_guest' with %rsp pointing to 'vmxctx'.
         *
         * If there is an error we return VMX_VMLAUNCH_ERROR to the caller.
         */
        movq    %rsp, %rdi              /* point %rdi back to 'vmxctx' */
        movl    $VMX_VMLAUNCH_ERROR, %eax
        jmp     decode_inst_error

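/*
 * Editorial note: 'vmlaunch'/'vmresume' report failure through RFLAGS
 * (Intel SDM: VMfailValid sets ZF, VMfailInvalid sets CF).  The mov and
 * jmp instructions on the error paths above do not modify flags, so the
 * 'jz' below still observes the VMX instruction's outcome and selects
 * the matching failure status.
 */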
decode_inst_error:
        movl    $VM_FAIL_VALID, %r11d
        jz      inst_error
        movl    $VM_FAIL_INVALID, %r11d
inst_error:
        movl    %r11d, VMXCTX_INST_FAIL_STATUS(%rdi)

        /*
         * The return value is already populated in %eax so we cannot use
         * it as a scratch register beyond this point.
         */

        VMX_HOST_RESTORE
        VLEAVE
        ret

/*
 * Non-error VM-exit from the guest.  Make this a label so it can
 * be used by C code when setting up the VMCS.
 * The VMCS-restored %rsp points to the struct vmxctx.
 */
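/*
 * Editorial note: a hedged sketch of the C-side hookup implied above;
 * the exact call site lives in the VMCS setup code:
 *
 *      vmcs_write(VMCS_HOST_RIP, (uint64_t)vmx_exit_guest_flush_rsb);
 *
 * Hardware then resumes the host at this label on every VM-exit.
 */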
        ALIGN_TEXT
        .globl  vmx_exit_guest_flush_rsb
vmx_exit_guest_flush_rsb:
        /*
         * Save guest state that is not automatically saved in the vmcs.
         */
        VMX_GUEST_SAVE

        VMX_HOST_RESTORE

        VMX_GUEST_CLOBBER

        /*
         * To prevent malicious branch target predictions from
         * affecting the host, overwrite all entries in the RSB upon
         * exiting a guest.
         */
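        /*
         * Editorial note: each iteration architecturally executes only
         * the two forward 'call 2f' instructions, so 16 iterations push
         * 32 return addresses, enough to overwrite a typical 32-entry
         * RSB.  The 'pause; call 1b' bodies are reached only by
         * speculation that consumes a stale RSB entry.  %rsp is saved
         * in %rax because none of these calls return, leaving the
         * pushed return addresses on the stack.  (GAS local labels may
         * be redefined; each '1b'/'2f' binds to the nearest instance.)
         */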
        mov     $16, %ecx       /* 16 iterations, two calls per loop */
        mov     %rsp, %rax
0:      call    2f              /* create an RSB entry. */
1:      pause
        call    1b              /* capture rogue speculation. */
2:      call    2f              /* create an RSB entry. */
1:      pause
        call    1b              /* capture rogue speculation. */
2:      sub     $1, %ecx
        jnz     0b
        mov     %rax, %rsp

        /*
         * This will return to the caller of 'vmx_enter_guest()' with a return
         * value of VMX_GUEST_VMEXIT.
         */
        movl    $VMX_GUEST_VMEXIT, %eax
        VLEAVE
        ret

        .globl  vmx_exit_guest
vmx_exit_guest:
        /*
         * Save guest state that is not automatically saved in the vmcs.
         */
        VMX_GUEST_SAVE

        VMX_HOST_RESTORE

        VMX_GUEST_CLOBBER

        /*
         * This will return to the caller of 'vmx_enter_guest()' with a return
         * value of VMX_GUEST_VMEXIT.
         */
        movl    $VMX_GUEST_VMEXIT, %eax
        VLEAVE
        ret
END(vmx_enter_guest)

/*
 * %rdi = interrupt handler entry point
 *
 * Calling sequence described in the "Instruction Set Reference" for the
 * "INT" instruction in Intel SDM, Vol 2.
 */
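/*
 * Editorial note: a hedged sketch of the intended caller, which looks up
 * the IDT gate for a host vector and forwards the handler address here so
 * an interrupt recognized during guest execution is delivered normally
 * (field names follow the amd64 'struct gate_descriptor'):
 *
 *      gd = &idt[vector];
 *      func = ((uintptr_t)gd->gd_hioffset << 16) | gd->gd_looffset;
 *      vmx_call_isr(func);
 */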
ENTRY(vmx_call_isr)
        VENTER
        mov     %rsp, %r11                      /* save %rsp */
        and     $~0xf, %rsp                     /* align on 16-byte boundary */
        pushq   $KERNEL_SS                      /* %ss */
        pushq   %r11                            /* %rsp */
        pushfq                                  /* %rflags */
        pushq   $KERNEL_CS                      /* %cs */
        cli                                     /* disable interrupts */
        callq   *%rdi                           /* push %rip and call isr */
        VLEAVE
        ret
END(vmx_call_isr)
