1 /*-
2 * Copyright (c) 2011 NetApp, Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 * $FreeBSD: releng/11.1/sys/amd64/vmm/intel/vmx.c 337828 2018-08-15 02:30:11Z delphij $
27 */
28
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD: releng/11.1/sys/amd64/vmm/intel/vmx.c 337828 2018-08-15 02:30:11Z delphij $");
31
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/smp.h>
35 #include <sys/kernel.h>
36 #include <sys/malloc.h>
37 #include <sys/pcpu.h>
38 #include <sys/proc.h>
39 #include <sys/sysctl.h>
40
41 #include <vm/vm.h>
42 #include <vm/pmap.h>
43
44 #include <machine/psl.h>
45 #include <machine/cpufunc.h>
46 #include <machine/md_var.h>
47 #include <machine/segments.h>
48 #include <machine/smp.h>
49 #include <machine/specialreg.h>
50 #include <machine/vmparam.h>
51
52 #include <machine/vmm.h>
53 #include <machine/vmm_dev.h>
54 #include <machine/vmm_instruction_emul.h>
55 #include "vmm_lapic.h"
56 #include "vmm_host.h"
57 #include "vmm_ioport.h"
58 #include "vmm_ktr.h"
59 #include "vmm_stat.h"
60 #include "vatpic.h"
61 #include "vlapic.h"
62 #include "vlapic_priv.h"
63
64 #include "ept.h"
65 #include "vmx_cpufunc.h"
66 #include "vmx.h"
67 #include "vmx_msr.h"
68 #include "x86.h"
69 #include "vmx_controls.h"
70
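/*
 * The *_ONE_SETTING macros below name the control bits that must be set and
 * the *_ZERO_SETTING macros the bits that must be clear for bhyve to operate.
 * vmx_init() passes each pair to vmx_set_ctlreg() to validate them against
 * the VMX capability MSRs and compute the final control values.
 */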
71 #define PINBASED_CTLS_ONE_SETTING \
72 (PINBASED_EXTINT_EXITING | \
73 PINBASED_NMI_EXITING | \
74 PINBASED_VIRTUAL_NMI)
75 #define PINBASED_CTLS_ZERO_SETTING 0
76
77 #define PROCBASED_CTLS_WINDOW_SETTING \
78 (PROCBASED_INT_WINDOW_EXITING | \
79 PROCBASED_NMI_WINDOW_EXITING)
80
81 #define PROCBASED_CTLS_ONE_SETTING \
82 (PROCBASED_SECONDARY_CONTROLS | \
83 PROCBASED_MWAIT_EXITING | \
84 PROCBASED_MONITOR_EXITING | \
85 PROCBASED_IO_EXITING | \
86 PROCBASED_MSR_BITMAPS | \
87 PROCBASED_CTLS_WINDOW_SETTING | \
88 PROCBASED_CR8_LOAD_EXITING | \
89 PROCBASED_CR8_STORE_EXITING)
90 #define PROCBASED_CTLS_ZERO_SETTING \
91 (PROCBASED_CR3_LOAD_EXITING | \
92 PROCBASED_CR3_STORE_EXITING | \
93 PROCBASED_IO_BITMAPS)
94
95 #define PROCBASED_CTLS2_ONE_SETTING PROCBASED2_ENABLE_EPT
96 #define PROCBASED_CTLS2_ZERO_SETTING 0
97
98 #define VM_EXIT_CTLS_ONE_SETTING \
99 (VM_EXIT_HOST_LMA | \
100 VM_EXIT_SAVE_EFER | \
101 VM_EXIT_LOAD_EFER | \
102 VM_EXIT_ACKNOWLEDGE_INTERRUPT)
103
104 #define VM_EXIT_CTLS_ZERO_SETTING VM_EXIT_SAVE_DEBUG_CONTROLS
105
106 #define VM_ENTRY_CTLS_ONE_SETTING (VM_ENTRY_LOAD_EFER)
107
108 #define VM_ENTRY_CTLS_ZERO_SETTING \
109 (VM_ENTRY_LOAD_DEBUG_CONTROLS | \
110 VM_ENTRY_INTO_SMM | \
111 VM_ENTRY_DEACTIVATE_DUAL_MONITOR)
112
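/*
 * VM-exit handlers return HANDLED when the exit was completely serviced in
 * the kernel and UNHANDLED when it must be reflected to userland.
 */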
113 #define HANDLED 1
114 #define UNHANDLED 0
115
116 static MALLOC_DEFINE(M_VMX, "vmx", "vmx");
117 static MALLOC_DEFINE(M_VLAPIC, "vlapic", "vlapic");
118
119 SYSCTL_DECL(_hw_vmm);
120 SYSCTL_NODE(_hw_vmm, OID_AUTO, vmx, CTLFLAG_RW, NULL, NULL);
121
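/* Per-cpu VMXON state: whether VMX operation is enabled and the VMXON region. */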
122 int vmxon_enabled[MAXCPU];
123 static char vmxon_region[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);
124
125 static uint32_t pinbased_ctls, procbased_ctls, procbased_ctls2;
126 static uint32_t exit_ctls, entry_ctls;
127
128 static uint64_t cr0_ones_mask, cr0_zeros_mask;
129 SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_ones_mask, CTLFLAG_RD,
130 &cr0_ones_mask, 0, NULL);
131 SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_zeros_mask, CTLFLAG_RD,
132 &cr0_zeros_mask, 0, NULL);
133
134 static uint64_t cr4_ones_mask, cr4_zeros_mask;
135 SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_ones_mask, CTLFLAG_RD,
136 &cr4_ones_mask, 0, NULL);
137 SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_zeros_mask, CTLFLAG_RD,
138 &cr4_zeros_mask, 0, NULL);
139
140 static int vmx_initialized;
141 SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, initialized, CTLFLAG_RD,
142 &vmx_initialized, 0, "Intel VMX initialized");
143
144 /*
145 * Optional capabilities
146 */
147 static SYSCTL_NODE(_hw_vmm_vmx, OID_AUTO, cap, CTLFLAG_RW, NULL, NULL);
148
149 static int cap_halt_exit;
150 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, halt_exit, CTLFLAG_RD, &cap_halt_exit, 0,
151 "HLT triggers a VM-exit");
152
153 static int cap_pause_exit;
154 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, pause_exit, CTLFLAG_RD, &cap_pause_exit,
155 0, "PAUSE triggers a VM-exit");
156
157 static int cap_unrestricted_guest;
158 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, unrestricted_guest, CTLFLAG_RD,
159 &cap_unrestricted_guest, 0, "Unrestricted guests");
160
161 static int cap_monitor_trap;
162 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, monitor_trap, CTLFLAG_RD,
163 &cap_monitor_trap, 0, "Monitor trap flag");
164
165 static int cap_invpcid;
166 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, invpcid, CTLFLAG_RD, &cap_invpcid,
167 0, "Guests are allowed to use INVPCID");
168
169 static int virtual_interrupt_delivery;
170 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, virtual_interrupt_delivery, CTLFLAG_RD,
171 &virtual_interrupt_delivery, 0, "APICv virtual interrupt delivery support");
172
173 static int posted_interrupts;
174 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, posted_interrupts, CTLFLAG_RD,
175 &posted_interrupts, 0, "APICv posted interrupt support");
176
177 static int pirvec = -1;
178 SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, posted_interrupt_vector, CTLFLAG_RD,
179 &pirvec, 0, "APICv posted interrupt vector");
180
181 static struct unrhdr *vpid_unr;
182 static u_int vpid_alloc_failed;
183 SYSCTL_UINT(_hw_vmm_vmx, OID_AUTO, vpid_alloc_failed, CTLFLAG_RD,
184 &vpid_alloc_failed, 0, NULL);
185
186 static int guest_l1d_flush;
187 SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, l1d_flush, CTLFLAG_RD,
188 &guest_l1d_flush, 0, NULL);
189
190 uint64_t vmx_msr_flush_cmd;
191
192 /*
193 * Use the last page below 4GB as the APIC access address. This address is
194 * occupied by the boot firmware so it is guaranteed that it will not conflict
195 * with a page in system memory.
196 */
197 #define APIC_ACCESS_ADDRESS 0xFFFFF000
198
199 static int vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc);
200 static int vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval);
201 static int vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val);
202 static void vmx_inject_pir(struct vlapic *vlapic);
203
204 #ifdef KTR
205 static const char *
206 exit_reason_to_str(int reason)
207 {
208 static char reasonbuf[32];
209
210 switch (reason) {
211 case EXIT_REASON_EXCEPTION:
212 return "exception";
213 case EXIT_REASON_EXT_INTR:
214 return "extint";
215 case EXIT_REASON_TRIPLE_FAULT:
216 return "triplefault";
217 case EXIT_REASON_INIT:
218 return "init";
219 case EXIT_REASON_SIPI:
220 return "sipi";
221 case EXIT_REASON_IO_SMI:
222 return "iosmi";
223 case EXIT_REASON_SMI:
224 return "smi";
225 case EXIT_REASON_INTR_WINDOW:
226 return "intrwindow";
227 case EXIT_REASON_NMI_WINDOW:
228 return "nmiwindow";
229 case EXIT_REASON_TASK_SWITCH:
230 return "taskswitch";
231 case EXIT_REASON_CPUID:
232 return "cpuid";
233 case EXIT_REASON_GETSEC:
234 return "getsec";
235 case EXIT_REASON_HLT:
236 return "hlt";
237 case EXIT_REASON_INVD:
238 return "invd";
239 case EXIT_REASON_INVLPG:
240 return "invlpg";
241 case EXIT_REASON_RDPMC:
242 return "rdpmc";
243 case EXIT_REASON_RDTSC:
244 return "rdtsc";
245 case EXIT_REASON_RSM:
246 return "rsm";
247 case EXIT_REASON_VMCALL:
248 return "vmcall";
249 case EXIT_REASON_VMCLEAR:
250 return "vmclear";
251 case EXIT_REASON_VMLAUNCH:
252 return "vmlaunch";
253 case EXIT_REASON_VMPTRLD:
254 return "vmptrld";
255 case EXIT_REASON_VMPTRST:
256 return "vmptrst";
257 case EXIT_REASON_VMREAD:
258 return "vmread";
259 case EXIT_REASON_VMRESUME:
260 return "vmresume";
261 case EXIT_REASON_VMWRITE:
262 return "vmwrite";
263 case EXIT_REASON_VMXOFF:
264 return "vmxoff";
265 case EXIT_REASON_VMXON:
266 return "vmxon";
267 case EXIT_REASON_CR_ACCESS:
268 return "craccess";
269 case EXIT_REASON_DR_ACCESS:
270 return "draccess";
271 case EXIT_REASON_INOUT:
272 return "inout";
273 case EXIT_REASON_RDMSR:
274 return "rdmsr";
275 case EXIT_REASON_WRMSR:
276 return "wrmsr";
277 case EXIT_REASON_INVAL_VMCS:
278 return "invalvmcs";
279 case EXIT_REASON_INVAL_MSR:
280 return "invalmsr";
281 case EXIT_REASON_MWAIT:
282 return "mwait";
283 case EXIT_REASON_MTF:
284 return "mtf";
285 case EXIT_REASON_MONITOR:
286 return "monitor";
287 case EXIT_REASON_PAUSE:
288 return "pause";
289 case EXIT_REASON_MCE_DURING_ENTRY:
290 return "mce-during-entry";
291 case EXIT_REASON_TPR:
292 return "tpr";
293 case EXIT_REASON_APIC_ACCESS:
294 return "apic-access";
295 case EXIT_REASON_GDTR_IDTR:
296 return "gdtridtr";
297 case EXIT_REASON_LDTR_TR:
298 return "ldtrtr";
299 case EXIT_REASON_EPT_FAULT:
300 return "eptfault";
301 case EXIT_REASON_EPT_MISCONFIG:
302 return "eptmisconfig";
303 case EXIT_REASON_INVEPT:
304 return "invept";
305 case EXIT_REASON_RDTSCP:
306 return "rdtscp";
307 case EXIT_REASON_VMX_PREEMPT:
308 return "vmxpreempt";
309 case EXIT_REASON_INVVPID:
310 return "invvpid";
311 case EXIT_REASON_WBINVD:
312 return "wbinvd";
313 case EXIT_REASON_XSETBV:
314 return "xsetbv";
315 case EXIT_REASON_APIC_WRITE:
316 return "apic-write";
317 default:
318 snprintf(reasonbuf, sizeof(reasonbuf), "%d", reason);
319 return (reasonbuf);
320 }
321 }
322 #endif /* KTR */
323
324 static int
325 vmx_allow_x2apic_msrs(struct vmx *vmx)
326 {
327 int i, error;
328
329 error = 0;
330
331 /*
332 * Allow readonly access to the following x2APIC MSRs from the guest.
333 */
334 error += guest_msr_ro(vmx, MSR_APIC_ID);
335 error += guest_msr_ro(vmx, MSR_APIC_VERSION);
336 error += guest_msr_ro(vmx, MSR_APIC_LDR);
337 error += guest_msr_ro(vmx, MSR_APIC_SVR);
338
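/* The ISR, TMR and IRR each span eight 32-bit MSRs (256 bits). */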
339 for (i = 0; i < 8; i++)
340 error += guest_msr_ro(vmx, MSR_APIC_ISR0 + i);
341
342 for (i = 0; i < 8; i++)
343 error += guest_msr_ro(vmx, MSR_APIC_TMR0 + i);
344
345 for (i = 0; i < 8; i++)
346 error += guest_msr_ro(vmx, MSR_APIC_IRR0 + i);
347
348 error += guest_msr_ro(vmx, MSR_APIC_ESR);
349 error += guest_msr_ro(vmx, MSR_APIC_LVT_TIMER);
350 error += guest_msr_ro(vmx, MSR_APIC_LVT_THERMAL);
351 error += guest_msr_ro(vmx, MSR_APIC_LVT_PCINT);
352 error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT0);
353 error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT1);
354 error += guest_msr_ro(vmx, MSR_APIC_LVT_ERROR);
355 error += guest_msr_ro(vmx, MSR_APIC_ICR_TIMER);
356 error += guest_msr_ro(vmx, MSR_APIC_DCR_TIMER);
357 error += guest_msr_ro(vmx, MSR_APIC_ICR);
358
359 /*
360 * Allow TPR, EOI and SELF_IPI MSRs to be read and written by the guest.
361 *
362 * These registers get special treatment described in the section
363 * "Virtualizing MSR-Based APIC Accesses".
364 */
365 error += guest_msr_rw(vmx, MSR_APIC_TPR);
366 error += guest_msr_rw(vmx, MSR_APIC_EOI);
367 error += guest_msr_rw(vmx, MSR_APIC_SELF_IPI);
368
369 return (error);
370 }
371
372 u_long
373 vmx_fix_cr0(u_long cr0)
374 {
375
376 return ((cr0 | cr0_ones_mask) & ~cr0_zeros_mask);
377 }
378
379 u_long
380 vmx_fix_cr4(u_long cr4)
381 {
382
383 return ((cr4 | cr4_ones_mask) & ~cr4_zeros_mask);
384 }
385
386 static void
387 vpid_free(int vpid)
388 {
389 if (vpid < 0 || vpid > 0xffff)
390 panic("vpid_free: invalid vpid %d", vpid);
391
392 /*
393 * VPIDs [0,VM_MAXCPU] are special and are not allocated from
394 * the unit number allocator.
395 */
396
397 if (vpid > VM_MAXCPU)
398 free_unr(vpid_unr, vpid);
399 }
400
401 static void
402 vpid_alloc(uint16_t *vpid, int num)
403 {
404 int i, x;
405
406 if (num <= 0 || num > VM_MAXCPU)
407 panic("invalid number of vpids requested: %d", num);
408
409 /*
410 * If the "enable vpid" execution control is not enabled then the
411 * VPID is required to be 0 for all vcpus.
412 */
413 if ((procbased_ctls2 & PROCBASED2_ENABLE_VPID) == 0) {
414 for (i = 0; i < num; i++)
415 vpid[i] = 0;
416 return;
417 }
418
419 /*
420 * Allocate a unique VPID for each vcpu from the unit number allocator.
421 */
422 for (i = 0; i < num; i++) {
423 x = alloc_unr(vpid_unr);
424 if (x == -1)
425 break;
426 else
427 vpid[i] = x;
428 }
429
430 if (i < num) {
431 atomic_add_int(&vpid_alloc_failed, 1);
432
433 /*
434 * If the unit number allocator does not have enough unique
435 * VPIDs then we need to allocate from the [1,VM_MAXCPU] range.
436 *
437 * These VPIDs are not unique across VMs, but this does not
438 * affect correctness because the combined mappings are also
439 * tagged with the EP4TA which is unique for each VM.
440 *
441 * It is still sub-optimal because the invvpid will invalidate
442 * combined mappings for a particular VPID across all EP4TAs.
443 */
444 while (i-- > 0)
445 vpid_free(vpid[i]);
446
447 for (i = 0; i < num; i++)
448 vpid[i] = i + 1;
449 }
450 }
451
452 static void
453 vpid_init(void)
454 {
455 /*
456 * VPID 0 is required when the "enable VPID" execution control is
457 * disabled.
458 *
459 * VPIDs [1,VM_MAXCPU] are used as the "overflow namespace" when the
460 * unit number allocator does not have sufficient unique VPIDs to
461 * satisfy the allocation.
462 *
463 * The remaining VPIDs are managed by the unit number allocator.
464 */
465 vpid_unr = new_unrhdr(VM_MAXCPU + 1, 0xffff, NULL);
466 }
467
468 static void
469 vmx_disable(void *arg __unused)
470 {
471 struct invvpid_desc invvpid_desc = { 0 };
472 struct invept_desc invept_desc = { 0 };
473
474 if (vmxon_enabled[curcpu]) {
475 /*
476 * See sections 25.3.3.3 and 25.3.3.4 in Intel Vol 3b.
477 *
478 * VMXON and VMXOFF are not required to invalidate any TLB
479 * caching structures, so flush them explicitly here to prevent
480 * retention of cached information between distinct VMX episodes.
481 */
482 invvpid(INVVPID_TYPE_ALL_CONTEXTS, invvpid_desc);
483 invept(INVEPT_TYPE_ALL_CONTEXTS, invept_desc);
484 vmxoff();
485 }
486 load_cr4(rcr4() & ~CR4_VMXE);
487 }
488
489 static int
490 vmx_cleanup(void)
491 {
492
493 if (pirvec >= 0)
494 lapic_ipi_free(pirvec);
495
496 if (vpid_unr != NULL) {
497 delete_unrhdr(vpid_unr);
498 vpid_unr = NULL;
499 }
500
501 smp_rendezvous(NULL, vmx_disable, NULL, NULL);
502
503 return (0);
504 }
505
506 static void
507 vmx_enable(void *arg __unused)
508 {
509 int error;
510 uint64_t feature_control;
511
512 feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL);
513 if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 0 ||
514 (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) {
515 wrmsr(MSR_IA32_FEATURE_CONTROL,
516 feature_control | IA32_FEATURE_CONTROL_VMX_EN |
517 IA32_FEATURE_CONTROL_LOCK);
518 }
519
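/* VMX operation requires CR4.VMXE to be set before executing VMXON. */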
520 load_cr4(rcr4() | CR4_VMXE);
521
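/* The VMXON region must begin with the VMCS revision identifier. */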
522 *(uint32_t *)vmxon_region[curcpu] = vmx_revision();
523 error = vmxon(vmxon_region[curcpu]);
524 if (error == 0)
525 vmxon_enabled[curcpu] = 1;
526 }
527
528 static void
529 vmx_restore(void)
530 {
531
532 if (vmxon_enabled[curcpu])
533 vmxon(vmxon_region[curcpu]);
534 }
535
536 static int
537 vmx_init(int ipinum)
538 {
539 int error, use_tpr_shadow;
540 uint64_t basic, fixed0, fixed1, feature_control;
541 uint32_t tmp, procbased2_vid_bits;
542
543 /* CPUID.1:ECX[bit 5] must be 1 for processor to support VMX */
544 if (!(cpu_feature2 & CPUID2_VMX)) {
545 printf("vmx_init: processor does not support VMX operation\n");
546 return (ENXIO);
547 }
548
549 /*
550 * Verify that MSR_IA32_FEATURE_CONTROL lock and VMXON enable bits
551 * are set (bits 0 and 2 respectively).
552 */
553 feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL);
554 if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 1 &&
555 (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) {
556 printf("vmx_init: VMX operation disabled by BIOS\n");
557 return (ENXIO);
558 }
559
560 /*
561 * Verify capabilities MSR_VMX_BASIC:
562 * - bit 54 indicates support for INS/OUTS decoding
563 */
564 basic = rdmsr(MSR_VMX_BASIC);
565 if ((basic & (1UL << 54)) == 0) {
566 printf("vmx_init: processor does not support desired basic "
567 "capabilities\n");
568 return (EINVAL);
569 }
570
571 /* Check support for primary processor-based VM-execution controls */
572 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
573 MSR_VMX_TRUE_PROCBASED_CTLS,
574 PROCBASED_CTLS_ONE_SETTING,
575 PROCBASED_CTLS_ZERO_SETTING, &procbased_ctls);
576 if (error) {
577 printf("vmx_init: processor does not support desired primary "
578 "processor-based controls\n");
579 return (error);
580 }
581
582 /* Clear the processor-based ctl bits that are set on demand */
583 procbased_ctls &= ~PROCBASED_CTLS_WINDOW_SETTING;
584
585 /* Check support for secondary processor-based VM-execution controls */
586 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
587 MSR_VMX_PROCBASED_CTLS2,
588 PROCBASED_CTLS2_ONE_SETTING,
589 PROCBASED_CTLS2_ZERO_SETTING, &procbased_ctls2);
590 if (error) {
591 printf("vmx_init: processor does not support desired secondary "
592 "processor-based controls\n");
593 return (error);
594 }
595
596 /* Check support for VPID */
597 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2,
598 PROCBASED2_ENABLE_VPID, 0, &tmp);
599 if (error == 0)
600 procbased_ctls2 |= PROCBASED2_ENABLE_VPID;
601
602 /* Check support for pin-based VM-execution controls */
603 error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
604 MSR_VMX_TRUE_PINBASED_CTLS,
605 PINBASED_CTLS_ONE_SETTING,
606 PINBASED_CTLS_ZERO_SETTING, &pinbased_ctls);
607 if (error) {
608 printf("vmx_init: processor does not support desired "
609 "pin-based controls\n");
610 return (error);
611 }
612
613 /* Check support for VM-exit controls */
614 error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS,
615 VM_EXIT_CTLS_ONE_SETTING,
616 VM_EXIT_CTLS_ZERO_SETTING,
617 &exit_ctls);
618 if (error) {
619 printf("vmx_init: processor does not support desired "
620 "exit controls\n");
621 return (error);
622 }
623
624 /* Check support for VM-entry controls */
625 error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, MSR_VMX_TRUE_ENTRY_CTLS,
626 VM_ENTRY_CTLS_ONE_SETTING, VM_ENTRY_CTLS_ZERO_SETTING,
627 &entry_ctls);
628 if (error) {
629 printf("vmx_init: processor does not support desired "
630 "entry controls\n");
631 return (error);
632 }
633
634 /*
635 * Check support for optional features by testing them
636 * as individual bits
637 */
638 cap_halt_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
639 MSR_VMX_TRUE_PROCBASED_CTLS,
640 PROCBASED_HLT_EXITING, 0,
641 &tmp) == 0);
642
643 cap_monitor_trap = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
644 MSR_VMX_PROCBASED_CTLS,
645 PROCBASED_MTF, 0,
646 &tmp) == 0);
647
648 cap_pause_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
649 MSR_VMX_TRUE_PROCBASED_CTLS,
650 PROCBASED_PAUSE_EXITING, 0,
651 &tmp) == 0);
652
653 cap_unrestricted_guest = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
654 MSR_VMX_PROCBASED_CTLS2,
655 PROCBASED2_UNRESTRICTED_GUEST, 0,
656 &tmp) == 0);
657
658 cap_invpcid = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
659 MSR_VMX_PROCBASED_CTLS2, PROCBASED2_ENABLE_INVPCID, 0,
660 &tmp) == 0);
661
662 /*
663 * Check support for virtual interrupt delivery.
664 */
665 procbased2_vid_bits = (PROCBASED2_VIRTUALIZE_APIC_ACCESSES |
666 PROCBASED2_VIRTUALIZE_X2APIC_MODE |
667 PROCBASED2_APIC_REGISTER_VIRTUALIZATION |
668 PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY);
669
670 use_tpr_shadow = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
671 MSR_VMX_TRUE_PROCBASED_CTLS, PROCBASED_USE_TPR_SHADOW, 0,
672 &tmp) == 0);
673
674 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2,
675 procbased2_vid_bits, 0, &tmp);
676 if (error == 0 && use_tpr_shadow) {
677 virtual_interrupt_delivery = 1;
678 TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_vid",
679 &virtual_interrupt_delivery);
680 }
681
682 if (virtual_interrupt_delivery) {
683 procbased_ctls |= PROCBASED_USE_TPR_SHADOW;
684 procbased_ctls2 |= procbased2_vid_bits;
685 procbased_ctls2 &= ~PROCBASED2_VIRTUALIZE_X2APIC_MODE;
686
687 /*
688 * No need to emulate accesses to %CR8 if virtual
689 * interrupt delivery is enabled.
690 */
691 procbased_ctls &= ~PROCBASED_CR8_LOAD_EXITING;
692 procbased_ctls &= ~PROCBASED_CR8_STORE_EXITING;
693
694 /*
695 * Check for Posted Interrupts only if Virtual Interrupt
696 * Delivery is enabled.
697 */
698 error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
699 MSR_VMX_TRUE_PINBASED_CTLS, PINBASED_POSTED_INTERRUPT, 0,
700 &tmp);
701 if (error == 0) {
702 pirvec = lapic_ipi_alloc(pti ? &IDTVEC(justreturn1_pti) :
703 &IDTVEC(justreturn));
704 if (pirvec < 0) {
705 if (bootverbose) {
706 printf("vmx_init: unable to allocate "
707 "posted interrupt vector\n");
708 }
709 } else {
710 posted_interrupts = 1;
711 TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_pir",
712 &posted_interrupts);
713 }
714 }
715 }
716
717 if (posted_interrupts)
718 pinbased_ctls |= PINBASED_POSTED_INTERRUPT;
719
720 /* Initialize EPT */
721 error = ept_init(ipinum);
722 if (error) {
723 printf("vmx_init: ept initialization failed (%d)\n", error);
724 return (error);
725 }
726
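/*
 * Enable flushing of the L1 data cache on VM entry (L1TF mitigation) unless
 * the CPU advertises RDCL_NO; the hw.vmm.l1d_flush tunable can override this.
 */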
727 guest_l1d_flush = (cpu_ia32_arch_caps & IA32_ARCH_CAP_RDCL_NO) == 0;
728 TUNABLE_INT_FETCH("hw.vmm.l1d_flush", &guest_l1d_flush);
729 if (guest_l1d_flush &&
730 (cpu_stdext_feature3 & CPUID_STDEXT3_L1D_FLUSH) != 0)
731 vmx_msr_flush_cmd = IA32_FLUSH_CMD_L1D;
732
733 /*
734 * Stash the cr0 and cr4 bits that must be fixed to 0 or 1
735 */
736 fixed0 = rdmsr(MSR_VMX_CR0_FIXED0);
737 fixed1 = rdmsr(MSR_VMX_CR0_FIXED1);
738 cr0_ones_mask = fixed0 & fixed1;
739 cr0_zeros_mask = ~fixed0 & ~fixed1;
740
741 /*
742 * CR0_PE and CR0_PG can be set to zero in VMX non-root operation
743 * if unrestricted guest execution is allowed.
744 */
745 if (cap_unrestricted_guest)
746 cr0_ones_mask &= ~(CR0_PG | CR0_PE);
747
748 /*
749 * Do not allow the guest to set CR0_NW or CR0_CD.
750 */
751 cr0_zeros_mask |= (CR0_NW | CR0_CD);
752
753 fixed0 = rdmsr(MSR_VMX_CR4_FIXED0);
754 fixed1 = rdmsr(MSR_VMX_CR4_FIXED1);
755 cr4_ones_mask = fixed0 & fixed1;
756 cr4_zeros_mask = ~fixed0 & ~fixed1;
757
758 vpid_init();
759
760 vmx_msr_init();
761
762 /* enable VMX operation */
763 smp_rendezvous(NULL, vmx_enable, NULL, NULL);
764
765 vmx_initialized = 1;
766
767 return (0);
768 }
769
770 static void
771 vmx_trigger_hostintr(int vector)
772 {
773 uintptr_t func;
774 struct gate_descriptor *gd;
775
776 gd = &idt[vector];
777
778 KASSERT(vector >= 32 && vector <= 255, ("vmx_trigger_hostintr: "
779 "invalid vector %d", vector));
780 KASSERT(gd->gd_p == 1, ("gate descriptor for vector %d not present",
781 vector));
782 KASSERT(gd->gd_type == SDT_SYSIGT, ("gate descriptor for vector %d "
783 "has invalid type %d", vector, gd->gd_type));
784 KASSERT(gd->gd_dpl == SEL_KPL, ("gate descriptor for vector %d "
785 "has invalid dpl %d", vector, gd->gd_dpl));
786 KASSERT(gd->gd_selector == GSEL(GCODE_SEL, SEL_KPL), ("gate descriptor "
787 "for vector %d has invalid selector %d", vector, gd->gd_selector));
788 KASSERT(gd->gd_ist == 0, ("gate descriptor for vector %d has invalid "
789 "IST %d", vector, gd->gd_ist));
790
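/* Reconstruct the interrupt handler address from the gate descriptor and call it. */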
791 func = ((long)gd->gd_hioffset << 16 | gd->gd_looffset);
792 vmx_call_isr(func);
793 }
794
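/*
 * Program the CR0/CR4 guest/host mask and read shadow in the VMCS. Bits
 * covered by the mask are owned by the hypervisor: guest reads of them
 * return the shadow value and guest writes that differ from the shadow
 * cause a VM-exit.
 */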
795 static int
796 vmx_setup_cr_shadow(int which, struct vmcs *vmcs, uint32_t initial)
797 {
798 int error, mask_ident, shadow_ident;
799 uint64_t mask_value;
800
801 if (which != 0 && which != 4)
802 panic("vmx_setup_cr_shadow: unknown cr%d", which);
803
804 if (which == 0) {
805 mask_ident = VMCS_CR0_MASK;
806 mask_value = cr0_ones_mask | cr0_zeros_mask;
807 shadow_ident = VMCS_CR0_SHADOW;
808 } else {
809 mask_ident = VMCS_CR4_MASK;
810 mask_value = cr4_ones_mask | cr4_zeros_mask;
811 shadow_ident = VMCS_CR4_SHADOW;
812 }
813
814 error = vmcs_setreg(vmcs, 0, VMCS_IDENT(mask_ident), mask_value);
815 if (error)
816 return (error);
817
818 error = vmcs_setreg(vmcs, 0, VMCS_IDENT(shadow_ident), initial);
819 if (error)
820 return (error);
821
822 return (0);
823 }
824 #define vmx_setup_cr0_shadow(vmcs,init) vmx_setup_cr_shadow(0, (vmcs), (init))
825 #define vmx_setup_cr4_shadow(vmcs,init) vmx_setup_cr_shadow(4, (vmcs), (init))
826
827 static void *
828 vmx_vminit(struct vm *vm, pmap_t pmap)
829 {
830 uint16_t vpid[VM_MAXCPU];
831 int i, error;
832 struct vmx *vmx;
833 struct vmcs *vmcs;
834 uint32_t exc_bitmap;
835
836 vmx = malloc(sizeof(struct vmx), M_VMX, M_WAITOK | M_ZERO);
837 if ((uintptr_t)vmx & PAGE_MASK) {
838 panic("malloc of struct vmx not aligned on %d byte boundary",
839 PAGE_SIZE);
840 }
841 vmx->vm = vm;
842
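/* Derive the EPT pointer from the physical address of the pmap's PML4 table. */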
843 vmx->eptp = eptp(vtophys((vm_offset_t)pmap->pm_pml4));
844
845 /*
846 * Clean up EPTP-tagged guest physical and combined mappings
847 *
848 * VMX transitions are not required to invalidate any guest physical
849 * mappings. So, it may be possible for stale guest physical mappings
850 * to be present in the processor TLBs.
851 *
852 * Combined mappings for this EP4TA are also invalidated for all VPIDs.
853 */
854 ept_invalidate_mappings(vmx->eptp);
855
856 msr_bitmap_initialize(vmx->msr_bitmap);
857
858 /*
859 * It is safe to allow direct access to MSR_GSBASE and MSR_FSBASE.
860 * The guest FSBASE and GSBASE are saved and restored during
861 * vm-exit and vm-entry respectively. The host FSBASE and GSBASE are
862 * always restored from the vmcs host state area on vm-exit.
863 *
864 * The SYSENTER_CS/ESP/EIP MSRs are identical to FS/GSBASE in
865 * how they are saved/restored, so they can be directly accessed by
866 * the guest.
867 *
868 * MSR_EFER is saved and restored in the guest VMCS area on a
869 * VM exit and entry respectively. It is also restored from the
870 * host VMCS area on a VM exit.
871 *
872 * The TSC MSR is exposed read-only. Writes are disallowed because
873 * they would impact the host TSC. If the guest does a write,
874 * the "use TSC offsetting" execution control is enabled and the
875 * difference between the host TSC and the guest TSC is written
876 * into the TSC offset field in the VMCS.
877 */
878 if (guest_msr_rw(vmx, MSR_GSBASE) ||
879 guest_msr_rw(vmx, MSR_FSBASE) ||
880 guest_msr_rw(vmx, MSR_SYSENTER_CS_MSR) ||
881 guest_msr_rw(vmx, MSR_SYSENTER_ESP_MSR) ||
882 guest_msr_rw(vmx, MSR_SYSENTER_EIP_MSR) ||
883 guest_msr_rw(vmx, MSR_EFER) ||
884 guest_msr_ro(vmx, MSR_TSC))
885 panic("vmx_vminit: error setting guest msr access");
886
887 vpid_alloc(vpid, VM_MAXCPU);
888
889 if (virtual_interrupt_delivery) {
890 error = vm_map_mmio(vm, DEFAULT_APIC_BASE, PAGE_SIZE,
891 APIC_ACCESS_ADDRESS);
892 /* XXX this should really return an error to the caller */
893 KASSERT(error == 0, ("vm_map_mmio(apicbase) error %d", error));
894 }
895
896 for (i = 0; i < VM_MAXCPU; i++) {
897 vmcs = &vmx->vmcs[i];
898 vmcs->identifier = vmx_revision();
899 error = vmclear(vmcs);
900 if (error != 0) {
901 panic("vmx_vminit: vmclear error %d on vcpu %d\n",
902 error, i);
903 }
904
905 vmx_msr_guest_init(vmx, i);
906
907 error = vmcs_init(vmcs);
908 KASSERT(error == 0, ("vmcs_init error %d", error));
909
910 VMPTRLD(vmcs);
911 error = 0;
912 error += vmwrite(VMCS_HOST_RSP, (u_long)&vmx->ctx[i]);
913 error += vmwrite(VMCS_EPTP, vmx->eptp);
914 error += vmwrite(VMCS_PIN_BASED_CTLS, pinbased_ctls);
915 error += vmwrite(VMCS_PRI_PROC_BASED_CTLS, procbased_ctls);
916 error += vmwrite(VMCS_SEC_PROC_BASED_CTLS, procbased_ctls2);
917 error += vmwrite(VMCS_EXIT_CTLS, exit_ctls);
918 error += vmwrite(VMCS_ENTRY_CTLS, entry_ctls);
919 error += vmwrite(VMCS_MSR_BITMAP, vtophys(vmx->msr_bitmap));
920 error += vmwrite(VMCS_VPID, vpid[i]);
921
922 /* exception bitmap */
923 if (vcpu_trace_exceptions(vm, i))
924 exc_bitmap = 0xffffffff;
925 else
926 exc_bitmap = 1 << IDT_MC;
927 error += vmwrite(VMCS_EXCEPTION_BITMAP, exc_bitmap);
928
929 if (virtual_interrupt_delivery) {
930 error += vmwrite(VMCS_APIC_ACCESS, APIC_ACCESS_ADDRESS);
931 error += vmwrite(VMCS_VIRTUAL_APIC,
932 vtophys(&vmx->apic_page[i]));
933 error += vmwrite(VMCS_EOI_EXIT0, 0);
934 error += vmwrite(VMCS_EOI_EXIT1, 0);
935 error += vmwrite(VMCS_EOI_EXIT2, 0);
936 error += vmwrite(VMCS_EOI_EXIT3, 0);
937 }
938 if (posted_interrupts) {
939 error += vmwrite(VMCS_PIR_VECTOR, pirvec);
940 error += vmwrite(VMCS_PIR_DESC,
941 vtophys(&vmx->pir_desc[i]));
942 }
943 VMCLEAR(vmcs);
944 KASSERT(error == 0, ("vmx_vminit: error customizing the vmcs"));
945
946 vmx->cap[i].set = 0;
947 vmx->cap[i].proc_ctls = procbased_ctls;
948 vmx->cap[i].proc_ctls2 = procbased_ctls2;
949
950 vmx->state[i].nextrip = ~0;
951 vmx->state[i].lastcpu = NOCPU;
952 vmx->state[i].vpid = vpid[i];
953
954 /*
955 * Set up the CR0/4 shadows, and init the read shadow
956 * to the power-on register value from the Intel Sys Arch.
957 * CR0 - 0x60000010
958 * CR4 - 0
959 */
960 error = vmx_setup_cr0_shadow(vmcs, 0x60000010);
961 if (error != 0)
962 panic("vmx_setup_cr0_shadow %d", error);
963
964 error = vmx_setup_cr4_shadow(vmcs, 0);
965 if (error != 0)
966 panic("vmx_setup_cr4_shadow %d", error);
967
968 vmx->ctx[i].pmap = pmap;
969 }
970
971 return (vmx);
972 }
973
974 static int
975 vmx_handle_cpuid(struct vm *vm, int vcpu, struct vmxctx *vmxctx)
976 {
977 int handled, func;
978
979 func = vmxctx->guest_rax;
980
981 handled = x86_emulate_cpuid(vm, vcpu,
982 (uint32_t*)(&vmxctx->guest_rax),
983 (uint32_t*)(&vmxctx->guest_rbx),
984 (uint32_t*)(&vmxctx->guest_rcx),
985 (uint32_t*)(&vmxctx->guest_rdx));
986 return (handled);
987 }
988
989 static __inline void
990 vmx_run_trace(struct vmx *vmx, int vcpu)
991 {
992 #ifdef KTR
993 VCPU_CTR1(vmx->vm, vcpu, "Resume execution at %#lx", vmcs_guest_rip());
994 #endif
995 }
996
997 static __inline void
998 vmx_exit_trace(struct vmx *vmx, int vcpu, uint64_t rip, uint32_t exit_reason,
999 int handled)
1000 {
1001 #ifdef KTR
1002 VCPU_CTR3(vmx->vm, vcpu, "%s %s vmexit at 0x%0lx",
1003 handled ? "handled" : "unhandled",
1004 exit_reason_to_str(exit_reason), rip);
1005 #endif
1006 }
1007
1008 static __inline void
1009 vmx_astpending_trace(struct vmx *vmx, int vcpu, uint64_t rip)
1010 {
1011 #ifdef KTR
1012 VCPU_CTR1(vmx->vm, vcpu, "astpending vmexit at 0x%0lx", rip);
1013 #endif
1014 }
1015
1016 static VMM_STAT_INTEL(VCPU_INVVPID_SAVED, "Number of vpid invalidations saved");
1017 static VMM_STAT_INTEL(VCPU_INVVPID_DONE, "Number of vpid invalidations done");
1018
1019 /*
1020 * Invalidate guest mappings identified by its vpid from the TLB.
1021 */
1022 static __inline void
1023 vmx_invvpid(struct vmx *vmx, int vcpu, pmap_t pmap, int running)
1024 {
1025 struct vmxstate *vmxstate;
1026 struct invvpid_desc invvpid_desc;
1027
1028 vmxstate = &vmx->state[vcpu];
1029 if (vmxstate->vpid == 0)
1030 return;
1031
1032 if (!running) {
1033 /*
1034 * Set the 'lastcpu' to an invalid host cpu.
1035 *
1036 * This will invalidate TLB entries tagged with the vcpu's
1037 * vpid the next time it runs via vmx_set_pcpu_defaults().
1038 */
1039 vmxstate->lastcpu = NOCPU;
1040 return;
1041 }
1042
1043 KASSERT(curthread->td_critnest > 0, ("%s: vcpu %d running outside "
1044 "critical section", __func__, vcpu));
1045
1046 /*
1047 * Invalidate all mappings tagged with 'vpid'
1048 *
1049 * We do this because this vcpu was executing on a different host
1050 * cpu when it last ran. We do not track whether it invalidated
1051 * mappings associated with its 'vpid' during that run. So we must
1052 * assume that the mappings associated with 'vpid' on 'curcpu' are
1053 * stale and invalidate them.
1054 *
1055 * Note that we incur this penalty only when the scheduler chooses to
1056 * move the thread associated with this vcpu between host cpus.
1057 *
1058 * Note also that this will invalidate mappings tagged with 'vpid'
1059 * for "all" EP4TAs.
1060 */
1061 if (pmap->pm_eptgen == vmx->eptgen[curcpu]) {
1062 invvpid_desc._res1 = 0;
1063 invvpid_desc._res2 = 0;
1064 invvpid_desc.vpid = vmxstate->vpid;
1065 invvpid_desc.linear_addr = 0;
1066 invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc);
1067 vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_DONE, 1);
1068 } else {
1069 /*
1070 * The invvpid can be skipped if an invept is going to
1071 * be performed before entering the guest. The invept
1072 * will invalidate combined mappings tagged with
1073 * 'vmx->eptp' for all vpids.
1074 */
1075 vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_SAVED, 1);
1076 }
1077 }
1078
1079 static void
1080 vmx_set_pcpu_defaults(struct vmx *vmx, int vcpu, pmap_t pmap)
1081 {
1082 struct vmxstate *vmxstate;
1083
1084 vmxstate = &vmx->state[vcpu];
1085 if (vmxstate->lastcpu == curcpu)
1086 return;
1087
1088 vmxstate->lastcpu = curcpu;
1089
1090 vmm_stat_incr(vmx->vm, vcpu, VCPU_MIGRATIONS, 1);
1091
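/* The host TR, GDTR and GS bases are per-cpu so refresh them in the VMCS. */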
1092 vmcs_write(VMCS_HOST_TR_BASE, vmm_get_host_trbase());
1093 vmcs_write(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase());
1094 vmcs_write(VMCS_HOST_GS_BASE, vmm_get_host_gsbase());
1095 vmx_invvpid(vmx, vcpu, pmap, 1);
1096 }
1097
1098 /*
1099 * We depend on 'procbased_ctls' to have the Interrupt Window Exiting bit set.
1100 */
1101 CTASSERT((PROCBASED_CTLS_ONE_SETTING & PROCBASED_INT_WINDOW_EXITING) != 0);
1102
1103 static void __inline
1104 vmx_set_int_window_exiting(struct vmx *vmx, int vcpu)
1105 {
1106
1107 if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) == 0) {
1108 vmx->cap[vcpu].proc_ctls |= PROCBASED_INT_WINDOW_EXITING;
1109 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1110 VCPU_CTR0(vmx->vm, vcpu, "Enabling interrupt window exiting");
1111 }
1112 }
1113
1114 static void __inline
1115 vmx_clear_int_window_exiting(struct vmx *vmx, int vcpu)
1116 {
1117
1118 KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0,
1119 ("intr_window_exiting not set: %#x", vmx->cap[vcpu].proc_ctls));
1120 vmx->cap[vcpu].proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING;
1121 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1122 VCPU_CTR0(vmx->vm, vcpu, "Disabling interrupt window exiting");
1123 }
1124
1125 static void __inline
1126 vmx_set_nmi_window_exiting(struct vmx *vmx, int vcpu)
1127 {
1128
1129 if ((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) == 0) {
1130 vmx->cap[vcpu].proc_ctls |= PROCBASED_NMI_WINDOW_EXITING;
1131 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1132 VCPU_CTR0(vmx->vm, vcpu, "Enabling NMI window exiting");
1133 }
1134 }
1135
1136 static void __inline
1137 vmx_clear_nmi_window_exiting(struct vmx *vmx, int vcpu)
1138 {
1139
1140 KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) != 0,
1141 ("nmi_window_exiting not set %#x", vmx->cap[vcpu].proc_ctls));
1142 vmx->cap[vcpu].proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING;
1143 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1144 VCPU_CTR0(vmx->vm, vcpu, "Disabling NMI window exiting");
1145 }
1146
1147 int
1148 vmx_set_tsc_offset(struct vmx *vmx, int vcpu, uint64_t offset)
1149 {
1150 int error;
1151
1152 if ((vmx->cap[vcpu].proc_ctls & PROCBASED_TSC_OFFSET) == 0) {
1153 vmx->cap[vcpu].proc_ctls |= PROCBASED_TSC_OFFSET;
1154 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1155 VCPU_CTR0(vmx->vm, vcpu, "Enabling TSC offsetting");
1156 }
1157
1158 error = vmwrite(VMCS_TSC_OFFSET, offset);
1159
1160 return (error);
1161 }
1162
1163 #define NMI_BLOCKING (VMCS_INTERRUPTIBILITY_NMI_BLOCKING | \
1164 VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)
1165 #define HWINTR_BLOCKING (VMCS_INTERRUPTIBILITY_STI_BLOCKING | \
1166 VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)
1167
1168 static void
1169 vmx_inject_nmi(struct vmx *vmx, int vcpu)
1170 {
1171 uint32_t gi, info;
1172
1173 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1174 KASSERT((gi & NMI_BLOCKING) == 0, ("vmx_inject_nmi: invalid guest "
1175 "interruptibility-state %#x", gi));
1176
1177 info = vmcs_read(VMCS_ENTRY_INTR_INFO);
1178 KASSERT((info & VMCS_INTR_VALID) == 0, ("vmx_inject_nmi: invalid "
1179 "VM-entry interruption information %#x", info));
1180
1181 /*
1182 * Inject the virtual NMI. The vector must be the NMI IDT entry
1183 * or the VMCS entry check will fail.
1184 */
1185 info = IDT_NMI | VMCS_INTR_T_NMI | VMCS_INTR_VALID;
1186 vmcs_write(VMCS_ENTRY_INTR_INFO, info);
1187
1188 VCPU_CTR0(vmx->vm, vcpu, "Injecting vNMI");
1189
1190 /* Clear the request */
1191 vm_nmi_clear(vmx->vm, vcpu);
1192 }
1193
1194 static void
1195 vmx_inject_interrupts(struct vmx *vmx, int vcpu, struct vlapic *vlapic,
1196 uint64_t guestrip)
1197 {
1198 int vector, need_nmi_exiting, extint_pending;
1199 uint64_t rflags, entryinfo;
1200 uint32_t gi, info;
1201
1202 if (vmx->state[vcpu].nextrip != guestrip) {
1203 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1204 if (gi & HWINTR_BLOCKING) {
1205 VCPU_CTR2(vmx->vm, vcpu, "Guest interrupt blocking "
1206 "cleared due to rip change: %#lx/%#lx",
1207 vmx->state[vcpu].nextrip, guestrip);
1208 gi &= ~HWINTR_BLOCKING;
1209 vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
1210 }
1211 }
1212
1213 if (vm_entry_intinfo(vmx->vm, vcpu, &entryinfo)) {
1214 KASSERT((entryinfo & VMCS_INTR_VALID) != 0, ("%s: entry "
1215 "intinfo is not valid: %#lx", __func__, entryinfo));
1216
1217 info = vmcs_read(VMCS_ENTRY_INTR_INFO);
1218 KASSERT((info & VMCS_INTR_VALID) == 0, ("%s: cannot inject "
1219 "pending exception: %#lx/%#x", __func__, entryinfo, info));
1220
1221 info = entryinfo;
1222 vector = info & 0xff;
1223 if (vector == IDT_BP || vector == IDT_OF) {
1224 /*
1225 * VT-x requires #BP and #OF to be injected as software
1226 * exceptions.
1227 */
1228 info &= ~VMCS_INTR_T_MASK;
1229 info |= VMCS_INTR_T_SWEXCEPTION;
1230 }
1231
1232 if (info & VMCS_INTR_DEL_ERRCODE)
1233 vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR, entryinfo >> 32);
1234
1235 vmcs_write(VMCS_ENTRY_INTR_INFO, info);
1236 }
1237
1238 if (vm_nmi_pending(vmx->vm, vcpu)) {
1239 /*
1240 * If there are no conditions blocking NMI injection then
1241 * inject it directly here otherwise enable "NMI window
1242 * exiting" to inject it as soon as we can.
1243 *
1244 * We also check for STI_BLOCKING because some implementations
1245 * don't allow NMI injection in this case. If we are running
1246 * on a processor that doesn't have this restriction it will
1247 * immediately exit and the NMI will be injected in the
1248 * "NMI window exiting" handler.
1249 */
1250 need_nmi_exiting = 1;
1251 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1252 if ((gi & (HWINTR_BLOCKING | NMI_BLOCKING)) == 0) {
1253 info = vmcs_read(VMCS_ENTRY_INTR_INFO);
1254 if ((info & VMCS_INTR_VALID) == 0) {
1255 vmx_inject_nmi(vmx, vcpu);
1256 need_nmi_exiting = 0;
1257 } else {
1258 VCPU_CTR1(vmx->vm, vcpu, "Cannot inject NMI "
1259 "due to VM-entry intr info %#x", info);
1260 }
1261 } else {
1262 VCPU_CTR1(vmx->vm, vcpu, "Cannot inject NMI due to "
1263 "Guest Interruptibility-state %#x", gi);
1264 }
1265
1266 if (need_nmi_exiting)
1267 vmx_set_nmi_window_exiting(vmx, vcpu);
1268 }
1269
1270 extint_pending = vm_extint_pending(vmx->vm, vcpu);
1271
1272 if (!extint_pending && virtual_interrupt_delivery) {
1273 vmx_inject_pir(vlapic);
1274 return;
1275 }
1276
1277 /*
1278 * If interrupt-window exiting is already in effect then don't bother
1279 * checking for pending interrupts. This is just an optimization and
1280 * not needed for correctness.
1281 */
1282 if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0) {
1283 VCPU_CTR0(vmx->vm, vcpu, "Skip interrupt injection due to "
1284 "pending int_window_exiting");
1285 return;
1286 }
1287
1288 if (!extint_pending) {
1289 /* Ask the local apic for a vector to inject */
1290 if (!vlapic_pending_intr(vlapic, &vector))
1291 return;
1292
1293 /*
1294 * From the Intel SDM, Volume 3, Section "Maskable
1295 * Hardware Interrupts":
1296 * - maskable interrupt vectors [16,255] can be delivered
1297 * through the local APIC.
1298 */
1299 KASSERT(vector >= 16 && vector <= 255,
1300 ("invalid vector %d from local APIC", vector));
1301 } else {
1302 /* Ask the legacy pic for a vector to inject */
1303 vatpic_pending_intr(vmx->vm, &vector);
1304
1305 /*
1306 * From the Intel SDM, Volume 3, Section "Maskable
1307 * Hardware Interrupts":
1308 * - maskable interrupt vectors [0,255] can be delivered
1309 * through the INTR pin.
1310 */
1311 KASSERT(vector >= 0 && vector <= 255,
1312 ("invalid vector %d from INTR", vector));
1313 }
1314
1315 /* Check RFLAGS.IF and the interruptibility state of the guest */
1316 rflags = vmcs_read(VMCS_GUEST_RFLAGS);
1317 if ((rflags & PSL_I) == 0) {
1318 VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
1319 "rflags %#lx", vector, rflags);
1320 goto cantinject;
1321 }
1322
1323 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1324 if (gi & HWINTR_BLOCKING) {
1325 VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
1326 "Guest Interruptibility-state %#x", vector, gi);
1327 goto cantinject;
1328 }
1329
1330 info = vmcs_read(VMCS_ENTRY_INTR_INFO);
1331 if (info & VMCS_INTR_VALID) {
1332 /*
1333 * This is expected and could happen for multiple reasons:
1334 * - A vectoring VM-entry was aborted due to astpending
1335 * - A VM-exit happened during event injection.
1336 * - An exception was injected above.
1337 * - An NMI was injected above or after "NMI window exiting"
1338 */
1339 VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
1340 "VM-entry intr info %#x", vector, info);
1341 goto cantinject;
1342 }
1343
1344 /* Inject the interrupt */
1345 info = VMCS_INTR_T_HWINTR | VMCS_INTR_VALID;
1346 info |= vector;
1347 vmcs_write(VMCS_ENTRY_INTR_INFO, info);
1348
1349 if (!extint_pending) {
1350 /* Update the Local APIC ISR */
1351 vlapic_intr_accepted(vlapic, vector);
1352 } else {
1353 vm_extint_clear(vmx->vm, vcpu);
1354 vatpic_intr_accepted(vmx->vm, vector);
1355
1356 /*
1357 * After we accepted the current ExtINT the PIC may
1358 * have posted another one. If that is the case, set
1359 * the Interrupt Window Exiting execution control so
1360 * we can inject that one too.
1361 *
1362 * Also, interrupt window exiting allows us to inject any
1363 * pending APIC vector that was preempted by the ExtINT
1364 * as soon as possible. This applies both for the software
1365 * emulated vlapic and the hardware assisted virtual APIC.
1366 */
1367 vmx_set_int_window_exiting(vmx, vcpu);
1368 }
1369
1370 VCPU_CTR1(vmx->vm, vcpu, "Injecting hwintr at vector %d", vector);
1371
1372 return;
1373
1374 cantinject:
1375 /*
1376 * Set the Interrupt Window Exiting execution control so we can inject
1377 * the interrupt as soon as blocking condition goes away.
1378 */
1379 vmx_set_int_window_exiting(vmx, vcpu);
1380 }
1381
1382 /*
1383 * If the Virtual NMIs execution control is '1' then the logical processor
1384 * tracks virtual-NMI blocking in the Guest Interruptibility-state field of
1385 * the VMCS. An IRET instruction in VMX non-root operation will remove any
1386 * virtual-NMI blocking.
1387 *
1388 * This unblocking occurs even if the IRET causes a fault. In this case the
1389 * hypervisor needs to restore virtual-NMI blocking before resuming the guest.
1390 */
1391 static void
1392 vmx_restore_nmi_blocking(struct vmx *vmx, int vcpuid)
1393 {
1394 uint32_t gi;
1395
1396 VCPU_CTR0(vmx->vm, vcpuid, "Restore Virtual-NMI blocking");
1397 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1398 gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
1399 vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
1400 }
1401
1402 static void
1403 vmx_clear_nmi_blocking(struct vmx *vmx, int vcpuid)
1404 {
1405 uint32_t gi;
1406
1407 VCPU_CTR0(vmx->vm, vcpuid, "Clear Virtual-NMI blocking");
1408 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1409 gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
1410 vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
1411 }
1412
1413 static void
1414 vmx_assert_nmi_blocking(struct vmx *vmx, int vcpuid)
1415 {
1416 uint32_t gi;
1417
1418 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1419 KASSERT(gi & VMCS_INTERRUPTIBILITY_NMI_BLOCKING,
1420 ("NMI blocking is not in effect %#x", gi));
1421 }
1422
1423 static int
1424 vmx_emulate_xsetbv(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
1425 {
1426 struct vmxctx *vmxctx;
1427 uint64_t xcrval;
1428 const struct xsave_limits *limits;
1429
1430 vmxctx = &vmx->ctx[vcpu];
1431 limits = vmm_get_xsave_limits();
1432
1433 /*
1434 * Note that the processor raises a #GP fault on its own if
1435 * xsetbv is executed for CPL != 0, so we do not have to
1436 * emulate that fault here.
1437 */
1438
1439 /* Only xcr0 is supported. */
1440 if (vmxctx->guest_rcx != 0) {
1441 vm_inject_gp(vmx->vm, vcpu);
1442 return (HANDLED);
1443 }
1444
1445 /* We only handle xcr0 if both the host and guest have XSAVE enabled. */
1446 if (!limits->xsave_enabled || !(vmcs_read(VMCS_GUEST_CR4) & CR4_XSAVE)) {
1447 vm_inject_ud(vmx->vm, vcpu);
1448 return (HANDLED);
1449 }
1450
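/* The new XCR value is passed in %edx:%eax. */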
1451 xcrval = vmxctx->guest_rdx << 32 | (vmxctx->guest_rax & 0xffffffff);
1452 if ((xcrval & ~limits->xcr0_allowed) != 0) {
1453 vm_inject_gp(vmx->vm, vcpu);
1454 return (HANDLED);
1455 }
1456
1457 if (!(xcrval & XFEATURE_ENABLED_X87)) {
1458 vm_inject_gp(vmx->vm, vcpu);
1459 return (HANDLED);
1460 }
1461
1462 /* AVX (YMM_Hi128) requires SSE. */
1463 if (xcrval & XFEATURE_ENABLED_AVX &&
1464 (xcrval & XFEATURE_AVX) != XFEATURE_AVX) {
1465 vm_inject_gp(vmx->vm, vcpu);
1466 return (HANDLED);
1467 }
1468
1469 /*
1470 * AVX512 requires base AVX (YMM_Hi128) as well as OpMask,
1471 * ZMM_Hi256, and Hi16_ZMM.
1472 */
1473 if (xcrval & XFEATURE_AVX512 &&
1474 (xcrval & (XFEATURE_AVX512 | XFEATURE_AVX)) !=
1475 (XFEATURE_AVX512 | XFEATURE_AVX)) {
1476 vm_inject_gp(vmx->vm, vcpu);
1477 return (HANDLED);
1478 }
1479
1480 /*
1481 * Intel MPX requires both bound register state flags to be
1482 * set.
1483 */
1484 if (((xcrval & XFEATURE_ENABLED_BNDREGS) != 0) !=
1485 ((xcrval & XFEATURE_ENABLED_BNDCSR) != 0)) {
1486 vm_inject_gp(vmx->vm, vcpu);
1487 return (HANDLED);
1488 }
1489
1490 /*
1491 * This runs "inside" vmrun() with the guest's FPU state, so
1492 * modifying xcr0 directly modifies the guest's xcr0, not the
1493 * host's.
1494 */
1495 load_xcr(0, xcrval);
1496 return (HANDLED);
1497 }
1498
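/*
 * 'ident' is the register encoding from the exit qualification
 * (0 = %rax ... 15 = %r15). Note that %rsp is not saved in the vmxctx and
 * is accessed via the VMCS instead.
 */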
1499 static uint64_t
1500 vmx_get_guest_reg(struct vmx *vmx, int vcpu, int ident)
1501 {
1502 const struct vmxctx *vmxctx;
1503
1504 vmxctx = &vmx->ctx[vcpu];
1505
1506 switch (ident) {
1507 case 0:
1508 return (vmxctx->guest_rax);
1509 case 1:
1510 return (vmxctx->guest_rcx);
1511 case 2:
1512 return (vmxctx->guest_rdx);
1513 case 3:
1514 return (vmxctx->guest_rbx);
1515 case 4:
1516 return (vmcs_read(VMCS_GUEST_RSP));
1517 case 5:
1518 return (vmxctx->guest_rbp);
1519 case 6:
1520 return (vmxctx->guest_rsi);
1521 case 7:
1522 return (vmxctx->guest_rdi);
1523 case 8:
1524 return (vmxctx->guest_r8);
1525 case 9:
1526 return (vmxctx->guest_r9);
1527 case 10:
1528 return (vmxctx->guest_r10);
1529 case 11:
1530 return (vmxctx->guest_r11);
1531 case 12:
1532 return (vmxctx->guest_r12);
1533 case 13:
1534 return (vmxctx->guest_r13);
1535 case 14:
1536 return (vmxctx->guest_r14);
1537 case 15:
1538 return (vmxctx->guest_r15);
1539 default:
1540 panic("invalid vmx register %d", ident);
1541 }
1542 }
1543
1544 static void
1545 vmx_set_guest_reg(struct vmx *vmx, int vcpu, int ident, uint64_t regval)
1546 {
1547 struct vmxctx *vmxctx;
1548
1549 vmxctx = &vmx->ctx[vcpu];
1550
1551 switch (ident) {
1552 case 0:
1553 vmxctx->guest_rax = regval;
1554 break;
1555 case 1:
1556 vmxctx->guest_rcx = regval;
1557 break;
1558 case 2:
1559 vmxctx->guest_rdx = regval;
1560 break;
1561 case 3:
1562 vmxctx->guest_rbx = regval;
1563 break;
1564 case 4:
1565 vmcs_write(VMCS_GUEST_RSP, regval);
1566 break;
1567 case 5:
1568 vmxctx->guest_rbp = regval;
1569 break;
1570 case 6:
1571 vmxctx->guest_rsi = regval;
1572 break;
1573 case 7:
1574 vmxctx->guest_rdi = regval;
1575 break;
1576 case 8:
1577 vmxctx->guest_r8 = regval;
1578 break;
1579 case 9:
1580 vmxctx->guest_r9 = regval;
1581 break;
1582 case 10:
1583 vmxctx->guest_r10 = regval;
1584 break;
1585 case 11:
1586 vmxctx->guest_r11 = regval;
1587 break;
1588 case 12:
1589 vmxctx->guest_r12 = regval;
1590 break;
1591 case 13:
1592 vmxctx->guest_r13 = regval;
1593 break;
1594 case 14:
1595 vmxctx->guest_r14 = regval;
1596 break;
1597 case 15:
1598 vmxctx->guest_r15 = regval;
1599 break;
1600 default:
1601 panic("invalid vmx register %d", ident);
1602 }
1603 }
1604
1605 static int
1606 vmx_emulate_cr0_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
1607 {
1608 uint64_t crval, regval;
1609
1610 /* We only handle mov to %cr0 at this time */
1611 if ((exitqual & 0xf0) != 0x00)
1612 return (UNHANDLED);
1613
1614 regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);
1615
1616 vmcs_write(VMCS_CR0_SHADOW, regval);
1617
1618 crval = regval | cr0_ones_mask;
1619 crval &= ~cr0_zeros_mask;
1620 vmcs_write(VMCS_GUEST_CR0, crval);
1621
1622 if (regval & CR0_PG) {
1623 uint64_t efer, entry_ctls;
1624
1625 /*
1626 * If CR0.PG is 1 and EFER.LME is 1 then EFER.LMA and
1627 * the "IA-32e mode guest" bit in VM-entry control must be
1628 * equal.
1629 */
1630 efer = vmcs_read(VMCS_GUEST_IA32_EFER);
1631 if (efer & EFER_LME) {
1632 efer |= EFER_LMA;
1633 vmcs_write(VMCS_GUEST_IA32_EFER, efer);
1634 entry_ctls = vmcs_read(VMCS_ENTRY_CTLS);
1635 entry_ctls |= VM_ENTRY_GUEST_LMA;
1636 vmcs_write(VMCS_ENTRY_CTLS, entry_ctls);
1637 }
1638 }
1639
1640 return (HANDLED);
1641 }
1642
1643 static int
1644 vmx_emulate_cr4_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
1645 {
1646 uint64_t crval, regval;
1647
1648 /* We only handle mov to %cr4 at this time */
1649 if ((exitqual & 0xf0) != 0x00)
1650 return (UNHANDLED);
1651
1652 regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);
1653
1654 vmcs_write(VMCS_CR4_SHADOW, regval);
1655
1656 crval = regval | cr4_ones_mask;
1657 crval &= ~cr4_zeros_mask;
1658 vmcs_write(VMCS_GUEST_CR4, crval);
1659
1660 return (HANDLED);
1661 }
1662
1663 static int
1664 vmx_emulate_cr8_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
1665 {
1666 struct vlapic *vlapic;
1667 uint64_t cr8;
1668 int regnum;
1669
1670 /* We only handle mov %cr8 to/from a register at this time. */
1671 if ((exitqual & 0xe0) != 0x00) {
1672 return (UNHANDLED);
1673 }
1674
1675 vlapic = vm_lapic(vmx->vm, vcpu);
1676 regnum = (exitqual >> 8) & 0xf;
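/* Bit 4 of the exit qualification distinguishes a read of %cr8 from a write. */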
1677 if (exitqual & 0x10) {
1678 cr8 = vlapic_get_cr8(vlapic);
1679 vmx_set_guest_reg(vmx, vcpu, regnum, cr8);
1680 } else {
1681 cr8 = vmx_get_guest_reg(vmx, vcpu, regnum);
1682 vlapic_set_cr8(vlapic, cr8);
1683 }
1684
1685 return (HANDLED);
1686 }
1687
1688 /*
1689 * From section "Guest Register State" in the Intel SDM: CPL = SS.DPL
1690 */
1691 static int
1692 vmx_cpl(void)
1693 {
1694 uint32_t ssar;
1695
1696 ssar = vmcs_read(VMCS_GUEST_SS_ACCESS_RIGHTS);
1697 return ((ssar >> 5) & 0x3);
1698 }
1699
1700 static enum vm_cpu_mode
1701 vmx_cpu_mode(void)
1702 {
1703 uint32_t csar;
1704
1705 if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LMA) {
1706 csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
1707 if (csar & 0x2000)
1708 return (CPU_MODE_64BIT); /* CS.L = 1 */
1709 else
1710 return (CPU_MODE_COMPATIBILITY);
1711 } else if (vmcs_read(VMCS_GUEST_CR0) & CR0_PE) {
1712 return (CPU_MODE_PROTECTED);
1713 } else {
1714 return (CPU_MODE_REAL);
1715 }
1716 }
1717
1718 static enum vm_paging_mode
1719 vmx_paging_mode(void)
1720 {
1721
1722 if (!(vmcs_read(VMCS_GUEST_CR0) & CR0_PG))
1723 return (PAGING_MODE_FLAT);
1724 if (!(vmcs_read(VMCS_GUEST_CR4) & CR4_PAE))
1725 return (PAGING_MODE_32);
1726 if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LME)
1727 return (PAGING_MODE_64);
1728 else
1729 return (PAGING_MODE_PAE);
1730 }
1731
1732 static uint64_t
1733 inout_str_index(struct vmx *vmx, int vcpuid, int in)
1734 {
1735 uint64_t val;
1736 int error;
1737 enum vm_reg_name reg;
1738
1739 reg = in ? VM_REG_GUEST_RDI : VM_REG_GUEST_RSI;
1740 error = vmx_getreg(vmx, vcpuid, reg, &val);
1741 KASSERT(error == 0, ("%s: vmx_getreg error %d", __func__, error));
1742 return (val);
1743 }
1744
1745 static uint64_t
1746 inout_str_count(struct vmx *vmx, int vcpuid, int rep)
1747 {
1748 uint64_t val;
1749 int error;
1750
1751 if (rep) {
1752 error = vmx_getreg(vmx, vcpuid, VM_REG_GUEST_RCX, &val);
1753 KASSERT(!error, ("%s: vmx_getreg error %d", __func__, error));
1754 } else {
1755 val = 1;
1756 }
1757 return (val);
1758 }
1759
1760 static int
1761 inout_str_addrsize(uint32_t inst_info)
1762 {
1763 uint32_t size;
1764
1765 size = (inst_info >> 7) & 0x7;
1766 switch (size) {
1767 case 0:
1768 return (2); /* 16 bit */
1769 case 1:
1770 return (4); /* 32 bit */
1771 case 2:
1772 return (8); /* 64 bit */
1773 default:
1774 panic("%s: invalid size encoding %d", __func__, size);
1775 }
1776 }
1777
1778 static void
1779 inout_str_seginfo(struct vmx *vmx, int vcpuid, uint32_t inst_info, int in,
1780 struct vm_inout_str *vis)
1781 {
1782 int error, s;
1783
1784 if (in) {
1785 vis->seg_name = VM_REG_GUEST_ES;
1786 } else {
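/* Bits 17:15 of the instruction information encode the segment register. */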
1787 s = (inst_info >> 15) & 0x7;
1788 vis->seg_name = vm_segment_name(s);
1789 }
1790
1791 error = vmx_getdesc(vmx, vcpuid, vis->seg_name, &vis->seg_desc);
1792 KASSERT(error == 0, ("%s: vmx_getdesc error %d", __func__, error));
1793 }
1794
1795 static void
1796 vmx_paging_info(struct vm_guest_paging *paging)
1797 {
1798 paging->cr3 = vmcs_guest_cr3();
1799 paging->cpl = vmx_cpl();
1800 paging->cpu_mode = vmx_cpu_mode();
1801 paging->paging_mode = vmx_paging_mode();
1802 }
1803
1804 static void
1805 vmexit_inst_emul(struct vm_exit *vmexit, uint64_t gpa, uint64_t gla)
1806 {
1807 struct vm_guest_paging *paging;
1808 uint32_t csar;
1809
1810 paging = &vmexit->u.inst_emul.paging;
1811
1812 vmexit->exitcode = VM_EXITCODE_INST_EMUL;
1813 vmexit->inst_length = 0;
1814 vmexit->u.inst_emul.gpa = gpa;
1815 vmexit->u.inst_emul.gla = gla;
1816 vmx_paging_info(paging);
1817 switch (paging->cpu_mode) {
1818 case CPU_MODE_REAL:
1819 vmexit->u.inst_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE);
1820 vmexit->u.inst_emul.cs_d = 0;
1821 break;
1822 case CPU_MODE_PROTECTED:
1823 case CPU_MODE_COMPATIBILITY:
1824 vmexit->u.inst_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE);
1825 csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
1826 vmexit->u.inst_emul.cs_d = SEG_DESC_DEF32(csar);
1827 break;
1828 default:
1829 vmexit->u.inst_emul.cs_base = 0;
1830 vmexit->u.inst_emul.cs_d = 0;
1831 break;
1832 }
1833 vie_init(&vmexit->u.inst_emul.vie, NULL, 0);
1834 }
1835
1836 static int
1837 ept_fault_type(uint64_t ept_qual)
1838 {
1839 int fault_type;
1840
1841 if (ept_qual & EPT_VIOLATION_DATA_WRITE)
1842 fault_type = VM_PROT_WRITE;
1843 else if (ept_qual & EPT_VIOLATION_INST_FETCH)
1844 fault_type = VM_PROT_EXECUTE;
1845 else
1846 fault_type = VM_PROT_READ;
1847
1848 return (fault_type);
1849 }
1850
1851 static boolean_t
1852 ept_emulation_fault(uint64_t ept_qual)
1853 {
1854 int read, write;
1855
1856 /* EPT fault on an instruction fetch doesn't make sense here */
1857 if (ept_qual & EPT_VIOLATION_INST_FETCH)
1858 return (FALSE);
1859
1860 /* EPT fault must be a read fault or a write fault */
1861 read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;
1862 write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;
1863 if ((read | write) == 0)
1864 return (FALSE);
1865
1866 /*
1867 * The EPT violation must have been caused by accessing a
1868 * guest-physical address that is a translation of a guest-linear
1869 * address.
1870 */
1871 if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 ||
1872 (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) {
1873 return (FALSE);
1874 }
1875
1876 return (TRUE);
1877 }
1878
1879 static __inline int
1880 apic_access_virtualization(struct vmx *vmx, int vcpuid)
1881 {
1882 uint32_t proc_ctls2;
1883
1884 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
1885 return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) ? 1 : 0);
1886 }
1887
1888 static __inline int
1889 x2apic_virtualization(struct vmx *vmx, int vcpuid)
1890 {
1891 uint32_t proc_ctls2;
1892
1893 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
1894 return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_X2APIC_MODE) ? 1 : 0);
1895 }
1896
1897 static int
1898 vmx_handle_apic_write(struct vmx *vmx, int vcpuid, struct vlapic *vlapic,
1899 uint64_t qual)
1900 {
1901 int error, handled, offset;
1902 uint32_t *apic_regs, vector;
1903 bool retu;
1904
1905 handled = HANDLED;
1906 offset = APIC_WRITE_OFFSET(qual);
1907
1908 if (!apic_access_virtualization(vmx, vcpuid)) {
1909 /*
1910 * In general there should not be any APIC write VM-exits
1911 * unless APIC-access virtualization is enabled.
1912 *
1913 * However self-IPI virtualization can legitimately trigger
1914 * an APIC-write VM-exit so treat it specially.
1915 */
1916 if (x2apic_virtualization(vmx, vcpuid) &&
1917 offset == APIC_OFFSET_SELF_IPI) {
1918 apic_regs = (uint32_t *)(vlapic->apic_page);
1919 vector = apic_regs[APIC_OFFSET_SELF_IPI / 4];
1920 vlapic_self_ipi_handler(vlapic, vector);
1921 return (HANDLED);
1922 } else
1923 return (UNHANDLED);
1924 }
1925
1926 switch (offset) {
1927 case APIC_OFFSET_ID:
1928 vlapic_id_write_handler(vlapic);
1929 break;
1930 case APIC_OFFSET_LDR:
1931 vlapic_ldr_write_handler(vlapic);
1932 break;
1933 case APIC_OFFSET_DFR:
1934 vlapic_dfr_write_handler(vlapic);
1935 break;
1936 case APIC_OFFSET_SVR:
1937 vlapic_svr_write_handler(vlapic);
1938 break;
1939 case APIC_OFFSET_ESR:
1940 vlapic_esr_write_handler(vlapic);
1941 break;
1942 case APIC_OFFSET_ICR_LOW:
1943 retu = false;
1944 error = vlapic_icrlo_write_handler(vlapic, &retu);
1945 if (error != 0 || retu)
1946 handled = UNHANDLED;
1947 break;
1948 case APIC_OFFSET_CMCI_LVT:
1949 case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT:
1950 vlapic_lvt_write_handler(vlapic, offset);
1951 break;
1952 case APIC_OFFSET_TIMER_ICR:
1953 vlapic_icrtmr_write_handler(vlapic);
1954 break;
1955 case APIC_OFFSET_TIMER_DCR:
1956 vlapic_dcr_write_handler(vlapic);
1957 break;
1958 default:
1959 handled = UNHANDLED;
1960 break;
1961 }
1962 return (handled);
1963 }
1964
1965 static bool
1966 apic_access_fault(struct vmx *vmx, int vcpuid, uint64_t gpa)
1967 {
1968
1969 if (apic_access_virtualization(vmx, vcpuid) &&
1970 (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE))
1971 return (true);
1972 else
1973 return (false);
1974 }
1975
1976 static int
1977 vmx_handle_apic_access(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit)
1978 {
1979 uint64_t qual;
1980 int access_type, offset, allowed;
1981
1982 if (!apic_access_virtualization(vmx, vcpuid))
1983 return (UNHANDLED);
1984
1985 qual = vmexit->u.vmx.exit_qualification;
1986 access_type = APIC_ACCESS_TYPE(qual);
1987 offset = APIC_ACCESS_OFFSET(qual);
1988
1989 allowed = 0;
1990 if (access_type == 0) {
1991 /*
1992 * Read data access to the following registers is expected.
1993 */
1994 switch (offset) {
1995 case APIC_OFFSET_APR:
1996 case APIC_OFFSET_PPR:
1997 case APIC_OFFSET_RRR:
1998 case APIC_OFFSET_CMCI_LVT:
1999 case APIC_OFFSET_TIMER_CCR:
2000 allowed = 1;
2001 break;
2002 default:
2003 break;
2004 }
2005 } else if (access_type == 1) {
2006 /*
2007 * Write data access to the following registers is expected.
2008 */
2009 switch (offset) {
2010 case APIC_OFFSET_VER:
2011 case APIC_OFFSET_APR:
2012 case APIC_OFFSET_PPR:
2013 case APIC_OFFSET_RRR:
2014 case APIC_OFFSET_ISR0 ... APIC_OFFSET_ISR7:
2015 case APIC_OFFSET_TMR0 ... APIC_OFFSET_TMR7:
2016 case APIC_OFFSET_IRR0 ... APIC_OFFSET_IRR7:
2017 case APIC_OFFSET_CMCI_LVT:
2018 case APIC_OFFSET_TIMER_CCR:
2019 allowed = 1;
2020 break;
2021 default:
2022 break;
2023 }
2024 }
2025
2026 if (allowed) {
2027 vmexit_inst_emul(vmexit, DEFAULT_APIC_BASE + offset,
2028 VIE_INVALID_GLA);
2029 }
2030
2031 /*
2032 	 * Regardless of whether the APIC access is allowed, this handler
2033 * always returns UNHANDLED:
2034 * - if the access is allowed then it is handled by emulating the
2035 * instruction that caused the VM-exit (outside the critical section)
2036 * - if the access is not allowed then it will be converted to an
2037 * exitcode of VM_EXITCODE_VMX and will be dealt with in userland.
2038 */
2039 return (UNHANDLED);
2040 }
2041
2042 static enum task_switch_reason
2043 vmx_task_switch_reason(uint64_t qual)
2044 {
2045 int reason;
2046
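	/*
	 * Bits 31:30 of the exit qualification encode the source of the
	 * task switch: 0 = CALL, 1 = IRET, 2 = JMP, 3 = IDT task gate
	 * (see "Exit Qualification for Task Switches" in the Intel SDM).
	 */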
2047 reason = (qual >> 30) & 0x3;
2048 switch (reason) {
2049 case 0:
2050 return (TSR_CALL);
2051 case 1:
2052 return (TSR_IRET);
2053 case 2:
2054 return (TSR_JMP);
2055 case 3:
2056 return (TSR_IDT_GATE);
2057 default:
2058 panic("%s: invalid reason %d", __func__, reason);
2059 }
2060 }
2061
2062 static int
2063 emulate_wrmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t val, bool *retu)
2064 {
2065 int error;
2066
2067 if (lapic_msr(num))
2068 error = lapic_wrmsr(vmx->vm, vcpuid, num, val, retu);
2069 else
2070 error = vmx_wrmsr(vmx, vcpuid, num, val, retu);
2071
2072 return (error);
2073 }
2074
2075 static int
2076 emulate_rdmsr(struct vmx *vmx, int vcpuid, u_int num, bool *retu)
2077 {
2078 struct vmxctx *vmxctx;
2079 uint64_t result;
2080 uint32_t eax, edx;
2081 int error;
2082
2083 if (lapic_msr(num))
2084 error = lapic_rdmsr(vmx->vm, vcpuid, num, &result, retu);
2085 else
2086 error = vmx_rdmsr(vmx, vcpuid, num, &result, retu);
2087
2088 if (error == 0) {
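		/*
		 * RDMSR returns its 64-bit result in EDX:EAX, so split the
		 * value across the guest's %rax and %rdx.
		 */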
2089 eax = result;
2090 vmxctx = &vmx->ctx[vcpuid];
2091 error = vmxctx_setreg(vmxctx, VM_REG_GUEST_RAX, eax);
2092 KASSERT(error == 0, ("vmxctx_setreg(rax) error %d", error));
2093
2094 edx = result >> 32;
2095 error = vmxctx_setreg(vmxctx, VM_REG_GUEST_RDX, edx);
2096 KASSERT(error == 0, ("vmxctx_setreg(rdx) error %d", error));
2097 }
2098
2099 return (error);
2100 }
2101
2102 static int
2103 vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
2104 {
2105 int error, errcode, errcode_valid, handled, in;
2106 struct vmxctx *vmxctx;
2107 struct vlapic *vlapic;
2108 struct vm_inout_str *vis;
2109 struct vm_task_switch *ts;
2110 uint32_t eax, ecx, edx, idtvec_info, idtvec_err, intr_info, inst_info;
2111 uint32_t intr_type, intr_vec, reason;
2112 uint64_t exitintinfo, qual, gpa;
2113 bool retu;
2114
2115 CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_VIRTUAL_NMI) != 0);
2116 CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_NMI_EXITING) != 0);
2117
2118 handled = UNHANDLED;
2119 vmxctx = &vmx->ctx[vcpu];
2120
2121 qual = vmexit->u.vmx.exit_qualification;
2122 reason = vmexit->u.vmx.exit_reason;
2123 vmexit->exitcode = VM_EXITCODE_BOGUS;
2124
2125 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_COUNT, 1);
2126
2127 /*
2128 * VM-entry failures during or after loading guest state.
2129 *
2130 * These VM-exits are uncommon but must be handled specially
2131 * as most VM-exit fields are not populated as usual.
2132 */
2133 if (__predict_false(reason == EXIT_REASON_MCE_DURING_ENTRY)) {
2134 VCPU_CTR0(vmx->vm, vcpu, "Handling MCE during VM-entry");
2135 __asm __volatile("int $18");
2136 return (1);
2137 }
2138
2139 /*
2140 * VM exits that can be triggered during event delivery need to
2141 * be handled specially by re-injecting the event if the IDT
2142 * vectoring information field's valid bit is set.
2143 *
2144 * See "Information for VM Exits During Event Delivery" in Intel SDM
2145 * for details.
2146 */
2147 idtvec_info = vmcs_idt_vectoring_info();
2148 if (idtvec_info & VMCS_IDT_VEC_VALID) {
2149 idtvec_info &= ~(1 << 12); /* clear undefined bit */
2150 exitintinfo = idtvec_info;
2151 if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
2152 idtvec_err = vmcs_idt_vectoring_err();
2153 exitintinfo |= (uint64_t)idtvec_err << 32;
2154 }
2155 error = vm_exit_intinfo(vmx->vm, vcpu, exitintinfo);
2156 KASSERT(error == 0, ("%s: vm_set_intinfo error %d",
2157 __func__, error));
2158
2159 /*
2160 * If 'virtual NMIs' are being used and the VM-exit
2161 * happened while injecting an NMI during the previous
2162 * VM-entry, then clear "blocking by NMI" in the
2163 * Guest Interruptibility-State so the NMI can be
2164 * reinjected on the subsequent VM-entry.
2165 *
2166 * However, if the NMI was being delivered through a task
2167 * gate, then the new task must start execution with NMIs
2168 * blocked so don't clear NMI blocking in this case.
2169 */
2170 intr_type = idtvec_info & VMCS_INTR_T_MASK;
2171 if (intr_type == VMCS_INTR_T_NMI) {
2172 if (reason != EXIT_REASON_TASK_SWITCH)
2173 vmx_clear_nmi_blocking(vmx, vcpu);
2174 else
2175 vmx_assert_nmi_blocking(vmx, vcpu);
2176 }
2177
2178 /*
2179 * Update VM-entry instruction length if the event being
2180 * delivered was a software interrupt or software exception.
2181 */
2182 if (intr_type == VMCS_INTR_T_SWINTR ||
2183 intr_type == VMCS_INTR_T_PRIV_SWEXCEPTION ||
2184 intr_type == VMCS_INTR_T_SWEXCEPTION) {
2185 vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length);
2186 }
2187 }
2188
2189 switch (reason) {
2190 case EXIT_REASON_TASK_SWITCH:
2191 ts = &vmexit->u.task_switch;
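		/* Bits 15:0 of the exit qualification hold the new TSS selector. */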
2192 ts->tsssel = qual & 0xffff;
2193 ts->reason = vmx_task_switch_reason(qual);
2194 ts->ext = 0;
2195 ts->errcode_valid = 0;
2196 vmx_paging_info(&ts->paging);
2197 /*
2198 * If the task switch was due to a CALL, JMP, IRET, software
2199 * interrupt (INT n) or software exception (INT3, INTO),
2200 * then the saved %rip references the instruction that caused
2201 * the task switch. The instruction length field in the VMCS
2202 * is valid in this case.
2203 *
2204 * In all other cases (e.g., NMI, hardware exception) the
2205 * saved %rip is one that would have been saved in the old TSS
2206 * had the task switch completed normally so the instruction
2207 * length field is not needed in this case and is explicitly
2208 * set to 0.
2209 */
2210 if (ts->reason == TSR_IDT_GATE) {
2211 KASSERT(idtvec_info & VMCS_IDT_VEC_VALID,
2212 ("invalid idtvec_info %#x for IDT task switch",
2213 idtvec_info));
2214 intr_type = idtvec_info & VMCS_INTR_T_MASK;
2215 if (intr_type != VMCS_INTR_T_SWINTR &&
2216 intr_type != VMCS_INTR_T_SWEXCEPTION &&
2217 intr_type != VMCS_INTR_T_PRIV_SWEXCEPTION) {
2218 /* Task switch triggered by external event */
2219 ts->ext = 1;
2220 vmexit->inst_length = 0;
2221 if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
2222 ts->errcode_valid = 1;
2223 ts->errcode = vmcs_idt_vectoring_err();
2224 }
2225 }
2226 }
2227 vmexit->exitcode = VM_EXITCODE_TASK_SWITCH;
2228 VCPU_CTR4(vmx->vm, vcpu, "task switch reason %d, tss 0x%04x, "
2229 "%s errcode 0x%016lx", ts->reason, ts->tsssel,
2230 ts->ext ? "external" : "internal",
2231 ((uint64_t)ts->errcode << 32) | ts->errcode_valid);
2232 break;
2233 case EXIT_REASON_CR_ACCESS:
2234 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1);
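		/* Bits 3:0 of the exit qualification identify the control register. */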
2235 switch (qual & 0xf) {
2236 case 0:
2237 handled = vmx_emulate_cr0_access(vmx, vcpu, qual);
2238 break;
2239 case 4:
2240 handled = vmx_emulate_cr4_access(vmx, vcpu, qual);
2241 break;
2242 case 8:
2243 handled = vmx_emulate_cr8_access(vmx, vcpu, qual);
2244 break;
2245 }
2246 break;
2247 case EXIT_REASON_RDMSR:
2248 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RDMSR, 1);
2249 retu = false;
2250 ecx = vmxctx->guest_rcx;
2251 VCPU_CTR1(vmx->vm, vcpu, "rdmsr 0x%08x", ecx);
2252 error = emulate_rdmsr(vmx, vcpu, ecx, &retu);
2253 if (error) {
2254 vmexit->exitcode = VM_EXITCODE_RDMSR;
2255 vmexit->u.msr.code = ecx;
2256 } else if (!retu) {
2257 handled = HANDLED;
2258 } else {
2259 /* Return to userspace with a valid exitcode */
2260 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
2261 ("emulate_rdmsr retu with bogus exitcode"));
2262 }
2263 break;
2264 case EXIT_REASON_WRMSR:
2265 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_WRMSR, 1);
2266 retu = false;
2267 eax = vmxctx->guest_rax;
2268 ecx = vmxctx->guest_rcx;
2269 edx = vmxctx->guest_rdx;
2270 VCPU_CTR2(vmx->vm, vcpu, "wrmsr 0x%08x value 0x%016lx",
2271 ecx, (uint64_t)edx << 32 | eax);
2272 error = emulate_wrmsr(vmx, vcpu, ecx,
2273 (uint64_t)edx << 32 | eax, &retu);
2274 if (error) {
2275 vmexit->exitcode = VM_EXITCODE_WRMSR;
2276 vmexit->u.msr.code = ecx;
2277 vmexit->u.msr.wval = (uint64_t)edx << 32 | eax;
2278 } else if (!retu) {
2279 handled = HANDLED;
2280 } else {
2281 /* Return to userspace with a valid exitcode */
2282 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
2283 ("emulate_wrmsr retu with bogus exitcode"));
2284 }
2285 break;
2286 case EXIT_REASON_HLT:
2287 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT, 1);
2288 vmexit->exitcode = VM_EXITCODE_HLT;
2289 vmexit->u.hlt.rflags = vmcs_read(VMCS_GUEST_RFLAGS);
2290 break;
2291 case EXIT_REASON_MTF:
2292 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MTRAP, 1);
2293 vmexit->exitcode = VM_EXITCODE_MTRAP;
2294 vmexit->inst_length = 0;
2295 break;
2296 case EXIT_REASON_PAUSE:
2297 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_PAUSE, 1);
2298 vmexit->exitcode = VM_EXITCODE_PAUSE;
2299 break;
2300 case EXIT_REASON_INTR_WINDOW:
2301 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INTR_WINDOW, 1);
2302 vmx_clear_int_window_exiting(vmx, vcpu);
2303 return (1);
2304 case EXIT_REASON_EXT_INTR:
2305 /*
2306 * External interrupts serve only to cause VM exits and allow
2307 * the host interrupt handler to run.
2308 *
2309 * If this external interrupt triggers a virtual interrupt
2310 * to a VM, then that state will be recorded by the
2311 * host interrupt handler in the VM's softc. We will inject
2312 * this virtual interrupt during the subsequent VM enter.
2313 */
2314 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
2315
2316 /*
2317 * XXX: Ignore this exit if VMCS_INTR_VALID is not set.
2318 * This appears to be a bug in VMware Fusion?
2319 */
2320 if (!(intr_info & VMCS_INTR_VALID))
2321 return (1);
2322 KASSERT((intr_info & VMCS_INTR_VALID) != 0 &&
2323 (intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_HWINTR,
2324 ("VM exit interruption info invalid: %#x", intr_info));
2325 vmx_trigger_hostintr(intr_info & 0xff);
2326
2327 /*
2328 		 * This is special. We want to treat this as a 'handled'
2329 * VM-exit but not increment the instruction pointer.
2330 */
2331 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXTINT, 1);
2332 return (1);
2333 case EXIT_REASON_NMI_WINDOW:
2334 /* Exit to allow the pending virtual NMI to be injected */
2335 if (vm_nmi_pending(vmx->vm, vcpu))
2336 vmx_inject_nmi(vmx, vcpu);
2337 vmx_clear_nmi_window_exiting(vmx, vcpu);
2338 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NMI_WINDOW, 1);
2339 return (1);
2340 case EXIT_REASON_INOUT:
2341 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INOUT, 1);
2342 vmexit->exitcode = VM_EXITCODE_INOUT;
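		/*
		 * Decode the I/O-instruction exit qualification: bits 2:0 hold
		 * the access size minus one, bit 3 the direction (1 = IN),
		 * bit 4 the string-instruction flag, bit 5 the REP prefix and
		 * bits 31:16 the port number (see "Exit Qualification for I/O
		 * Instructions" in the Intel SDM).
		 */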
2343 vmexit->u.inout.bytes = (qual & 0x7) + 1;
2344 vmexit->u.inout.in = in = (qual & 0x8) ? 1 : 0;
2345 vmexit->u.inout.string = (qual & 0x10) ? 1 : 0;
2346 vmexit->u.inout.rep = (qual & 0x20) ? 1 : 0;
2347 vmexit->u.inout.port = (uint16_t)(qual >> 16);
2348 vmexit->u.inout.eax = (uint32_t)(vmxctx->guest_rax);
2349 if (vmexit->u.inout.string) {
2350 inst_info = vmcs_read(VMCS_EXIT_INSTRUCTION_INFO);
2351 vmexit->exitcode = VM_EXITCODE_INOUT_STR;
2352 vis = &vmexit->u.inout_str;
2353 vmx_paging_info(&vis->paging);
2354 vis->rflags = vmcs_read(VMCS_GUEST_RFLAGS);
2355 vis->cr0 = vmcs_read(VMCS_GUEST_CR0);
2356 vis->index = inout_str_index(vmx, vcpu, in);
2357 vis->count = inout_str_count(vmx, vcpu, vis->inout.rep);
2358 vis->addrsize = inout_str_addrsize(inst_info);
2359 inout_str_seginfo(vmx, vcpu, inst_info, in, vis);
2360 }
2361 break;
2362 case EXIT_REASON_CPUID:
2363 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CPUID, 1);
2364 handled = vmx_handle_cpuid(vmx->vm, vcpu, vmxctx);
2365 break;
2366 case EXIT_REASON_EXCEPTION:
2367 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXCEPTION, 1);
2368 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
2369 KASSERT((intr_info & VMCS_INTR_VALID) != 0,
2370 ("VM exit interruption info invalid: %#x", intr_info));
2371
2372 intr_vec = intr_info & 0xff;
2373 intr_type = intr_info & VMCS_INTR_T_MASK;
2374
2375 /*
2376 * If Virtual NMIs control is 1 and the VM-exit is due to a
2377 * fault encountered during the execution of IRET then we must
2378 * restore the state of "virtual-NMI blocking" before resuming
2379 * the guest.
2380 *
2381 * See "Resuming Guest Software after Handling an Exception".
2382 * See "Information for VM Exits Due to Vectored Events".
2383 */
2384 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 &&
2385 (intr_vec != IDT_DF) &&
2386 (intr_info & EXIT_QUAL_NMIUDTI) != 0)
2387 vmx_restore_nmi_blocking(vmx, vcpu);
2388
2389 /*
2390 * The NMI has already been handled in vmx_exit_handle_nmi().
2391 */
2392 if (intr_type == VMCS_INTR_T_NMI)
2393 return (1);
2394
2395 /*
2396 * Call the machine check handler by hand. Also don't reflect
2397 * the machine check back into the guest.
2398 */
2399 if (intr_vec == IDT_MC) {
2400 VCPU_CTR0(vmx->vm, vcpu, "Vectoring to MCE handler");
2401 __asm __volatile("int $18");
2402 return (1);
2403 }
2404
2405 if (intr_vec == IDT_PF) {
2406 error = vmxctx_setreg(vmxctx, VM_REG_GUEST_CR2, qual);
2407 KASSERT(error == 0, ("%s: vmxctx_setreg(cr2) error %d",
2408 __func__, error));
2409 }
2410
2411 /*
2412 * Software exceptions exhibit trap-like behavior. This in
2413 * turn requires populating the VM-entry instruction length
2414 * so that the %rip in the trap frame is past the INT3/INTO
2415 * instruction.
2416 */
2417 if (intr_type == VMCS_INTR_T_SWEXCEPTION)
2418 vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length);
2419
2420 /* Reflect all other exceptions back into the guest */
2421 errcode_valid = errcode = 0;
2422 if (intr_info & VMCS_INTR_DEL_ERRCODE) {
2423 errcode_valid = 1;
2424 errcode = vmcs_read(VMCS_EXIT_INTR_ERRCODE);
2425 }
2426 VCPU_CTR2(vmx->vm, vcpu, "Reflecting exception %d/%#x into "
2427 "the guest", intr_vec, errcode);
2428 error = vm_inject_exception(vmx->vm, vcpu, intr_vec,
2429 errcode_valid, errcode, 0);
2430 KASSERT(error == 0, ("%s: vm_inject_exception error %d",
2431 __func__, error));
2432 return (1);
2433
2434 case EXIT_REASON_EPT_FAULT:
2435 /*
2436 * If 'gpa' lies within the address space allocated to
2437 		 * memory then this must be a nested page fault; otherwise it
2438 		 * must be an instruction that accesses MMIO space.
2439 */
2440 gpa = vmcs_gpa();
2441 if (vm_mem_allocated(vmx->vm, vcpu, gpa) ||
2442 apic_access_fault(vmx, vcpu, gpa)) {
2443 vmexit->exitcode = VM_EXITCODE_PAGING;
2444 vmexit->inst_length = 0;
2445 vmexit->u.paging.gpa = gpa;
2446 vmexit->u.paging.fault_type = ept_fault_type(qual);
2447 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NESTED_FAULT, 1);
2448 } else if (ept_emulation_fault(qual)) {
2449 vmexit_inst_emul(vmexit, gpa, vmcs_gla());
2450 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INST_EMUL, 1);
2451 }
2452 /*
2453 * If Virtual NMIs control is 1 and the VM-exit is due to an
2454 * EPT fault during the execution of IRET then we must restore
2455 * the state of "virtual-NMI blocking" before resuming.
2456 *
2457 * See description of "NMI unblocking due to IRET" in
2458 * "Exit Qualification for EPT Violations".
2459 */
2460 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 &&
2461 (qual & EXIT_QUAL_NMIUDTI) != 0)
2462 vmx_restore_nmi_blocking(vmx, vcpu);
2463 break;
2464 case EXIT_REASON_VIRTUALIZED_EOI:
2465 vmexit->exitcode = VM_EXITCODE_IOAPIC_EOI;
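		/* The EOI'd vector is in bits 7:0 of the exit qualification. */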
2466 vmexit->u.ioapic_eoi.vector = qual & 0xFF;
2467 vmexit->inst_length = 0; /* trap-like */
2468 break;
2469 case EXIT_REASON_APIC_ACCESS:
2470 handled = vmx_handle_apic_access(vmx, vcpu, vmexit);
2471 break;
2472 case EXIT_REASON_APIC_WRITE:
2473 /*
2474 * APIC-write VM exit is trap-like so the %rip is already
2475 * pointing to the next instruction.
2476 */
2477 vmexit->inst_length = 0;
2478 vlapic = vm_lapic(vmx->vm, vcpu);
2479 handled = vmx_handle_apic_write(vmx, vcpu, vlapic, qual);
2480 break;
2481 case EXIT_REASON_XSETBV:
2482 handled = vmx_emulate_xsetbv(vmx, vcpu, vmexit);
2483 break;
2484 case EXIT_REASON_MONITOR:
2485 vmexit->exitcode = VM_EXITCODE_MONITOR;
2486 break;
2487 case EXIT_REASON_MWAIT:
2488 vmexit->exitcode = VM_EXITCODE_MWAIT;
2489 break;
2490 default:
2491 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_UNKNOWN, 1);
2492 break;
2493 }
2494
2495 if (handled) {
2496 /*
2497 * It is possible that control is returned to userland
2498 * even though we were able to handle the VM exit in the
2499 * kernel.
2500 *
2501 * In such a case we want to make sure that the userland
2502 * restarts guest execution at the instruction *after*
2503 * the one we just processed. Therefore we update the
2504 * guest rip in the VMCS and in 'vmexit'.
2505 */
2506 vmexit->rip += vmexit->inst_length;
2507 vmexit->inst_length = 0;
2508 vmcs_write(VMCS_GUEST_RIP, vmexit->rip);
2509 } else {
2510 if (vmexit->exitcode == VM_EXITCODE_BOGUS) {
2511 /*
2512 * If this VM exit was not claimed by anybody then
2513 * treat it as a generic VMX exit.
2514 */
2515 vmexit->exitcode = VM_EXITCODE_VMX;
2516 vmexit->u.vmx.status = VM_SUCCESS;
2517 vmexit->u.vmx.inst_type = 0;
2518 vmexit->u.vmx.inst_error = 0;
2519 } else {
2520 /*
2521 * The exitcode and collateral have been populated.
2522 * The VM exit will be processed further in userland.
2523 */
2524 }
2525 }
2526 return (handled);
2527 }
2528
2529 static __inline void
2530 vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit)
2531 {
2532
2533 KASSERT(vmxctx->inst_fail_status != VM_SUCCESS,
2534 ("vmx_exit_inst_error: invalid inst_fail_status %d",
2535 vmxctx->inst_fail_status));
2536
2537 vmexit->inst_length = 0;
2538 vmexit->exitcode = VM_EXITCODE_VMX;
2539 vmexit->u.vmx.status = vmxctx->inst_fail_status;
2540 vmexit->u.vmx.inst_error = vmcs_instruction_error();
2541 vmexit->u.vmx.exit_reason = ~0;
2542 vmexit->u.vmx.exit_qualification = ~0;
2543
2544 switch (rc) {
2545 case VMX_VMRESUME_ERROR:
2546 case VMX_VMLAUNCH_ERROR:
2547 case VMX_INVEPT_ERROR:
2548 vmexit->u.vmx.inst_type = rc;
2549 break;
2550 default:
2551 panic("vm_exit_inst_error: vmx_enter_guest returned %d", rc);
2552 }
2553 }
2554
2555 /*
2556 * If the NMI-exiting VM execution control is set to '1' then an NMI in
2557 * non-root operation causes a VM-exit. NMI blocking is in effect so it is
2558 * sufficient to simply vector to the NMI handler via a software interrupt.
2559  * However, this must be done before maskable interrupts are enabled;
2560 * otherwise the "iret" issued by an interrupt handler will incorrectly
2561 * clear NMI blocking.
2562 */
2563 static __inline void
2564 vmx_exit_handle_nmi(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit)
2565 {
2566 uint32_t intr_info;
2567
2568 KASSERT((read_rflags() & PSL_I) == 0, ("interrupts enabled"));
2569
2570 if (vmexit->u.vmx.exit_reason != EXIT_REASON_EXCEPTION)
2571 return;
2572
2573 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
2574 KASSERT((intr_info & VMCS_INTR_VALID) != 0,
2575 ("VM exit interruption info invalid: %#x", intr_info));
2576
2577 if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI) {
2578 KASSERT((intr_info & 0xff) == IDT_NMI, ("VM exit due "
2579 "to NMI has invalid vector: %#x", intr_info));
2580 VCPU_CTR0(vmx->vm, vcpuid, "Vectoring to NMI handler");
2581 __asm __volatile("int $2");
2582 }
2583 }
2584
2585 static int
2586 vmx_run(void *arg, int vcpu, register_t rip, pmap_t pmap,
2587 struct vm_eventinfo *evinfo)
2588 {
2589 int rc, handled, launched;
2590 struct vmx *vmx;
2591 struct vm *vm;
2592 struct vmxctx *vmxctx;
2593 struct vmcs *vmcs;
2594 struct vm_exit *vmexit;
2595 struct vlapic *vlapic;
2596 uint32_t exit_reason;
2597
2598 vmx = arg;
2599 vm = vmx->vm;
2600 vmcs = &vmx->vmcs[vcpu];
2601 vmxctx = &vmx->ctx[vcpu];
2602 vlapic = vm_lapic(vm, vcpu);
2603 vmexit = vm_exitinfo(vm, vcpu);
2604 launched = 0;
2605
2606 KASSERT(vmxctx->pmap == pmap,
2607 ("pmap %p different than ctx pmap %p", pmap, vmxctx->pmap));
2608
2609 vmx_msr_guest_enter(vmx, vcpu);
2610
2611 VMPTRLD(vmcs);
2612
2613 /*
2614 * XXX
2615 * We do this every time because we may setup the virtual machine
2616 * from a different process than the one that actually runs it.
2617 *
2618 * If the life of a virtual machine was spent entirely in the context
2619 * of a single process we could do this once in vmx_vminit().
2620 */
2621 vmcs_write(VMCS_HOST_CR3, rcr3());
2622
2623 vmcs_write(VMCS_GUEST_RIP, rip);
2624 vmx_set_pcpu_defaults(vmx, vcpu, pmap);
2625 do {
2626 KASSERT(vmcs_guest_rip() == rip, ("%s: vmcs guest rip mismatch "
2627 "%#lx/%#lx", __func__, vmcs_guest_rip(), rip));
2628
2629 handled = UNHANDLED;
2630 /*
2631 * Interrupts are disabled from this point on until the
2632 * guest starts executing. This is done for the following
2633 * reasons:
2634 *
2635 * If an AST is asserted on this thread after the check below,
2636 * then the IPI_AST notification will not be lost, because it
2637 * will cause a VM exit due to external interrupt as soon as
2638 * the guest state is loaded.
2639 *
2640 * A posted interrupt after 'vmx_inject_interrupts()' will
2641 * not be "lost" because it will be held pending in the host
2642 * APIC because interrupts are disabled. The pending interrupt
2643 * will be recognized as soon as the guest state is loaded.
2644 *
2645 * The same reasoning applies to the IPI generated by
2646 * pmap_invalidate_ept().
2647 */
2648 disable_intr();
2649 vmx_inject_interrupts(vmx, vcpu, vlapic, rip);
2650
2651 /*
2652 * Check for vcpu suspension after injecting events because
2653 * vmx_inject_interrupts() can suspend the vcpu due to a
2654 * triple fault.
2655 */
2656 if (vcpu_suspended(evinfo)) {
2657 enable_intr();
2658 vm_exit_suspended(vmx->vm, vcpu, rip);
2659 break;
2660 }
2661
2662 if (vcpu_rendezvous_pending(evinfo)) {
2663 enable_intr();
2664 vm_exit_rendezvous(vmx->vm, vcpu, rip);
2665 break;
2666 }
2667
2668 if (vcpu_reqidle(evinfo)) {
2669 enable_intr();
2670 vm_exit_reqidle(vmx->vm, vcpu, rip);
2671 break;
2672 }
2673
2674 if (vcpu_should_yield(vm, vcpu)) {
2675 enable_intr();
2676 vm_exit_astpending(vmx->vm, vcpu, rip);
2677 vmx_astpending_trace(vmx, vcpu, rip);
2678 handled = HANDLED;
2679 break;
2680 }
2681
2682 vmx_run_trace(vmx, vcpu);
2683 rc = vmx_enter_guest(vmxctx, vmx, launched);
2684
2685 /* Collect some information for VM exit processing */
2686 vmexit->rip = rip = vmcs_guest_rip();
2687 vmexit->inst_length = vmexit_instruction_length();
2688 vmexit->u.vmx.exit_reason = exit_reason = vmcs_exit_reason();
2689 vmexit->u.vmx.exit_qualification = vmcs_exit_qualification();
2690
2691 /* Update 'nextrip' */
2692 vmx->state[vcpu].nextrip = rip;
2693
2694 if (rc == VMX_GUEST_VMEXIT) {
2695 vmx_exit_handle_nmi(vmx, vcpu, vmexit);
2696 enable_intr();
2697 handled = vmx_exit_process(vmx, vcpu, vmexit);
2698 } else {
2699 enable_intr();
2700 vmx_exit_inst_error(vmxctx, rc, vmexit);
2701 }
2702 launched = 1;
2703 vmx_exit_trace(vmx, vcpu, rip, exit_reason, handled);
2704 rip = vmexit->rip;
2705 } while (handled);
2706
2707 /*
2708 	 * If a VM exit has been handled then the exitcode must be BOGUS.
2709 	 * If a VM exit is not handled then the exitcode must not be BOGUS.
2710 */
2711 if ((handled && vmexit->exitcode != VM_EXITCODE_BOGUS) ||
2712 (!handled && vmexit->exitcode == VM_EXITCODE_BOGUS)) {
2713 panic("Mismatch between handled (%d) and exitcode (%d)",
2714 handled, vmexit->exitcode);
2715 }
2716
2717 if (!handled)
2718 vmm_stat_incr(vm, vcpu, VMEXIT_USERSPACE, 1);
2719
2720 VCPU_CTR1(vm, vcpu, "returning from vmx_run: exitcode %d",
2721 vmexit->exitcode);
2722
2723 VMCLEAR(vmcs);
2724 vmx_msr_guest_exit(vmx, vcpu);
2725
2726 return (0);
2727 }
2728
2729 static void
2730 vmx_vmcleanup(void *arg)
2731 {
2732 int i;
2733 struct vmx *vmx = arg;
2734
2735 if (apic_access_virtualization(vmx, 0))
2736 vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE);
2737
2738 for (i = 0; i < VM_MAXCPU; i++)
2739 vpid_free(vmx->state[i].vpid);
2740
2741 free(vmx, M_VMX);
2742
2743 return;
2744 }
2745
2746 static register_t *
2747 vmxctx_regptr(struct vmxctx *vmxctx, int reg)
2748 {
2749
2750 switch (reg) {
2751 case VM_REG_GUEST_RAX:
2752 return (&vmxctx->guest_rax);
2753 case VM_REG_GUEST_RBX:
2754 return (&vmxctx->guest_rbx);
2755 case VM_REG_GUEST_RCX:
2756 return (&vmxctx->guest_rcx);
2757 case VM_REG_GUEST_RDX:
2758 return (&vmxctx->guest_rdx);
2759 case VM_REG_GUEST_RSI:
2760 return (&vmxctx->guest_rsi);
2761 case VM_REG_GUEST_RDI:
2762 return (&vmxctx->guest_rdi);
2763 case VM_REG_GUEST_RBP:
2764 return (&vmxctx->guest_rbp);
2765 case VM_REG_GUEST_R8:
2766 return (&vmxctx->guest_r8);
2767 case VM_REG_GUEST_R9:
2768 return (&vmxctx->guest_r9);
2769 case VM_REG_GUEST_R10:
2770 return (&vmxctx->guest_r10);
2771 case VM_REG_GUEST_R11:
2772 return (&vmxctx->guest_r11);
2773 case VM_REG_GUEST_R12:
2774 return (&vmxctx->guest_r12);
2775 case VM_REG_GUEST_R13:
2776 return (&vmxctx->guest_r13);
2777 case VM_REG_GUEST_R14:
2778 return (&vmxctx->guest_r14);
2779 case VM_REG_GUEST_R15:
2780 return (&vmxctx->guest_r15);
2781 case VM_REG_GUEST_CR2:
2782 return (&vmxctx->guest_cr2);
2783 default:
2784 break;
2785 }
2786 return (NULL);
2787 }
2788
2789 static int
2790 vmxctx_getreg(struct vmxctx *vmxctx, int reg, uint64_t *retval)
2791 {
2792 register_t *regp;
2793
2794 if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {
2795 *retval = *regp;
2796 return (0);
2797 } else
2798 return (EINVAL);
2799 }
2800
2801 static int
2802 vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val)
2803 {
2804 register_t *regp;
2805
2806 if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {
2807 *regp = val;
2808 return (0);
2809 } else
2810 return (EINVAL);
2811 }
2812
2813 static int
2814 vmx_get_intr_shadow(struct vmx *vmx, int vcpu, int running, uint64_t *retval)
2815 {
2816 uint64_t gi;
2817 int error;
2818
2819 error = vmcs_getreg(&vmx->vmcs[vcpu], running,
2820 VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY), &gi);
2821 *retval = (gi & HWINTR_BLOCKING) ? 1 : 0;
2822 return (error);
2823 }
2824
2825 static int
2826 vmx_modify_intr_shadow(struct vmx *vmx, int vcpu, int running, uint64_t val)
2827 {
2828 struct vmcs *vmcs;
2829 uint64_t gi;
2830 int error, ident;
2831
2832 /*
2833 * Forcing the vcpu into an interrupt shadow is not supported.
2834 */
2835 if (val) {
2836 error = EINVAL;
2837 goto done;
2838 }
2839
2840 vmcs = &vmx->vmcs[vcpu];
2841 ident = VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY);
2842 error = vmcs_getreg(vmcs, running, ident, &gi);
2843 if (error == 0) {
2844 gi &= ~HWINTR_BLOCKING;
2845 error = vmcs_setreg(vmcs, running, ident, gi);
2846 }
2847 done:
2848 VCPU_CTR2(vmx->vm, vcpu, "Setting intr_shadow to %#lx %s", val,
2849 error ? "failed" : "succeeded");
2850 return (error);
2851 }
2852
2853 static int
2854 vmx_shadow_reg(int reg)
2855 {
2856 int shreg;
2857
2858 shreg = -1;
2859
2860 switch (reg) {
2861 case VM_REG_GUEST_CR0:
2862 shreg = VMCS_CR0_SHADOW;
2863 break;
2864 case VM_REG_GUEST_CR4:
2865 shreg = VMCS_CR4_SHADOW;
2866 break;
2867 default:
2868 break;
2869 }
2870
2871 return (shreg);
2872 }
2873
2874 static int
2875 vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval)
2876 {
2877 int running, hostcpu;
2878 struct vmx *vmx = arg;
2879
2880 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
2881 if (running && hostcpu != curcpu)
2882 panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), vcpu);
2883
2884 if (reg == VM_REG_GUEST_INTR_SHADOW)
2885 return (vmx_get_intr_shadow(vmx, vcpu, running, retval));
2886
2887 if (vmxctx_getreg(&vmx->ctx[vcpu], reg, retval) == 0)
2888 return (0);
2889
2890 return (vmcs_getreg(&vmx->vmcs[vcpu], running, reg, retval));
2891 }
2892
2893 static int
2894 vmx_setreg(void *arg, int vcpu, int reg, uint64_t val)
2895 {
2896 int error, hostcpu, running, shadow;
2897 uint64_t ctls;
2898 pmap_t pmap;
2899 struct vmx *vmx = arg;
2900
2901 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
2902 if (running && hostcpu != curcpu)
2903 panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), vcpu);
2904
2905 if (reg == VM_REG_GUEST_INTR_SHADOW)
2906 return (vmx_modify_intr_shadow(vmx, vcpu, running, val));
2907
2908 if (vmxctx_setreg(&vmx->ctx[vcpu], reg, val) == 0)
2909 return (0);
2910
2911 error = vmcs_setreg(&vmx->vmcs[vcpu], running, reg, val);
2912
2913 if (error == 0) {
2914 /*
2915 * If the "load EFER" VM-entry control is 1 then the
2916 		 * value of EFER.LMA must be identical to the "IA-32e mode guest"
2917 		 * bit in the VM-entry controls.
2918 */
2919 if ((entry_ctls & VM_ENTRY_LOAD_EFER) != 0 &&
2920 (reg == VM_REG_GUEST_EFER)) {
2921 vmcs_getreg(&vmx->vmcs[vcpu], running,
2922 VMCS_IDENT(VMCS_ENTRY_CTLS), &ctls);
2923 if (val & EFER_LMA)
2924 ctls |= VM_ENTRY_GUEST_LMA;
2925 else
2926 ctls &= ~VM_ENTRY_GUEST_LMA;
2927 vmcs_setreg(&vmx->vmcs[vcpu], running,
2928 VMCS_IDENT(VMCS_ENTRY_CTLS), ctls);
2929 }
2930
2931 shadow = vmx_shadow_reg(reg);
2932 if (shadow > 0) {
2933 /*
2934 * Store the unmodified value in the shadow
2935 */
2936 error = vmcs_setreg(&vmx->vmcs[vcpu], running,
2937 VMCS_IDENT(shadow), val);
2938 }
2939
2940 if (reg == VM_REG_GUEST_CR3) {
2941 /*
2942 * Invalidate the guest vcpu's TLB mappings to emulate
2943 * the behavior of updating %cr3.
2944 *
2945 * XXX the processor retains global mappings when %cr3
2946 * is updated but vmx_invvpid() does not.
2947 */
2948 pmap = vmx->ctx[vcpu].pmap;
2949 vmx_invvpid(vmx, vcpu, pmap, running);
2950 }
2951 }
2952
2953 return (error);
2954 }
2955
2956 static int
2957 vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
2958 {
2959 int hostcpu, running;
2960 struct vmx *vmx = arg;
2961
2962 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
2963 if (running && hostcpu != curcpu)
2964 panic("vmx_getdesc: %s%d is running", vm_name(vmx->vm), vcpu);
2965
2966 return (vmcs_getdesc(&vmx->vmcs[vcpu], running, reg, desc));
2967 }
2968
2969 static int
2970 vmx_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
2971 {
2972 int hostcpu, running;
2973 struct vmx *vmx = arg;
2974
2975 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
2976 if (running && hostcpu != curcpu)
2977 panic("vmx_setdesc: %s%d is running", vm_name(vmx->vm), vcpu);
2978
2979 return (vmcs_setdesc(&vmx->vmcs[vcpu], running, reg, desc));
2980 }
2981
2982 static int
2983 vmx_getcap(void *arg, int vcpu, int type, int *retval)
2984 {
2985 struct vmx *vmx = arg;
2986 int vcap;
2987 int ret;
2988
2989 ret = ENOENT;
2990
2991 vcap = vmx->cap[vcpu].set;
2992
2993 switch (type) {
2994 case VM_CAP_HALT_EXIT:
2995 if (cap_halt_exit)
2996 ret = 0;
2997 break;
2998 case VM_CAP_PAUSE_EXIT:
2999 if (cap_pause_exit)
3000 ret = 0;
3001 break;
3002 case VM_CAP_MTRAP_EXIT:
3003 if (cap_monitor_trap)
3004 ret = 0;
3005 break;
3006 case VM_CAP_UNRESTRICTED_GUEST:
3007 if (cap_unrestricted_guest)
3008 ret = 0;
3009 break;
3010 case VM_CAP_ENABLE_INVPCID:
3011 if (cap_invpcid)
3012 ret = 0;
3013 break;
3014 default:
3015 break;
3016 }
3017
3018 if (ret == 0)
3019 *retval = (vcap & (1 << type)) ? 1 : 0;
3020
3021 return (ret);
3022 }
3023
3024 static int
3025 vmx_setcap(void *arg, int vcpu, int type, int val)
3026 {
3027 struct vmx *vmx = arg;
3028 struct vmcs *vmcs = &vmx->vmcs[vcpu];
3029 uint32_t baseval;
3030 uint32_t *pptr;
3031 int error;
3032 int flag;
3033 int reg;
3034 int retval;
3035
3036 retval = ENOENT;
3037 pptr = NULL;
3038
3039 switch (type) {
3040 case VM_CAP_HALT_EXIT:
3041 if (cap_halt_exit) {
3042 retval = 0;
3043 pptr = &vmx->cap[vcpu].proc_ctls;
3044 baseval = *pptr;
3045 flag = PROCBASED_HLT_EXITING;
3046 reg = VMCS_PRI_PROC_BASED_CTLS;
3047 }
3048 break;
3049 case VM_CAP_MTRAP_EXIT:
3050 if (cap_monitor_trap) {
3051 retval = 0;
3052 pptr = &vmx->cap[vcpu].proc_ctls;
3053 baseval = *pptr;
3054 flag = PROCBASED_MTF;
3055 reg = VMCS_PRI_PROC_BASED_CTLS;
3056 }
3057 break;
3058 case VM_CAP_PAUSE_EXIT:
3059 if (cap_pause_exit) {
3060 retval = 0;
3061 pptr = &vmx->cap[vcpu].proc_ctls;
3062 baseval = *pptr;
3063 flag = PROCBASED_PAUSE_EXITING;
3064 reg = VMCS_PRI_PROC_BASED_CTLS;
3065 }
3066 break;
3067 case VM_CAP_UNRESTRICTED_GUEST:
3068 if (cap_unrestricted_guest) {
3069 retval = 0;
3070 pptr = &vmx->cap[vcpu].proc_ctls2;
3071 baseval = *pptr;
3072 flag = PROCBASED2_UNRESTRICTED_GUEST;
3073 reg = VMCS_SEC_PROC_BASED_CTLS;
3074 }
3075 break;
3076 case VM_CAP_ENABLE_INVPCID:
3077 if (cap_invpcid) {
3078 retval = 0;
3079 pptr = &vmx->cap[vcpu].proc_ctls2;
3080 baseval = *pptr;
3081 flag = PROCBASED2_ENABLE_INVPCID;
3082 reg = VMCS_SEC_PROC_BASED_CTLS;
3083 }
3084 break;
3085 default:
3086 break;
3087 }
3088
3089 if (retval == 0) {
3090 if (val) {
3091 baseval |= flag;
3092 } else {
3093 baseval &= ~flag;
3094 }
3095 VMPTRLD(vmcs);
3096 error = vmwrite(reg, baseval);
3097 VMCLEAR(vmcs);
3098
3099 if (error) {
3100 retval = error;
3101 } else {
3102 /*
3103 			 * Update the optional stored flags and record the
3104 			 * new setting.
3105 */
3106 if (pptr != NULL) {
3107 *pptr = baseval;
3108 }
3109
3110 if (val) {
3111 vmx->cap[vcpu].set |= (1 << type);
3112 } else {
3113 vmx->cap[vcpu].set &= ~(1 << type);
3114 }
3115 }
3116 }
3117
3118 return (retval);
3119 }
3120
3121 struct vlapic_vtx {
3122 struct vlapic vlapic;
3123 struct pir_desc *pir_desc;
3124 struct vmx *vmx;
3125 };
3126
3127 #define VMX_CTR_PIR(vm, vcpuid, pir_desc, notify, vector, level, msg) \
3128 do { \
3129 VCPU_CTR2(vm, vcpuid, msg " assert %s-triggered vector %d", \
3130 level ? "level" : "edge", vector); \
3131 VCPU_CTR1(vm, vcpuid, msg " pir0 0x%016lx", pir_desc->pir[0]); \
3132 VCPU_CTR1(vm, vcpuid, msg " pir1 0x%016lx", pir_desc->pir[1]); \
3133 VCPU_CTR1(vm, vcpuid, msg " pir2 0x%016lx", pir_desc->pir[2]); \
3134 VCPU_CTR1(vm, vcpuid, msg " pir3 0x%016lx", pir_desc->pir[3]); \
3135 VCPU_CTR1(vm, vcpuid, msg " notify: %s", notify ? "yes" : "no");\
3136 } while (0)
3137
3138 /*
3139 * vlapic->ops handlers that utilize the APICv hardware assist described in
3140 * Chapter 29 of the Intel SDM.
3141 */
3142 static int
3143 vmx_set_intr_ready(struct vlapic *vlapic, int vector, bool level)
3144 {
3145 struct vlapic_vtx *vlapic_vtx;
3146 struct pir_desc *pir_desc;
3147 uint64_t mask;
3148 int idx, notify;
3149
3150 vlapic_vtx = (struct vlapic_vtx *)vlapic;
3151 pir_desc = vlapic_vtx->pir_desc;
3152
3153 /*
3154 * Keep track of interrupt requests in the PIR descriptor. This is
3155 * because the virtual APIC page pointed to by the VMCS cannot be
3156 * modified if the vcpu is running.
3157 */
3158 idx = vector / 64;
3159 mask = 1UL << (vector % 64);
3160 atomic_set_long(&pir_desc->pir[idx], mask);
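	/*
	 * Request a notification only on the 0 -> 1 transition of the
	 * 'pending' bit; if it was already set then a notification for
	 * this vcpu is already outstanding.
	 */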
3161 notify = atomic_cmpset_long(&pir_desc->pending, 0, 1);
3162
3163 VMX_CTR_PIR(vlapic->vm, vlapic->vcpuid, pir_desc, notify, vector,
3164 level, "vmx_set_intr_ready");
3165 return (notify);
3166 }
3167
3168 static int
3169 vmx_pending_intr(struct vlapic *vlapic, int *vecptr)
3170 {
3171 struct vlapic_vtx *vlapic_vtx;
3172 struct pir_desc *pir_desc;
3173 struct LAPIC *lapic;
3174 uint64_t pending, pirval;
3175 uint32_t ppr, vpr;
3176 int i;
3177
3178 /*
3179 * This function is only expected to be called from the 'HLT' exit
3180 * handler which does not care about the vector that is pending.
3181 */
3182 KASSERT(vecptr == NULL, ("vmx_pending_intr: vecptr must be NULL"));
3183
3184 vlapic_vtx = (struct vlapic_vtx *)vlapic;
3185 pir_desc = vlapic_vtx->pir_desc;
3186
3187 pending = atomic_load_acq_long(&pir_desc->pending);
3188 if (!pending)
3189 return (0); /* common case */
3190
3191 /*
3192 * If there is an interrupt pending then it will be recognized only
3193 * if its priority is greater than the processor priority.
3194 *
3195 * Special case: if the processor priority is zero then any pending
3196 * interrupt will be recognized.
3197 */
3198 lapic = vlapic->apic_page;
3199 ppr = lapic->ppr & 0xf0;
3200 if (ppr == 0)
3201 return (1);
3202
3203 VCPU_CTR1(vlapic->vm, vlapic->vcpuid, "HLT with non-zero PPR %d",
3204 lapic->ppr);
3205
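	/*
	 * The priority class of an interrupt is the upper nibble of its
	 * vector, so compare the class of the highest vector set in the
	 * PIR against PPR[7:4].
	 */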
3206 for (i = 3; i >= 0; i--) {
3207 pirval = pir_desc->pir[i];
3208 if (pirval != 0) {
3209 vpr = (i * 64 + flsl(pirval) - 1) & 0xf0;
3210 return (vpr > ppr);
3211 }
3212 }
3213 return (0);
3214 }
3215
3216 static void
3217 vmx_intr_accepted(struct vlapic *vlapic, int vector)
3218 {
3219
3220 panic("vmx_intr_accepted: not expected to be called");
3221 }
3222
3223 static void
3224 vmx_set_tmr(struct vlapic *vlapic, int vector, bool level)
3225 {
3226 struct vlapic_vtx *vlapic_vtx;
3227 struct vmx *vmx;
3228 struct vmcs *vmcs;
3229 uint64_t mask, val;
3230
3231 KASSERT(vector >= 0 && vector <= 255, ("invalid vector %d", vector));
3232 KASSERT(!vcpu_is_running(vlapic->vm, vlapic->vcpuid, NULL),
3233 ("vmx_set_tmr: vcpu cannot be running"));
3234
3235 vlapic_vtx = (struct vlapic_vtx *)vlapic;
3236 vmx = vlapic_vtx->vmx;
3237 vmcs = &vmx->vmcs[vlapic->vcpuid];
3238 mask = 1UL << (vector % 64);
3239
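	/*
	 * Update the vector's bit in the EOI-exit bitmap.  A set bit causes
	 * a virtualized EOI for that vector to trigger an EOI-induced
	 * VM-exit, which is how level-triggered interrupts are handled.
	 */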
3240 VMPTRLD(vmcs);
3241 val = vmcs_read(VMCS_EOI_EXIT(vector));
3242 if (level)
3243 val |= mask;
3244 else
3245 val &= ~mask;
3246 vmcs_write(VMCS_EOI_EXIT(vector), val);
3247 VMCLEAR(vmcs);
3248 }
3249
3250 static void
3251 vmx_enable_x2apic_mode(struct vlapic *vlapic)
3252 {
3253 struct vmx *vmx;
3254 struct vmcs *vmcs;
3255 uint32_t proc_ctls2;
3256 int vcpuid, error;
3257
3258 vcpuid = vlapic->vcpuid;
3259 vmx = ((struct vlapic_vtx *)vlapic)->vmx;
3260 vmcs = &vmx->vmcs[vcpuid];
3261
3262 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
3263 KASSERT((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) != 0,
3264 ("%s: invalid proc_ctls2 %#x", __func__, proc_ctls2));
3265
3266 proc_ctls2 &= ~PROCBASED2_VIRTUALIZE_APIC_ACCESSES;
3267 proc_ctls2 |= PROCBASED2_VIRTUALIZE_X2APIC_MODE;
3268 vmx->cap[vcpuid].proc_ctls2 = proc_ctls2;
3269
3270 VMPTRLD(vmcs);
3271 vmcs_write(VMCS_SEC_PROC_BASED_CTLS, proc_ctls2);
3272 VMCLEAR(vmcs);
3273
3274 if (vlapic->vcpuid == 0) {
3275 /*
3276 * The nested page table mappings are shared by all vcpus
3277 * so unmap the APIC access page just once.
3278 */
3279 error = vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE);
3280 KASSERT(error == 0, ("%s: vm_unmap_mmio error %d",
3281 __func__, error));
3282
3283 /*
3284 * The MSR bitmap is shared by all vcpus so modify it only
3285 * once in the context of vcpu 0.
3286 */
3287 error = vmx_allow_x2apic_msrs(vmx);
3288 KASSERT(error == 0, ("%s: vmx_allow_x2apic_msrs error %d",
3289 __func__, error));
3290 }
3291 }
3292
3293 static void
3294 vmx_post_intr(struct vlapic *vlapic, int hostcpu)
3295 {
3296
3297 ipi_cpu(hostcpu, pirvec);
3298 }
3299
3300 /*
3301 * Transfer the pending interrupts in the PIR descriptor to the IRR
3302 * in the virtual APIC page.
3303 */
3304 static void
3305 vmx_inject_pir(struct vlapic *vlapic)
3306 {
3307 struct vlapic_vtx *vlapic_vtx;
3308 struct pir_desc *pir_desc;
3309 struct LAPIC *lapic;
3310 uint64_t val, pirval;
3311 int rvi, pirbase = -1;
3312 uint16_t intr_status_old, intr_status_new;
3313
3314 vlapic_vtx = (struct vlapic_vtx *)vlapic;
3315 pir_desc = vlapic_vtx->pir_desc;
3316 if (atomic_cmpset_long(&pir_desc->pending, 1, 0) == 0) {
3317 VCPU_CTR0(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: "
3318 "no posted interrupt pending");
3319 return;
3320 }
3321
3322 pirval = 0;
3323 pirbase = -1;
3324 lapic = vlapic->apic_page;
3325
3326 val = atomic_readandclear_long(&pir_desc->pir[0]);
3327 if (val != 0) {
3328 lapic->irr0 |= val;
3329 lapic->irr1 |= val >> 32;
3330 pirbase = 0;
3331 pirval = val;
3332 }
3333
3334 val = atomic_readandclear_long(&pir_desc->pir[1]);
3335 if (val != 0) {
3336 lapic->irr2 |= val;
3337 lapic->irr3 |= val >> 32;
3338 pirbase = 64;
3339 pirval = val;
3340 }
3341
3342 val = atomic_readandclear_long(&pir_desc->pir[2]);
3343 if (val != 0) {
3344 lapic->irr4 |= val;
3345 lapic->irr5 |= val >> 32;
3346 pirbase = 128;
3347 pirval = val;
3348 }
3349
3350 val = atomic_readandclear_long(&pir_desc->pir[3]);
3351 if (val != 0) {
3352 lapic->irr6 |= val;
3353 lapic->irr7 |= val >> 32;
3354 pirbase = 192;
3355 pirval = val;
3356 }
3357
3358 VLAPIC_CTR_IRR(vlapic, "vmx_inject_pir");
3359
3360 /*
3361 * Update RVI so the processor can evaluate pending virtual
3362 * interrupts on VM-entry.
3363 *
3364 * It is possible for pirval to be 0 here, even though the
3365 * pending bit has been set. The scenario is:
3366 * CPU-Y is sending a posted interrupt to CPU-X, which
3367 * is running a guest and processing posted interrupts in h/w.
3368 * CPU-X will eventually exit and the state seen in s/w is
3369 * the pending bit set, but no PIR bits set.
3370 *
3371 * CPU-X CPU-Y
3372 * (vm running) (host running)
3373 * rx posted interrupt
3374 * CLEAR pending bit
3375 * SET PIR bit
3376 * READ/CLEAR PIR bits
3377 * SET pending bit
3378 * (vm exit)
3379 * pending bit set, PIR 0
3380 */
3381 if (pirval != 0) {
3382 rvi = pirbase + flsl(pirval) - 1;
3383 intr_status_old = vmcs_read(VMCS_GUEST_INTR_STATUS);
3384 intr_status_new = (intr_status_old & 0xFF00) | rvi;
3385 if (intr_status_new > intr_status_old) {
3386 vmcs_write(VMCS_GUEST_INTR_STATUS, intr_status_new);
3387 VCPU_CTR2(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: "
3388 "guest_intr_status changed from 0x%04x to 0x%04x",
3389 intr_status_old, intr_status_new);
3390 }
3391 }
3392 }
3393
3394 static struct vlapic *
3395 vmx_vlapic_init(void *arg, int vcpuid)
3396 {
3397 struct vmx *vmx;
3398 struct vlapic *vlapic;
3399 struct vlapic_vtx *vlapic_vtx;
3400
3401 vmx = arg;
3402
3403 vlapic = malloc(sizeof(struct vlapic_vtx), M_VLAPIC, M_WAITOK | M_ZERO);
3404 vlapic->vm = vmx->vm;
3405 vlapic->vcpuid = vcpuid;
3406 vlapic->apic_page = (struct LAPIC *)&vmx->apic_page[vcpuid];
3407
3408 vlapic_vtx = (struct vlapic_vtx *)vlapic;
3409 vlapic_vtx->pir_desc = &vmx->pir_desc[vcpuid];
3410 vlapic_vtx->vmx = vmx;
3411
3412 if (virtual_interrupt_delivery) {
3413 vlapic->ops.set_intr_ready = vmx_set_intr_ready;
3414 vlapic->ops.pending_intr = vmx_pending_intr;
3415 vlapic->ops.intr_accepted = vmx_intr_accepted;
3416 vlapic->ops.set_tmr = vmx_set_tmr;
3417 vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode;
3418 }
3419
3420 if (posted_interrupts)
3421 vlapic->ops.post_intr = vmx_post_intr;
3422
3423 vlapic_init(vlapic);
3424
3425 return (vlapic);
3426 }
3427
3428 static void
3429 vmx_vlapic_cleanup(void *arg, struct vlapic *vlapic)
3430 {
3431
3432 vlapic_cleanup(vlapic);
3433 free(vlapic, M_VLAPIC);
3434 }
3435
3436 struct vmm_ops vmm_ops_intel = {
3437 vmx_init,
3438 vmx_cleanup,
3439 vmx_restore,
3440 vmx_vminit,
3441 vmx_run,
3442 vmx_vmcleanup,
3443 vmx_getreg,
3444 vmx_setreg,
3445 vmx_getdesc,
3446 vmx_setdesc,
3447 vmx_getcap,
3448 vmx_setcap,
3449 ept_vmspace_alloc,
3450 ept_vmspace_free,
3451 vmx_vlapic_init,
3452 vmx_vlapic_cleanup,
3453 };