FreeBSD/Linux Kernel Cross Reference
sys/amd64/vmm/vmm.c
1 /*-
2 * Copyright (c) 2011 NetApp, Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 * $FreeBSD: releng/11.0/sys/amd64/vmm/vmm.c 296103 2016-02-26 16:18:47Z marcel $
27 */
28
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD: releng/11.0/sys/amd64/vmm/vmm.c 296103 2016-02-26 16:18:47Z marcel $");
31
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/kernel.h>
35 #include <sys/module.h>
36 #include <sys/sysctl.h>
37 #include <sys/malloc.h>
38 #include <sys/pcpu.h>
39 #include <sys/lock.h>
40 #include <sys/mutex.h>
41 #include <sys/proc.h>
42 #include <sys/rwlock.h>
43 #include <sys/sched.h>
44 #include <sys/smp.h>
46
47 #include <vm/vm.h>
48 #include <vm/vm_object.h>
49 #include <vm/vm_page.h>
50 #include <vm/pmap.h>
51 #include <vm/vm_map.h>
52 #include <vm/vm_extern.h>
53 #include <vm/vm_param.h>
54
55 #include <machine/cpu.h>
56 #include <machine/pcb.h>
57 #include <machine/smp.h>
58 #include <x86/psl.h>
59 #include <x86/apicreg.h>
60
61 #include <machine/vmm.h>
62 #include <machine/vmm_dev.h>
63 #include <machine/vmm_instruction_emul.h>
64
65 #include "vmm_ioport.h"
66 #include "vmm_ktr.h"
67 #include "vmm_host.h"
68 #include "vmm_mem.h"
69 #include "vmm_util.h"
70 #include "vatpic.h"
71 #include "vatpit.h"
72 #include "vhpet.h"
73 #include "vioapic.h"
74 #include "vlapic.h"
75 #include "vpmtmr.h"
76 #include "vrtc.h"
77 #include "vmm_stat.h"
78 #include "vmm_lapic.h"
79
80 #include "io/ppt.h"
81 #include "io/iommu.h"
82
83 struct vlapic;
84
85 /*
86 * Initialization:
87 * (a) allocated when vcpu is created
88 * (i) initialized when vcpu is created and when it is reinitialized
89 * (o) initialized the first time the vcpu is created
90 * (x) initialized before use
91 */
92 struct vcpu {
93 struct mtx mtx; /* (o) protects 'state' and 'hostcpu' */
94 enum vcpu_state state; /* (o) vcpu state */
95 int hostcpu; /* (o) vcpu's host cpu */
96 int reqidle; /* (i) request vcpu to idle */
97 struct vlapic *vlapic; /* (i) APIC device model */
98 enum x2apic_state x2apic_state; /* (i) APIC mode */
99 uint64_t exitintinfo; /* (i) events pending at VM exit */
100 int nmi_pending; /* (i) NMI pending */
101 int extint_pending; /* (i) INTR pending */
102 int exception_pending; /* (i) exception pending */
103 int exc_vector; /* (x) exception collateral */
104 int exc_errcode_valid;
105 uint32_t exc_errcode;
106 struct savefpu *guestfpu; /* (a,i) guest fpu state */
107 uint64_t guest_xcr0; /* (i) guest %xcr0 register */
108 void *stats; /* (a,i) statistics */
109 struct vm_exit exitinfo; /* (x) exit reason and collateral */
110 uint64_t nextrip; /* (x) next instruction to execute */
111 };
112
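/*
 * The vcpu lock is a spin mutex because it is taken inside the
 * critical section around VMRUN(); threads that need to sleep on it
 * use msleep_spin() (see vcpu_set_state_locked() and vm_handle_hlt()).
 */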
113 #define vcpu_lock_initialized(v) mtx_initialized(&((v)->mtx))
114 #define vcpu_lock_init(v) mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN)
115 #define vcpu_lock(v) mtx_lock_spin(&((v)->mtx))
116 #define vcpu_unlock(v) mtx_unlock_spin(&((v)->mtx))
117 #define vcpu_assert_locked(v) mtx_assert(&((v)->mtx), MA_OWNED)
118
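/*
 * Guest memory is described in two layers: a 'mem_seg' names a backing
 * VM object of a given length, and a 'mem_map' maps a page-aligned
 * range of a segment into the guest physical address space. See
 * vm_alloc_memseg() and vm_mmap_memseg() below.
 */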
119 struct mem_seg {
120 size_t len;
121 bool sysmem;
122 struct vm_object *object;
123 };
124 #define VM_MAX_MEMSEGS 3
125
126 struct mem_map {
127 vm_paddr_t gpa;
128 size_t len;
129 vm_ooffset_t segoff;
130 int segid;
131 int prot;
132 int flags;
133 };
134 #define VM_MAX_MEMMAPS 4
135
136 /*
137 * Initialization:
138 * (o) initialized the first time the VM is created
139 * (i) initialized when VM is created and when it is reinitialized
140 * (x) initialized before use
141 */
142 struct vm {
143 void *cookie; /* (i) cpu-specific data */
144 void *iommu; /* (x) iommu-specific data */
145 struct vhpet *vhpet; /* (i) virtual HPET */
146 struct vioapic *vioapic; /* (i) virtual ioapic */
147 struct vatpic *vatpic; /* (i) virtual atpic */
148 struct vatpit *vatpit; /* (i) virtual atpit */
149 struct vpmtmr *vpmtmr; /* (i) virtual ACPI PM timer */
150 struct vrtc *vrtc; /* (o) virtual RTC */
151 volatile cpuset_t active_cpus; /* (i) active vcpus */
152 int suspend; /* (i) stop VM execution */
153 volatile cpuset_t suspended_cpus; /* (i) suspended vcpus */
154 volatile cpuset_t halted_cpus; /* (x) cpus in a hard halt */
155 cpuset_t rendezvous_req_cpus; /* (x) rendezvous requested */
156 cpuset_t rendezvous_done_cpus; /* (x) rendezvous finished */
157 void *rendezvous_arg; /* (x) rendezvous func/arg */
158 vm_rendezvous_func_t rendezvous_func;
159 struct mtx rendezvous_mtx; /* (o) rendezvous lock */
160 struct mem_map mem_maps[VM_MAX_MEMMAPS]; /* (i) guest address space */
161 struct mem_seg mem_segs[VM_MAX_MEMSEGS]; /* (o) guest memory regions */
162 struct vmspace *vmspace; /* (o) guest's address space */
163 char name[VM_MAX_NAMELEN]; /* (o) virtual machine name */
164 struct vcpu vcpu[VM_MAXCPU]; /* (i) guest vcpus */
165 };
166
167 static int vmm_initialized;
168
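/*
 * 'ops' is the dispatch table for the hardware backend, set to
 * vmm_ops_intel (VT-x) or vmm_ops_amd (SVM) in vmm_init(). The
 * wrappers below indirect through it and fail with ENXIO or NULL if
 * no backend has been selected.
 */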
169 static struct vmm_ops *ops;
170 #define VMM_INIT(num) (ops != NULL ? (*ops->init)(num) : 0)
171 #define VMM_CLEANUP() (ops != NULL ? (*ops->cleanup)() : 0)
172 #define VMM_RESUME() (ops != NULL ? (*ops->resume)() : 0)
173
174 #define VMINIT(vm, pmap) (ops != NULL ? (*ops->vminit)(vm, pmap) : NULL)
175 #define VMRUN(vmi, vcpu, rip, pmap, evinfo) \
176 (ops != NULL ? (*ops->vmrun)(vmi, vcpu, rip, pmap, evinfo) : ENXIO)
177 #define VMCLEANUP(vmi) (ops != NULL ? (*ops->vmcleanup)(vmi) : NULL)
178 #define VMSPACE_ALLOC(min, max) \
179 (ops != NULL ? (*ops->vmspace_alloc)(min, max) : NULL)
180 #define VMSPACE_FREE(vmspace) \
181 (ops != NULL ? (*ops->vmspace_free)(vmspace) : ENXIO)
182 #define VMGETREG(vmi, vcpu, num, retval) \
183 (ops != NULL ? (*ops->vmgetreg)(vmi, vcpu, num, retval) : ENXIO)
184 #define VMSETREG(vmi, vcpu, num, val) \
185 (ops != NULL ? (*ops->vmsetreg)(vmi, vcpu, num, val) : ENXIO)
186 #define VMGETDESC(vmi, vcpu, num, desc) \
187 (ops != NULL ? (*ops->vmgetdesc)(vmi, vcpu, num, desc) : ENXIO)
188 #define VMSETDESC(vmi, vcpu, num, desc) \
189 (ops != NULL ? (*ops->vmsetdesc)(vmi, vcpu, num, desc) : ENXIO)
190 #define VMGETCAP(vmi, vcpu, num, retval) \
191 (ops != NULL ? (*ops->vmgetcap)(vmi, vcpu, num, retval) : ENXIO)
192 #define VMSETCAP(vmi, vcpu, num, val) \
193 (ops != NULL ? (*ops->vmsetcap)(vmi, vcpu, num, val) : ENXIO)
194 #define VLAPIC_INIT(vmi, vcpu) \
195 (ops != NULL ? (*ops->vlapic_init)(vmi, vcpu) : NULL)
196 #define VLAPIC_CLEANUP(vmi, vlapic) \
197 (ops != NULL ? (*ops->vlapic_cleanup)(vmi, vlapic) : NULL)
198
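/*
 * Setting CR0.TS causes the next FPU/SSE instruction to trap with #NM,
 * so stray host FPU use while the guest's state is loaded is caught;
 * clts() clears TS around the fpusave()/fpurestore() calls below.
 */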
199 #define fpu_start_emulating() load_cr0(rcr0() | CR0_TS)
200 #define fpu_stop_emulating() clts()
201
202 static MALLOC_DEFINE(M_VM, "vm", "vm");
203
204 /* statistics */
205 static VMM_STAT(VCPU_TOTAL_RUNTIME, "vcpu total runtime");
206
207 SYSCTL_NODE(_hw, OID_AUTO, vmm, CTLFLAG_RW, NULL, NULL);
208
209 /*
210 * Halt the guest if all vcpus are executing a HLT instruction with
211 * interrupts disabled.
212 */
213 static int halt_detection_enabled = 1;
214 SYSCTL_INT(_hw_vmm, OID_AUTO, halt_detection, CTLFLAG_RDTUN,
215 &halt_detection_enabled, 0,
216 "Halt VM if all vcpus execute HLT with interrupts disabled");
217
218 static int vmm_ipinum;
219 SYSCTL_INT(_hw_vmm, OID_AUTO, ipinum, CTLFLAG_RD, &vmm_ipinum, 0,
220 "IPI vector used for vcpu notifications");
221
222 static int trace_guest_exceptions;
223 SYSCTL_INT(_hw_vmm, OID_AUTO, trace_guest_exceptions, CTLFLAG_RDTUN,
224 &trace_guest_exceptions, 0,
225 "Trap into hypervisor on all guest exceptions and reflect them back");
226
227 static int vmm_force_iommu = 0;
228 TUNABLE_INT("hw.vmm.force_iommu", &vmm_force_iommu);
229 SYSCTL_INT(_hw_vmm, OID_AUTO, force_iommu, CTLFLAG_RDTUN, &vmm_force_iommu, 0,
230 "Force use of I/O MMU even if no passthrough devices were found.");
231
232 static void vm_free_memmap(struct vm *vm, int ident);
233 static bool sysmem_mapping(struct vm *vm, struct mem_map *mm);
234 static void vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr);
235
236 #ifdef KTR
237 static const char *
238 vcpu_state2str(enum vcpu_state state)
239 {
240
241 switch (state) {
242 case VCPU_IDLE:
243 return ("idle");
244 case VCPU_FROZEN:
245 return ("frozen");
246 case VCPU_RUNNING:
247 return ("running");
248 case VCPU_SLEEPING:
249 return ("sleeping");
250 default:
251 return ("unknown");
252 }
253 }
254 #endif
255
256 static void
257 vcpu_cleanup(struct vm *vm, int i, bool destroy)
258 {
259 struct vcpu *vcpu = &vm->vcpu[i];
260
261 VLAPIC_CLEANUP(vm->cookie, vcpu->vlapic);
262 if (destroy) {
263 vmm_stat_free(vcpu->stats);
264 fpu_save_area_free(vcpu->guestfpu);
265 }
266 }
267
268 static void
269 vcpu_init(struct vm *vm, int vcpu_id, bool create)
270 {
271 struct vcpu *vcpu;
272
273 KASSERT(vcpu_id >= 0 && vcpu_id < VM_MAXCPU,
274 ("vcpu_init: invalid vcpu %d", vcpu_id));
275
276 vcpu = &vm->vcpu[vcpu_id];
277
278 if (create) {
279 KASSERT(!vcpu_lock_initialized(vcpu), ("vcpu %d already "
280 "initialized", vcpu_id));
281 vcpu_lock_init(vcpu);
282 vcpu->state = VCPU_IDLE;
283 vcpu->hostcpu = NOCPU;
284 vcpu->guestfpu = fpu_save_area_alloc();
285 vcpu->stats = vmm_stat_alloc();
286 }
287
288 vcpu->vlapic = VLAPIC_INIT(vm->cookie, vcpu_id);
289 vm_set_x2apic_state(vm, vcpu_id, X2APIC_DISABLED);
290 vcpu->reqidle = 0;
291 vcpu->exitintinfo = 0;
292 vcpu->nmi_pending = 0;
293 vcpu->extint_pending = 0;
294 vcpu->exception_pending = 0;
295 vcpu->guest_xcr0 = XFEATURE_ENABLED_X87;
296 fpu_save_area_reset(vcpu->guestfpu);
297 vmm_stat_init(vcpu->stats);
298 }
299
300 int
301 vcpu_trace_exceptions(struct vm *vm, int vcpuid)
302 {
303
304 return (trace_guest_exceptions);
305 }
306
307 struct vm_exit *
308 vm_exitinfo(struct vm *vm, int cpuid)
309 {
310 struct vcpu *vcpu;
311
312 if (cpuid < 0 || cpuid >= VM_MAXCPU)
313 panic("vm_exitinfo: invalid cpuid %d", cpuid);
314
315 vcpu = &vm->vcpu[cpuid];
316
317 return (&vcpu->exitinfo);
318 }
319
320 static void
321 vmm_resume(void)
322 {
323 VMM_RESUME();
324 }
325
326 static int
327 vmm_init(void)
328 {
329 int error;
330
331 vmm_host_state_init();
332
333 vmm_ipinum = lapic_ipi_alloc(&IDTVEC(justreturn));
334 if (vmm_ipinum < 0)
335 vmm_ipinum = IPI_AST;
336
337 error = vmm_mem_init();
338 if (error)
339 return (error);
340
341 if (vmm_is_intel())
342 ops = &vmm_ops_intel;
343 else if (vmm_is_amd())
344 ops = &vmm_ops_amd;
345 else
346 return (ENXIO);
347
348 vmm_resume_p = vmm_resume;
349
350 return (VMM_INIT(vmm_ipinum));
351 }
352
353 static int
354 vmm_handler(module_t mod, int what, void *arg)
355 {
356 int error;
357
358 switch (what) {
359 case MOD_LOAD:
360 vmmdev_init();
361 if (vmm_force_iommu || ppt_avail_devices() > 0)
362 iommu_init();
363 error = vmm_init();
364 if (error == 0)
365 vmm_initialized = 1;
366 break;
367 case MOD_UNLOAD:
368 error = vmmdev_cleanup();
369 if (error == 0) {
370 vmm_resume_p = NULL;
371 iommu_cleanup();
372 if (vmm_ipinum != IPI_AST)
373 lapic_ipi_free(vmm_ipinum);
374 error = VMM_CLEANUP();
375 /*
376 * Something bad happened - prevent new
377 * VMs from being created
378 */
379 if (error)
380 vmm_initialized = 0;
381 }
382 break;
383 default:
384 error = 0;
385 break;
386 }
387 return (error);
388 }
389
390 static moduledata_t vmm_kmod = {
391 "vmm",
392 vmm_handler,
393 NULL
394 };
395
396 /*
397 * vmm initialization has the following dependencies:
398 *
399 * - iommu initialization must happen after the pci passthru driver has had
400 * a chance to attach to any passthru devices (after SI_SUB_CONFIGURE).
401 *
402 * - VT-x initialization requires smp_rendezvous() and therefore must happen
403 * after SMP is fully functional (after SI_SUB_SMP).
404 */
405 DECLARE_MODULE(vmm, vmm_kmod, SI_SUB_SMP + 1, SI_ORDER_ANY);
406 MODULE_VERSION(vmm, 1);
407
408 static void
409 vm_init(struct vm *vm, bool create)
410 {
411 int i;
412
413 vm->cookie = VMINIT(vm, vmspace_pmap(vm->vmspace));
414 vm->iommu = NULL;
415 vm->vioapic = vioapic_init(vm);
416 vm->vhpet = vhpet_init(vm);
417 vm->vatpic = vatpic_init(vm);
418 vm->vatpit = vatpit_init(vm);
419 vm->vpmtmr = vpmtmr_init(vm);
420 if (create)
421 vm->vrtc = vrtc_init(vm);
422
423 CPU_ZERO(&vm->active_cpus);
424
425 vm->suspend = 0;
426 CPU_ZERO(&vm->suspended_cpus);
427
428 for (i = 0; i < VM_MAXCPU; i++)
429 vcpu_init(vm, i, create);
430 }
431
432 int
433 vm_create(const char *name, struct vm **retvm)
434 {
435 struct vm *vm;
436 struct vmspace *vmspace;
437
438 /*
439 * If vmm.ko could not be successfully initialized then don't attempt
440 * to create the virtual machine.
441 */
442 if (!vmm_initialized)
443 return (ENXIO);
444
445 if (name == NULL || strlen(name) >= VM_MAX_NAMELEN)
446 return (EINVAL);
447
448 vmspace = VMSPACE_ALLOC(0, VM_MAXUSER_ADDRESS);
449 if (vmspace == NULL)
450 return (ENOMEM);
451
452 vm = malloc(sizeof(struct vm), M_VM, M_WAITOK | M_ZERO);
453 strcpy(vm->name, name);
454 vm->vmspace = vmspace;
455 mtx_init(&vm->rendezvous_mtx, "vm rendezvous lock", 0, MTX_DEF);
456
457 vm_init(vm, true);
458
459 *retvm = vm;
460 return (0);
461 }
462
463 static void
464 vm_cleanup(struct vm *vm, bool destroy)
465 {
466 struct mem_map *mm;
467 int i;
468
469 ppt_unassign_all(vm);
470
471 if (vm->iommu != NULL)
472 iommu_destroy_domain(vm->iommu);
473
474 if (destroy)
475 vrtc_cleanup(vm->vrtc);
476 else
477 vrtc_reset(vm->vrtc);
478 vpmtmr_cleanup(vm->vpmtmr);
479 vatpit_cleanup(vm->vatpit);
480 vhpet_cleanup(vm->vhpet);
481 vatpic_cleanup(vm->vatpic);
482 vioapic_cleanup(vm->vioapic);
483
484 for (i = 0; i < VM_MAXCPU; i++)
485 vcpu_cleanup(vm, i, destroy);
486
487 VMCLEANUP(vm->cookie);
488
489 /*
490 * System memory is removed from the guest address space only when
491 * the VM is destroyed. This is because the mapping remains the same
492 * across VM reset.
493 *
494 * Device memory can be relocated by the guest (e.g. using PCI BARs)
495 * so those mappings are removed on a VM reset.
496 */
497 for (i = 0; i < VM_MAX_MEMMAPS; i++) {
498 mm = &vm->mem_maps[i];
499 if (destroy || !sysmem_mapping(vm, mm))
500 vm_free_memmap(vm, i);
501 }
502
503 if (destroy) {
504 for (i = 0; i < VM_MAX_MEMSEGS; i++)
505 vm_free_memseg(vm, i);
506
507 VMSPACE_FREE(vm->vmspace);
508 vm->vmspace = NULL;
509 }
510 }
511
512 void
513 vm_destroy(struct vm *vm)
514 {
515 vm_cleanup(vm, true);
516 free(vm, M_VM);
517 }
518
519 int
520 vm_reinit(struct vm *vm)
521 {
522 int error;
523
524 /*
525 * A virtual machine can be reset only if all vcpus are suspended.
526 */
527 if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
528 vm_cleanup(vm, false);
529 vm_init(vm, false);
530 error = 0;
531 } else {
532 error = EBUSY;
533 }
534
535 return (error);
536 }
537
538 const char *
539 vm_name(struct vm *vm)
540 {
541 return (vm->name);
542 }
543
544 int
545 vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
546 {
547 vm_object_t obj;
548
549 if ((obj = vmm_mmio_alloc(vm->vmspace, gpa, len, hpa)) == NULL)
550 return (ENOMEM);
551 else
552 return (0);
553 }
554
555 int
556 vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len)
557 {
558
559 vmm_mmio_free(vm->vmspace, gpa, len);
560 return (0);
561 }
562
563 /*
564 * Return 'true' if 'gpa' is allocated in the guest address space.
565 *
566 * This function is called in the context of a running vcpu which acts as
567 * an implicit lock on 'vm->mem_maps[]'.
568 */
569 bool
570 vm_mem_allocated(struct vm *vm, int vcpuid, vm_paddr_t gpa)
571 {
572 struct mem_map *mm;
573 int i;
574
575 #ifdef INVARIANTS
576 int hostcpu, state;
577 state = vcpu_get_state(vm, vcpuid, &hostcpu);
578 KASSERT(state == VCPU_RUNNING && hostcpu == curcpu,
579 ("%s: invalid vcpu state %d/%d", __func__, state, hostcpu));
580 #endif
581
582 for (i = 0; i < VM_MAX_MEMMAPS; i++) {
583 mm = &vm->mem_maps[i];
584 if (mm->len != 0 && gpa >= mm->gpa && gpa < mm->gpa + mm->len)
585 return (true); /* 'gpa' is sysmem or devmem */
586 }
587
588 if (ppt_is_mmio(vm, gpa))
589 return (true); /* 'gpa' is pci passthru mmio */
590
591 return (false);
592 }
593
594 int
595 vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem)
596 {
597 struct mem_seg *seg;
598 vm_object_t obj;
599
600 if (ident < 0 || ident >= VM_MAX_MEMSEGS)
601 return (EINVAL);
602
603 if (len == 0 || (len & PAGE_MASK))
604 return (EINVAL);
605
606 seg = &vm->mem_segs[ident];
607 if (seg->object != NULL) {
608 if (seg->len == len && seg->sysmem == sysmem)
609 return (EEXIST);
610 else
611 return (EINVAL);
612 }
613
614 obj = vm_object_allocate(OBJT_DEFAULT, len >> PAGE_SHIFT);
615 if (obj == NULL)
616 return (ENOMEM);
617
618 seg->len = len;
619 seg->object = obj;
620 seg->sysmem = sysmem;
621 return (0);
622 }
623
624 int
625 vm_get_memseg(struct vm *vm, int ident, size_t *len, bool *sysmem,
626 vm_object_t *objptr)
627 {
628 struct mem_seg *seg;
629
630 if (ident < 0 || ident >= VM_MAX_MEMSEGS)
631 return (EINVAL);
632
633 seg = &vm->mem_segs[ident];
634 if (len)
635 *len = seg->len;
636 if (sysmem)
637 *sysmem = seg->sysmem;
638 if (objptr)
639 *objptr = seg->object;
640 return (0);
641 }
642
643 void
644 vm_free_memseg(struct vm *vm, int ident)
645 {
646 struct mem_seg *seg;
647
648 KASSERT(ident >= 0 && ident < VM_MAX_MEMSEGS,
649 ("%s: invalid memseg ident %d", __func__, ident));
650
651 seg = &vm->mem_segs[ident];
652 if (seg->object != NULL) {
653 vm_object_deallocate(seg->object);
654 bzero(seg, sizeof(struct mem_seg));
655 }
656 }
657
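/*
 * A typical caller allocates a segment and then maps it into the guest
 * address space, e.g. (illustrative values only):
 *
 *	vm_alloc_memseg(vm, 0, 256 * 1024 * 1024, true);
 *	vm_mmap_memseg(vm, 0, 0, 0, 256 * 1024 * 1024, VM_PROT_ALL, 0);
 *
 * which backs guest physical addresses [0, 256MB) with segment 0.
 */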
658 int
659 vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t first,
660 size_t len, int prot, int flags)
661 {
662 struct mem_seg *seg;
663 struct mem_map *m, *map;
664 vm_ooffset_t last;
665 int i, error;
666
667 if (prot == 0 || (prot & ~(VM_PROT_ALL)) != 0)
668 return (EINVAL);
669
670 if (flags & ~VM_MEMMAP_F_WIRED)
671 return (EINVAL);
672
673 if (segid < 0 || segid >= VM_MAX_MEMSEGS)
674 return (EINVAL);
675
676 seg = &vm->mem_segs[segid];
677 if (seg->object == NULL)
678 return (EINVAL);
679
680 last = first + len;
681 if (first < 0 || first >= last || last > seg->len)
682 return (EINVAL);
683
684 if ((gpa | first | last) & PAGE_MASK)
685 return (EINVAL);
686
687 map = NULL;
688 for (i = 0; i < VM_MAX_MEMMAPS; i++) {
689 m = &vm->mem_maps[i];
690 if (m->len == 0) {
691 map = m;
692 break;
693 }
694 }
695
696 if (map == NULL)
697 return (ENOSPC);
698
699 error = vm_map_find(&vm->vmspace->vm_map, seg->object, first, &gpa,
700 len, 0, VMFS_NO_SPACE, prot, prot, 0);
701 if (error != KERN_SUCCESS)
702 return (EFAULT);
703
704 vm_object_reference(seg->object);
705
706 if (flags & VM_MEMMAP_F_WIRED) {
707 error = vm_map_wire(&vm->vmspace->vm_map, gpa, gpa + len,
708 VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
709 if (error != KERN_SUCCESS) {
710 vm_map_remove(&vm->vmspace->vm_map, gpa, gpa + len);
711 return (EFAULT);
712 }
713 }
714
715 map->gpa = gpa;
716 map->len = len;
717 map->segoff = first;
718 map->segid = segid;
719 map->prot = prot;
720 map->flags = flags;
721 return (0);
722 }
723
724 int
725 vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid,
726 vm_ooffset_t *segoff, size_t *len, int *prot, int *flags)
727 {
728 struct mem_map *mm, *mmnext;
729 int i;
730
731 mmnext = NULL;
732 for (i = 0; i < VM_MAX_MEMMAPS; i++) {
733 mm = &vm->mem_maps[i];
734 if (mm->len == 0 || mm->gpa < *gpa)
735 continue;
736 if (mmnext == NULL || mm->gpa < mmnext->gpa)
737 mmnext = mm;
738 }
739
740 if (mmnext != NULL) {
741 *gpa = mmnext->gpa;
742 if (segid)
743 *segid = mmnext->segid;
744 if (segoff)
745 *segoff = mmnext->segoff;
746 if (len)
747 *len = mmnext->len;
748 if (prot)
749 *prot = mmnext->prot;
750 if (flags)
751 *flags = mmnext->flags;
752 return (0);
753 } else {
754 return (ENOENT);
755 }
756 }
757
758 static void
759 vm_free_memmap(struct vm *vm, int ident)
760 {
761 struct mem_map *mm;
762 int error;
763
764 mm = &vm->mem_maps[ident];
765 if (mm->len) {
766 error = vm_map_remove(&vm->vmspace->vm_map, mm->gpa,
767 mm->gpa + mm->len);
768 KASSERT(error == KERN_SUCCESS, ("%s: vm_map_remove error %d",
769 __func__, error));
770 bzero(mm, sizeof(struct mem_map));
771 }
772 }
773
774 static __inline bool
775 sysmem_mapping(struct vm *vm, struct mem_map *mm)
776 {
777
778 if (mm->len != 0 && vm->mem_segs[mm->segid].sysmem)
779 return (true);
780 else
781 return (false);
782 }
783
784 static vm_paddr_t
785 sysmem_maxaddr(struct vm *vm)
786 {
787 struct mem_map *mm;
788 vm_paddr_t maxaddr;
789 int i;
790
791 maxaddr = 0;
792 for (i = 0; i < VM_MAX_MEMMAPS; i++) {
793 mm = &vm->mem_maps[i];
794 if (sysmem_mapping(vm, mm)) {
795 if (maxaddr < mm->gpa + mm->len)
796 maxaddr = mm->gpa + mm->len;
797 }
798 }
799 return (maxaddr);
800 }
801
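/*
 * Add or remove IOMMU translations for every wired sysmem mapping of
 * the VM. On map, each gpa->hpa translation is installed in the VM's
 * domain and the corresponding hpa identity mapping is removed from
 * the host domain; unmap reverses both.
 */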
802 static void
803 vm_iommu_modify(struct vm *vm, boolean_t map)
804 {
805 int i, sz;
806 vm_paddr_t gpa, hpa;
807 struct mem_map *mm;
808 void *vp, *cookie, *host_domain;
809
810 sz = PAGE_SIZE;
811 host_domain = iommu_host_domain();
812
813 for (i = 0; i < VM_MAX_MEMMAPS; i++) {
814 mm = &vm->mem_maps[i];
815 if (!sysmem_mapping(vm, mm))
816 continue;
817
818 if (map) {
819 KASSERT((mm->flags & VM_MEMMAP_F_IOMMU) == 0,
820 ("iommu map found invalid memmap %#lx/%#lx/%#x",
821 mm->gpa, mm->len, mm->flags));
822 if ((mm->flags & VM_MEMMAP_F_WIRED) == 0)
823 continue;
824 mm->flags |= VM_MEMMAP_F_IOMMU;
825 } else {
826 if ((mm->flags & VM_MEMMAP_F_IOMMU) == 0)
827 continue;
828 mm->flags &= ~VM_MEMMAP_F_IOMMU;
829 KASSERT((mm->flags & VM_MEMMAP_F_WIRED) != 0,
830 ("iommu unmap found invalid memmap %#lx/%#lx/%#x",
831 mm->gpa, mm->len, mm->flags));
832 }
833
834 gpa = mm->gpa;
835 while (gpa < mm->gpa + mm->len) {
836 vp = vm_gpa_hold(vm, -1, gpa, PAGE_SIZE, VM_PROT_WRITE,
837 &cookie);
838 KASSERT(vp != NULL, ("vm(%s) could not map gpa %#lx",
839 vm_name(vm), gpa));
840
841 vm_gpa_release(cookie);
842
843 hpa = DMAP_TO_PHYS((uintptr_t)vp);
844 if (map) {
845 iommu_create_mapping(vm->iommu, gpa, hpa, sz);
846 iommu_remove_mapping(host_domain, hpa, sz);
847 } else {
848 iommu_remove_mapping(vm->iommu, gpa, sz);
849 iommu_create_mapping(host_domain, hpa, hpa, sz);
850 }
851
852 gpa += PAGE_SIZE;
853 }
854 }
855
856 /*
857 * Invalidate the cached translations associated with the domain
858 * from which pages were removed.
859 */
860 if (map)
861 iommu_invalidate_tlb(host_domain);
862 else
863 iommu_invalidate_tlb(vm->iommu);
864 }
865
866 #define vm_iommu_unmap(vm) vm_iommu_modify((vm), FALSE)
867 #define vm_iommu_map(vm) vm_iommu_modify((vm), TRUE)
868
869 int
870 vm_unassign_pptdev(struct vm *vm, int bus, int slot, int func)
871 {
872 int error;
873
874 error = ppt_unassign_device(vm, bus, slot, func);
875 if (error)
876 return (error);
877
878 if (ppt_assigned_devices(vm) == 0)
879 vm_iommu_unmap(vm);
880
881 return (0);
882 }
883
884 int
885 vm_assign_pptdev(struct vm *vm, int bus, int slot, int func)
886 {
887 int error;
888 vm_paddr_t maxaddr;
889
890 /* Set up the IOMMU to do the 'gpa' to 'hpa' translation */
891 if (ppt_assigned_devices(vm) == 0) {
892 KASSERT(vm->iommu == NULL,
893 ("vm_assign_pptdev: iommu must be NULL"));
894 maxaddr = sysmem_maxaddr(vm);
895 vm->iommu = iommu_create_domain(maxaddr);
896 vm_iommu_map(vm);
897 }
898
899 error = ppt_assign_device(vm, bus, slot, func);
900 return (error);
901 }
902
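/*
 * Hold the guest page backing 'gpa' and return a direct map pointer to
 * it. The range must not cross a page boundary and the caller must
 * drop the hold with vm_gpa_release(), e.g.:
 *
 *	vp = vm_gpa_hold(vm, vcpuid, gpa, len, VM_PROT_READ, &cookie);
 *	if (vp != NULL) {
 *		... access up to 'len' bytes at 'vp' ...
 *		vm_gpa_release(cookie);
 *	}
 */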
903 void *
904 vm_gpa_hold(struct vm *vm, int vcpuid, vm_paddr_t gpa, size_t len, int reqprot,
905 void **cookie)
906 {
907 int i, count, pageoff;
908 struct mem_map *mm;
909 vm_page_t m;
910 #ifdef INVARIANTS
911 /*
912 * All vcpus are frozen by ioctls that modify the memory map
913 	 * (e.g. VM_MMAP_MEMSEG). Therefore 'vm->mem_maps[]' stability is
914 * guaranteed if at least one vcpu is in the VCPU_FROZEN state.
915 */
916 int state;
917 	KASSERT(vcpuid >= -1 && vcpuid < VM_MAXCPU, ("%s: invalid vcpuid %d",
918 __func__, vcpuid));
919 for (i = 0; i < VM_MAXCPU; i++) {
920 if (vcpuid != -1 && vcpuid != i)
921 continue;
922 state = vcpu_get_state(vm, i, NULL);
923 KASSERT(state == VCPU_FROZEN, ("%s: invalid vcpu state %d",
924 __func__, state));
925 }
926 #endif
927 pageoff = gpa & PAGE_MASK;
928 if (len > PAGE_SIZE - pageoff)
929 panic("vm_gpa_hold: invalid gpa/len: 0x%016lx/%lu", gpa, len);
930
931 count = 0;
932 for (i = 0; i < VM_MAX_MEMMAPS; i++) {
933 mm = &vm->mem_maps[i];
934 if (sysmem_mapping(vm, mm) && gpa >= mm->gpa &&
935 gpa < mm->gpa + mm->len) {
936 count = vm_fault_quick_hold_pages(&vm->vmspace->vm_map,
937 trunc_page(gpa), PAGE_SIZE, reqprot, &m, 1);
938 break;
939 }
940 }
941
942 if (count == 1) {
943 *cookie = m;
944 return ((void *)(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)) + pageoff));
945 } else {
946 *cookie = NULL;
947 return (NULL);
948 }
949 }
950
951 void
952 vm_gpa_release(void *cookie)
953 {
954 vm_page_t m = cookie;
955
956 vm_page_lock(m);
957 vm_page_unhold(m);
958 vm_page_unlock(m);
959 }
960
961 int
962 vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval)
963 {
964
965 if (vcpu < 0 || vcpu >= VM_MAXCPU)
966 return (EINVAL);
967
968 if (reg >= VM_REG_LAST)
969 return (EINVAL);
970
971 return (VMGETREG(vm->cookie, vcpu, reg, retval));
972 }
973
974 int
975 vm_set_register(struct vm *vm, int vcpuid, int reg, uint64_t val)
976 {
977 struct vcpu *vcpu;
978 int error;
979
980 if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
981 return (EINVAL);
982
983 if (reg >= VM_REG_LAST)
984 return (EINVAL);
985
986 error = VMSETREG(vm->cookie, vcpuid, reg, val);
987 if (error || reg != VM_REG_GUEST_RIP)
988 return (error);
989
990 /* Set 'nextrip' to match the value of %rip */
991 VCPU_CTR1(vm, vcpuid, "Setting nextrip to %#lx", val);
992 vcpu = &vm->vcpu[vcpuid];
993 vcpu->nextrip = val;
994 return (0);
995 }
996
997 static boolean_t
998 is_descriptor_table(int reg)
999 {
1000
1001 switch (reg) {
1002 case VM_REG_GUEST_IDTR:
1003 case VM_REG_GUEST_GDTR:
1004 return (TRUE);
1005 default:
1006 return (FALSE);
1007 }
1008 }
1009
1010 static boolean_t
1011 is_segment_register(int reg)
1012 {
1013
1014 switch (reg) {
1015 case VM_REG_GUEST_ES:
1016 case VM_REG_GUEST_CS:
1017 case VM_REG_GUEST_SS:
1018 case VM_REG_GUEST_DS:
1019 case VM_REG_GUEST_FS:
1020 case VM_REG_GUEST_GS:
1021 case VM_REG_GUEST_TR:
1022 case VM_REG_GUEST_LDTR:
1023 return (TRUE);
1024 default:
1025 return (FALSE);
1026 }
1027 }
1028
1029 int
1030 vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
1031 struct seg_desc *desc)
1032 {
1033
1034 if (vcpu < 0 || vcpu >= VM_MAXCPU)
1035 return (EINVAL);
1036
1037 if (!is_segment_register(reg) && !is_descriptor_table(reg))
1038 return (EINVAL);
1039
1040 return (VMGETDESC(vm->cookie, vcpu, reg, desc));
1041 }
1042
1043 int
1044 vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
1045 struct seg_desc *desc)
1046 {
1047 if (vcpu < 0 || vcpu >= VM_MAXCPU)
1048 return (EINVAL);
1049
1050 if (!is_segment_register(reg) && !is_descriptor_table(reg))
1051 return (EINVAL);
1052
1053 return (VMSETDESC(vm->cookie, vcpu, reg, desc));
1054 }
1055
1056 static void
1057 restore_guest_fpustate(struct vcpu *vcpu)
1058 {
1059
1060 /* flush host state to the pcb */
1061 fpuexit(curthread);
1062
1063 /* restore guest FPU state */
1064 fpu_stop_emulating();
1065 fpurestore(vcpu->guestfpu);
1066
1067 /* restore guest XCR0 if XSAVE is enabled in the host */
1068 if (rcr4() & CR4_XSAVE)
1069 load_xcr(0, vcpu->guest_xcr0);
1070
1071 /*
1072 * The FPU is now "dirty" with the guest's state so turn on emulation
1073 * to trap any access to the FPU by the host.
1074 */
1075 fpu_start_emulating();
1076 }
1077
1078 static void
1079 save_guest_fpustate(struct vcpu *vcpu)
1080 {
1081
1082 if ((rcr0() & CR0_TS) == 0)
1083 panic("fpu emulation not enabled in host!");
1084
1085 /* save guest XCR0 and restore host XCR0 */
1086 if (rcr4() & CR4_XSAVE) {
1087 vcpu->guest_xcr0 = rxcr(0);
1088 load_xcr(0, vmm_get_host_xcr0());
1089 }
1090
1091 /* save guest FPU state */
1092 fpu_stop_emulating();
1093 fpusave(vcpu->guestfpu);
1094 fpu_start_emulating();
1095 }
1096
1097 static VMM_STAT(VCPU_IDLE_TICKS, "number of ticks vcpu was idle");
1098
1099 static int
1100 vcpu_set_state_locked(struct vm *vm, int vcpuid, enum vcpu_state newstate,
1101 bool from_idle)
1102 {
1103 struct vcpu *vcpu;
1104 int error;
1105
1106 vcpu = &vm->vcpu[vcpuid];
1107 vcpu_assert_locked(vcpu);
1108
1109 /*
1110 * State transitions from the vmmdev_ioctl() must always begin from
1111 * the VCPU_IDLE state. This guarantees that there is only a single
1112 * ioctl() operating on a vcpu at any point.
1113 */
1114 if (from_idle) {
1115 while (vcpu->state != VCPU_IDLE) {
1116 vcpu->reqidle = 1;
1117 vcpu_notify_event_locked(vcpu, false);
1118 VCPU_CTR1(vm, vcpuid, "vcpu state change from %s to "
1119 "idle requested", vcpu_state2str(vcpu->state));
1120 msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz);
1121 }
1122 } else {
1123 KASSERT(vcpu->state != VCPU_IDLE, ("invalid transition from "
1124 "vcpu idle state"));
1125 }
1126
1127 if (vcpu->state == VCPU_RUNNING) {
1128 KASSERT(vcpu->hostcpu == curcpu, ("curcpu %d and hostcpu %d "
1129 "mismatch for running vcpu", curcpu, vcpu->hostcpu));
1130 } else {
1131 KASSERT(vcpu->hostcpu == NOCPU, ("Invalid hostcpu %d for a "
1132 "vcpu that is not running", vcpu->hostcpu));
1133 }
1134
1135 /*
1136 * The following state transitions are allowed:
1137 * IDLE -> FROZEN -> IDLE
1138 * FROZEN -> RUNNING -> FROZEN
1139 * FROZEN -> SLEEPING -> FROZEN
1140 */
1141 switch (vcpu->state) {
1142 case VCPU_IDLE:
1143 case VCPU_RUNNING:
1144 case VCPU_SLEEPING:
1145 error = (newstate != VCPU_FROZEN);
1146 break;
1147 case VCPU_FROZEN:
1148 error = (newstate == VCPU_FROZEN);
1149 break;
1150 default:
1151 error = 1;
1152 break;
1153 }
1154
1155 if (error)
1156 return (EBUSY);
1157
1158 VCPU_CTR2(vm, vcpuid, "vcpu state changed from %s to %s",
1159 vcpu_state2str(vcpu->state), vcpu_state2str(newstate));
1160
1161 vcpu->state = newstate;
1162 if (newstate == VCPU_RUNNING)
1163 vcpu->hostcpu = curcpu;
1164 else
1165 vcpu->hostcpu = NOCPU;
1166
1167 if (newstate == VCPU_IDLE)
1168 wakeup(&vcpu->state);
1169
1170 return (0);
1171 }
1172
1173 static void
1174 vcpu_require_state(struct vm *vm, int vcpuid, enum vcpu_state newstate)
1175 {
1176 int error;
1177
1178 if ((error = vcpu_set_state(vm, vcpuid, newstate, false)) != 0)
1179 panic("Error %d setting state to %d\n", error, newstate);
1180 }
1181
1182 static void
1183 vcpu_require_state_locked(struct vm *vm, int vcpuid, enum vcpu_state newstate)
1184 {
1185 int error;
1186
1187 if ((error = vcpu_set_state_locked(vm, vcpuid, newstate, false)) != 0)
1188 panic("Error %d setting state to %d", error, newstate);
1189 }
1190
1191 static void
1192 vm_set_rendezvous_func(struct vm *vm, vm_rendezvous_func_t func)
1193 {
1194
1195 KASSERT(mtx_owned(&vm->rendezvous_mtx), ("rendezvous_mtx not locked"));
1196
1197 /*
1198 * Update 'rendezvous_func' and execute a write memory barrier to
1199 * ensure that it is visible across all host cpus. This is not needed
1200 * for correctness but it does ensure that all the vcpus will notice
1201 * that the rendezvous is requested immediately.
1202 */
1203 vm->rendezvous_func = func;
1204 wmb();
1205 }
1206
1207 #define RENDEZVOUS_CTR0(vm, vcpuid, fmt) \
1208 do { \
1209 if (vcpuid >= 0) \
1210 VCPU_CTR0(vm, vcpuid, fmt); \
1211 else \
1212 VM_CTR0(vm, fmt); \
1213 } while (0)
1214
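/*
 * Rendezvous protocol: the initiator installs 'rendezvous_func' and the
 * set of target vcpus in 'rendezvous_req_cpus'. Each vcpu that exits
 * calls this handler, runs the function once on its own behalf, marks
 * itself in 'rendezvous_done_cpus' and sleeps; the vcpu that completes
 * the set clears 'rendezvous_func' and wakes the sleepers.
 */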
1215 static void
1216 vm_handle_rendezvous(struct vm *vm, int vcpuid)
1217 {
1218
1219 KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < VM_MAXCPU),
1220 ("vm_handle_rendezvous: invalid vcpuid %d", vcpuid));
1221
1222 mtx_lock(&vm->rendezvous_mtx);
1223 while (vm->rendezvous_func != NULL) {
1224 /* 'rendezvous_req_cpus' must be a subset of 'active_cpus' */
1225 CPU_AND(&vm->rendezvous_req_cpus, &vm->active_cpus);
1226
1227 if (vcpuid != -1 &&
1228 CPU_ISSET(vcpuid, &vm->rendezvous_req_cpus) &&
1229 !CPU_ISSET(vcpuid, &vm->rendezvous_done_cpus)) {
1230 VCPU_CTR0(vm, vcpuid, "Calling rendezvous func");
1231 (*vm->rendezvous_func)(vm, vcpuid, vm->rendezvous_arg);
1232 CPU_SET(vcpuid, &vm->rendezvous_done_cpus);
1233 }
1234 if (CPU_CMP(&vm->rendezvous_req_cpus,
1235 &vm->rendezvous_done_cpus) == 0) {
1236 VCPU_CTR0(vm, vcpuid, "Rendezvous completed");
1237 vm_set_rendezvous_func(vm, NULL);
1238 wakeup(&vm->rendezvous_func);
1239 break;
1240 }
1241 RENDEZVOUS_CTR0(vm, vcpuid, "Wait for rendezvous completion");
1242 mtx_sleep(&vm->rendezvous_func, &vm->rendezvous_mtx, 0,
1243 "vmrndv", 0);
1244 }
1245 mtx_unlock(&vm->rendezvous_mtx);
1246 }
1247
1248 /*
1249 * Emulate a guest 'hlt' by sleeping until the vcpu is ready to run.
1250 */
1251 static int
1252 vm_handle_hlt(struct vm *vm, int vcpuid, bool intr_disabled, bool *retu)
1253 {
1254 struct vcpu *vcpu;
1255 const char *wmesg;
1256 int t, vcpu_halted, vm_halted;
1257
1258 KASSERT(!CPU_ISSET(vcpuid, &vm->halted_cpus), ("vcpu already halted"));
1259
1260 vcpu = &vm->vcpu[vcpuid];
1261 vcpu_halted = 0;
1262 vm_halted = 0;
1263
1264 vcpu_lock(vcpu);
1265 while (1) {
1266 /*
1267 * Do a final check for pending NMI or interrupts before
1268 * really putting this thread to sleep. Also check for
1269 * software events that would cause this vcpu to wakeup.
1270 *
1271 * These interrupts/events could have happened after the
1272 * vcpu returned from VMRUN() and before it acquired the
1273 * vcpu lock above.
1274 */
1275 if (vm->rendezvous_func != NULL || vm->suspend || vcpu->reqidle)
1276 break;
1277 if (vm_nmi_pending(vm, vcpuid))
1278 break;
1279 if (!intr_disabled) {
1280 if (vm_extint_pending(vm, vcpuid) ||
1281 vlapic_pending_intr(vcpu->vlapic, NULL)) {
1282 break;
1283 }
1284 }
1285
1286 /* Don't go to sleep if the vcpu thread needs to yield */
1287 if (vcpu_should_yield(vm, vcpuid))
1288 break;
1289
1290 /*
1291 * Some Linux guests implement "halt" by having all vcpus
1292 * execute HLT with interrupts disabled. 'halted_cpus' keeps
1293 * track of the vcpus that have entered this state. When all
1294 * vcpus enter the halted state the virtual machine is halted.
1295 */
1296 if (intr_disabled) {
1297 wmesg = "vmhalt";
1298 VCPU_CTR0(vm, vcpuid, "Halted");
1299 if (!vcpu_halted && halt_detection_enabled) {
1300 vcpu_halted = 1;
1301 CPU_SET_ATOMIC(vcpuid, &vm->halted_cpus);
1302 }
1303 if (CPU_CMP(&vm->halted_cpus, &vm->active_cpus) == 0) {
1304 vm_halted = 1;
1305 break;
1306 }
1307 } else {
1308 wmesg = "vmidle";
1309 }
1310
1311 t = ticks;
1312 vcpu_require_state_locked(vm, vcpuid, VCPU_SLEEPING);
1313 /*
1314 * XXX msleep_spin() cannot be interrupted by signals so
1315 * wake up periodically to check pending signals.
1316 */
1317 msleep_spin(vcpu, &vcpu->mtx, wmesg, hz);
1318 vcpu_require_state_locked(vm, vcpuid, VCPU_FROZEN);
1319 vmm_stat_incr(vm, vcpuid, VCPU_IDLE_TICKS, ticks - t);
1320 }
1321
1322 if (vcpu_halted)
1323 CPU_CLR_ATOMIC(vcpuid, &vm->halted_cpus);
1324
1325 vcpu_unlock(vcpu);
1326
1327 if (vm_halted)
1328 vm_suspend(vm, VM_SUSPEND_HALT);
1329
1330 return (0);
1331 }
1332
1333 static int
1334 vm_handle_paging(struct vm *vm, int vcpuid, bool *retu)
1335 {
1336 int rv, ftype;
1337 struct vm_map *map;
1338 struct vcpu *vcpu;
1339 struct vm_exit *vme;
1340
1341 vcpu = &vm->vcpu[vcpuid];
1342 vme = &vcpu->exitinfo;
1343
1344 KASSERT(vme->inst_length == 0, ("%s: invalid inst_length %d",
1345 __func__, vme->inst_length));
1346
1347 ftype = vme->u.paging.fault_type;
1348 KASSERT(ftype == VM_PROT_READ ||
1349 ftype == VM_PROT_WRITE || ftype == VM_PROT_EXECUTE,
1350 ("vm_handle_paging: invalid fault_type %d", ftype));
1351
1352 if (ftype == VM_PROT_READ || ftype == VM_PROT_WRITE) {
1353 rv = pmap_emulate_accessed_dirty(vmspace_pmap(vm->vmspace),
1354 vme->u.paging.gpa, ftype);
1355 if (rv == 0) {
1356 VCPU_CTR2(vm, vcpuid, "%s bit emulation for gpa %#lx",
1357 ftype == VM_PROT_READ ? "accessed" : "dirty",
1358 vme->u.paging.gpa);
1359 goto done;
1360 }
1361 }
1362
1363 map = &vm->vmspace->vm_map;
1364 rv = vm_fault(map, vme->u.paging.gpa, ftype, VM_FAULT_NORMAL);
1365
1366 VCPU_CTR3(vm, vcpuid, "vm_handle_paging rv = %d, gpa = %#lx, "
1367 "ftype = %d", rv, vme->u.paging.gpa, ftype);
1368
1369 if (rv != KERN_SUCCESS)
1370 return (EFAULT);
1371 done:
1372 return (0);
1373 }
1374
1375 static int
1376 vm_handle_inst_emul(struct vm *vm, int vcpuid, bool *retu)
1377 {
1378 struct vie *vie;
1379 struct vcpu *vcpu;
1380 struct vm_exit *vme;
1381 uint64_t gla, gpa, cs_base;
1382 struct vm_guest_paging *paging;
1383 mem_region_read_t mread;
1384 mem_region_write_t mwrite;
1385 enum vm_cpu_mode cpu_mode;
1386 int cs_d, error, fault;
1387
1388 vcpu = &vm->vcpu[vcpuid];
1389 vme = &vcpu->exitinfo;
1390
1391 KASSERT(vme->inst_length == 0, ("%s: invalid inst_length %d",
1392 __func__, vme->inst_length));
1393
1394 gla = vme->u.inst_emul.gla;
1395 gpa = vme->u.inst_emul.gpa;
1396 cs_base = vme->u.inst_emul.cs_base;
1397 cs_d = vme->u.inst_emul.cs_d;
1398 vie = &vme->u.inst_emul.vie;
1399 paging = &vme->u.inst_emul.paging;
1400 cpu_mode = paging->cpu_mode;
1401
1402 VCPU_CTR1(vm, vcpuid, "inst_emul fault accessing gpa %#lx", gpa);
1403
1404 /* Fetch, decode and emulate the faulting instruction */
1405 if (vie->num_valid == 0) {
1406 error = vmm_fetch_instruction(vm, vcpuid, paging, vme->rip +
1407 cs_base, VIE_INST_SIZE, vie, &fault);
1408 } else {
1409 /*
1410 * The instruction bytes have already been copied into 'vie'
1411 */
1412 error = fault = 0;
1413 }
1414 if (error || fault)
1415 return (error);
1416
1417 if (vmm_decode_instruction(vm, vcpuid, gla, cpu_mode, cs_d, vie) != 0) {
1418 VCPU_CTR1(vm, vcpuid, "Error decoding instruction at %#lx",
1419 vme->rip + cs_base);
1420 *retu = true; /* dump instruction bytes in userspace */
1421 return (0);
1422 }
1423
1424 /*
1425 * Update 'nextrip' based on the length of the emulated instruction.
1426 */
1427 vme->inst_length = vie->num_processed;
1428 vcpu->nextrip += vie->num_processed;
1429 VCPU_CTR1(vm, vcpuid, "nextrip updated to %#lx after instruction "
1430 "decoding", vcpu->nextrip);
1431
1432 /* return to userland unless this is an in-kernel emulated device */
1433 if (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE) {
1434 mread = lapic_mmio_read;
1435 mwrite = lapic_mmio_write;
1436 } else if (gpa >= VIOAPIC_BASE && gpa < VIOAPIC_BASE + VIOAPIC_SIZE) {
1437 mread = vioapic_mmio_read;
1438 mwrite = vioapic_mmio_write;
1439 } else if (gpa >= VHPET_BASE && gpa < VHPET_BASE + VHPET_SIZE) {
1440 mread = vhpet_mmio_read;
1441 mwrite = vhpet_mmio_write;
1442 } else {
1443 *retu = true;
1444 return (0);
1445 }
1446
1447 error = vmm_emulate_instruction(vm, vcpuid, gpa, vie, paging,
1448 mread, mwrite, retu);
1449
1450 return (error);
1451 }
1452
1453 static int
1454 vm_handle_suspend(struct vm *vm, int vcpuid, bool *retu)
1455 {
1456 int i, done;
1457 struct vcpu *vcpu;
1458
1459 done = 0;
1460 vcpu = &vm->vcpu[vcpuid];
1461
1462 CPU_SET_ATOMIC(vcpuid, &vm->suspended_cpus);
1463
1464 /*
1465 * Wait until all 'active_cpus' have suspended themselves.
1466 *
1467 	 * Since a VM may be suspended at any time, including when one or
1468 	 * more vcpus are doing a rendezvous, we need to call the rendezvous
1469 	 * handler while we are waiting to prevent a deadlock.
1470 */
1471 vcpu_lock(vcpu);
1472 while (1) {
1473 if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
1474 VCPU_CTR0(vm, vcpuid, "All vcpus suspended");
1475 break;
1476 }
1477
1478 if (vm->rendezvous_func == NULL) {
1479 VCPU_CTR0(vm, vcpuid, "Sleeping during suspend");
1480 vcpu_require_state_locked(vm, vcpuid, VCPU_SLEEPING);
1481 msleep_spin(vcpu, &vcpu->mtx, "vmsusp", hz);
1482 vcpu_require_state_locked(vm, vcpuid, VCPU_FROZEN);
1483 } else {
1484 VCPU_CTR0(vm, vcpuid, "Rendezvous during suspend");
1485 vcpu_unlock(vcpu);
1486 vm_handle_rendezvous(vm, vcpuid);
1487 vcpu_lock(vcpu);
1488 }
1489 }
1490 vcpu_unlock(vcpu);
1491
1492 /*
1493 * Wakeup the other sleeping vcpus and return to userspace.
1494 */
1495 for (i = 0; i < VM_MAXCPU; i++) {
1496 if (CPU_ISSET(i, &vm->suspended_cpus)) {
1497 vcpu_notify_event(vm, i, false);
1498 }
1499 }
1500
1501 *retu = true;
1502 return (0);
1503 }
1504
1505 static int
1506 vm_handle_reqidle(struct vm *vm, int vcpuid, bool *retu)
1507 {
1508 struct vcpu *vcpu = &vm->vcpu[vcpuid];
1509
1510 vcpu_lock(vcpu);
1511 KASSERT(vcpu->reqidle, ("invalid vcpu reqidle %d", vcpu->reqidle));
1512 vcpu->reqidle = 0;
1513 vcpu_unlock(vcpu);
1514 *retu = true;
1515 return (0);
1516 }
1517
1518 int
1519 vm_suspend(struct vm *vm, enum vm_suspend_how how)
1520 {
1521 int i;
1522
1523 if (how <= VM_SUSPEND_NONE || how >= VM_SUSPEND_LAST)
1524 return (EINVAL);
1525
1526 if (atomic_cmpset_int(&vm->suspend, 0, how) == 0) {
1527 VM_CTR2(vm, "virtual machine already suspended %d/%d",
1528 vm->suspend, how);
1529 return (EALREADY);
1530 }
1531
1532 VM_CTR1(vm, "virtual machine successfully suspended %d", how);
1533
1534 /*
1535 * Notify all active vcpus that they are now suspended.
1536 */
1537 for (i = 0; i < VM_MAXCPU; i++) {
1538 if (CPU_ISSET(i, &vm->active_cpus))
1539 vcpu_notify_event(vm, i, false);
1540 }
1541
1542 return (0);
1543 }
1544
1545 void
1546 vm_exit_suspended(struct vm *vm, int vcpuid, uint64_t rip)
1547 {
1548 struct vm_exit *vmexit;
1549
1550 KASSERT(vm->suspend > VM_SUSPEND_NONE && vm->suspend < VM_SUSPEND_LAST,
1551 ("vm_exit_suspended: invalid suspend type %d", vm->suspend));
1552
1553 vmexit = vm_exitinfo(vm, vcpuid);
1554 vmexit->rip = rip;
1555 vmexit->inst_length = 0;
1556 vmexit->exitcode = VM_EXITCODE_SUSPENDED;
1557 vmexit->u.suspended.how = vm->suspend;
1558 }
1559
1560 void
1561 vm_exit_rendezvous(struct vm *vm, int vcpuid, uint64_t rip)
1562 {
1563 struct vm_exit *vmexit;
1564
1565 KASSERT(vm->rendezvous_func != NULL, ("rendezvous not in progress"));
1566
1567 vmexit = vm_exitinfo(vm, vcpuid);
1568 vmexit->rip = rip;
1569 vmexit->inst_length = 0;
1570 vmexit->exitcode = VM_EXITCODE_RENDEZVOUS;
1571 vmm_stat_incr(vm, vcpuid, VMEXIT_RENDEZVOUS, 1);
1572 }
1573
1574 void
1575 vm_exit_reqidle(struct vm *vm, int vcpuid, uint64_t rip)
1576 {
1577 struct vm_exit *vmexit;
1578
1579 vmexit = vm_exitinfo(vm, vcpuid);
1580 vmexit->rip = rip;
1581 vmexit->inst_length = 0;
1582 vmexit->exitcode = VM_EXITCODE_REQIDLE;
1583 vmm_stat_incr(vm, vcpuid, VMEXIT_REQIDLE, 1);
1584 }
1585
1586 void
1587 vm_exit_astpending(struct vm *vm, int vcpuid, uint64_t rip)
1588 {
1589 struct vm_exit *vmexit;
1590
1591 vmexit = vm_exitinfo(vm, vcpuid);
1592 vmexit->rip = rip;
1593 vmexit->inst_length = 0;
1594 vmexit->exitcode = VM_EXITCODE_BOGUS;
1595 vmm_stat_incr(vm, vcpuid, VMEXIT_ASTPENDING, 1);
1596 }
1597
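/*
 * Run loop for a single vcpu: enter a critical section, load the guest
 * FPU state, call the backend VMRUN() and then handle the exit. Exits
 * that can be serviced in the kernel (paging, HLT, rendezvous,
 * instruction emulation, ...) restart the loop; everything else sets
 * 'retu' and is reflected to userspace in 'vmrun->vm_exit'.
 */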
1598 int
1599 vm_run(struct vm *vm, struct vm_run *vmrun)
1600 {
1601 struct vm_eventinfo evinfo;
1602 int error, vcpuid;
1603 struct vcpu *vcpu;
1604 struct pcb *pcb;
1605 uint64_t tscval;
1606 struct vm_exit *vme;
1607 bool retu, intr_disabled;
1608 pmap_t pmap;
1609
1610 vcpuid = vmrun->cpuid;
1611
1612 if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
1613 return (EINVAL);
1614
1615 if (!CPU_ISSET(vcpuid, &vm->active_cpus))
1616 return (EINVAL);
1617
1618 if (CPU_ISSET(vcpuid, &vm->suspended_cpus))
1619 return (EINVAL);
1620
1621 pmap = vmspace_pmap(vm->vmspace);
1622 vcpu = &vm->vcpu[vcpuid];
1623 vme = &vcpu->exitinfo;
1624 evinfo.rptr = &vm->rendezvous_func;
1625 evinfo.sptr = &vm->suspend;
1626 evinfo.iptr = &vcpu->reqidle;
1627 restart:
1628 critical_enter();
1629
1630 KASSERT(!CPU_ISSET(curcpu, &pmap->pm_active),
1631 ("vm_run: absurd pm_active"));
1632
1633 tscval = rdtsc();
1634
1635 pcb = PCPU_GET(curpcb);
1636 set_pcb_flags(pcb, PCB_FULL_IRET);
1637
1638 restore_guest_fpustate(vcpu);
1639
1640 vcpu_require_state(vm, vcpuid, VCPU_RUNNING);
1641 error = VMRUN(vm->cookie, vcpuid, vcpu->nextrip, pmap, &evinfo);
1642 vcpu_require_state(vm, vcpuid, VCPU_FROZEN);
1643
1644 save_guest_fpustate(vcpu);
1645
1646 vmm_stat_incr(vm, vcpuid, VCPU_TOTAL_RUNTIME, rdtsc() - tscval);
1647
1648 critical_exit();
1649
1650 if (error == 0) {
1651 retu = false;
1652 vcpu->nextrip = vme->rip + vme->inst_length;
1653 switch (vme->exitcode) {
1654 case VM_EXITCODE_REQIDLE:
1655 error = vm_handle_reqidle(vm, vcpuid, &retu);
1656 break;
1657 case VM_EXITCODE_SUSPENDED:
1658 error = vm_handle_suspend(vm, vcpuid, &retu);
1659 break;
1660 case VM_EXITCODE_IOAPIC_EOI:
1661 vioapic_process_eoi(vm, vcpuid,
1662 vme->u.ioapic_eoi.vector);
1663 break;
1664 case VM_EXITCODE_RENDEZVOUS:
1665 vm_handle_rendezvous(vm, vcpuid);
1666 error = 0;
1667 break;
1668 case VM_EXITCODE_HLT:
1669 intr_disabled = ((vme->u.hlt.rflags & PSL_I) == 0);
1670 error = vm_handle_hlt(vm, vcpuid, intr_disabled, &retu);
1671 break;
1672 case VM_EXITCODE_PAGING:
1673 error = vm_handle_paging(vm, vcpuid, &retu);
1674 break;
1675 case VM_EXITCODE_INST_EMUL:
1676 error = vm_handle_inst_emul(vm, vcpuid, &retu);
1677 break;
1678 case VM_EXITCODE_INOUT:
1679 case VM_EXITCODE_INOUT_STR:
1680 error = vm_handle_inout(vm, vcpuid, vme, &retu);
1681 break;
1682 case VM_EXITCODE_MONITOR:
1683 case VM_EXITCODE_MWAIT:
1684 vm_inject_ud(vm, vcpuid);
1685 break;
1686 default:
1687 retu = true; /* handled in userland */
1688 break;
1689 }
1690 }
1691
1692 if (error == 0 && retu == false)
1693 goto restart;
1694
1695 VCPU_CTR2(vm, vcpuid, "retu %d/%d", error, vme->exitcode);
1696
1697 /* copy the exit information */
1698 bcopy(vme, &vmrun->vm_exit, sizeof(struct vm_exit));
1699 return (error);
1700 }
1701
1702 int
1703 vm_restart_instruction(void *arg, int vcpuid)
1704 {
1705 struct vm *vm;
1706 struct vcpu *vcpu;
1707 enum vcpu_state state;
1708 uint64_t rip;
1709 int error;
1710
1711 vm = arg;
1712 if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
1713 return (EINVAL);
1714
1715 vcpu = &vm->vcpu[vcpuid];
1716 state = vcpu_get_state(vm, vcpuid, NULL);
1717 if (state == VCPU_RUNNING) {
1718 /*
1719 * When a vcpu is "running" the next instruction is determined
1720 * by adding 'rip' and 'inst_length' in the vcpu's 'exitinfo'.
1721 * Thus setting 'inst_length' to zero will cause the current
1722 * instruction to be restarted.
1723 */
1724 vcpu->exitinfo.inst_length = 0;
1725 VCPU_CTR1(vm, vcpuid, "restarting instruction at %#lx by "
1726 "setting inst_length to zero", vcpu->exitinfo.rip);
1727 } else if (state == VCPU_FROZEN) {
1728 /*
1729 * When a vcpu is "frozen" it is outside the critical section
1730 * around VMRUN() and 'nextrip' points to the next instruction.
1731 * Thus instruction restart is achieved by setting 'nextrip'
1732 * to the vcpu's %rip.
1733 */
1734 error = vm_get_register(vm, vcpuid, VM_REG_GUEST_RIP, &rip);
1735 KASSERT(!error, ("%s: error %d getting rip", __func__, error));
1736 VCPU_CTR2(vm, vcpuid, "restarting instruction by updating "
1737 "nextrip from %#lx to %#lx", vcpu->nextrip, rip);
1738 vcpu->nextrip = rip;
1739 } else {
1740 panic("%s: invalid state %d", __func__, state);
1741 }
1742 return (0);
1743 }
1744
1745 int
1746 vm_exit_intinfo(struct vm *vm, int vcpuid, uint64_t info)
1747 {
1748 struct vcpu *vcpu;
1749 int type, vector;
1750
1751 if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
1752 return (EINVAL);
1753
1754 vcpu = &vm->vcpu[vcpuid];
1755
1756 if (info & VM_INTINFO_VALID) {
1757 type = info & VM_INTINFO_TYPE;
1758 vector = info & 0xff;
1759 if (type == VM_INTINFO_NMI && vector != IDT_NMI)
1760 return (EINVAL);
1761 if (type == VM_INTINFO_HWEXCEPTION && vector >= 32)
1762 return (EINVAL);
1763 if (info & VM_INTINFO_RSVD)
1764 return (EINVAL);
1765 } else {
1766 info = 0;
1767 }
1768 VCPU_CTR2(vm, vcpuid, "%s: info1(%#lx)", __func__, info);
1769 vcpu->exitintinfo = info;
1770 return (0);
1771 }
1772
1773 enum exc_class {
1774 EXC_BENIGN,
1775 EXC_CONTRIBUTORY,
1776 EXC_PAGEFAULT
1777 };
1778
1779 #define IDT_VE 20 /* Virtualization Exception (Intel specific) */
1780
1781 static enum exc_class
1782 exception_class(uint64_t info)
1783 {
1784 int type, vector;
1785
1786 KASSERT(info & VM_INTINFO_VALID, ("intinfo must be valid: %#lx", info));
1787 type = info & VM_INTINFO_TYPE;
1788 vector = info & 0xff;
1789
1790 /* Table 6-4, "Interrupt and Exception Classes", Intel SDM, Vol 3 */
1791 switch (type) {
1792 case VM_INTINFO_HWINTR:
1793 case VM_INTINFO_SWINTR:
1794 case VM_INTINFO_NMI:
1795 return (EXC_BENIGN);
1796 default:
1797 /*
1798 * Hardware exception.
1799 *
1800 * SVM and VT-x use identical type values to represent NMI,
1801 * hardware interrupt and software interrupt.
1802 *
1803 * SVM uses type '3' for all exceptions. VT-x uses type '3'
1804 * for exceptions except #BP and #OF. #BP and #OF use a type
1805 * value of '5' or '6'. Therefore we don't check for explicit
1806 * values of 'type' to classify 'intinfo' into a hardware
1807 * exception.
1808 */
1809 break;
1810 }
1811
1812 switch (vector) {
1813 case IDT_PF:
1814 case IDT_VE:
1815 return (EXC_PAGEFAULT);
1816 case IDT_DE:
1817 case IDT_TS:
1818 case IDT_NP:
1819 case IDT_SS:
1820 case IDT_GP:
1821 return (EXC_CONTRIBUTORY);
1822 default:
1823 return (EXC_BENIGN);
1824 }
1825 }
1826
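/*
 * For example, a #GP (contributory) raised while delivering a #PF
 * (page fault class) is folded into a #DF below, whereas a #PF raised
 * while delivering a benign event such as a hardware interrupt is
 * simply delivered next, i.e. serially.
 */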
1827 static int
1828 nested_fault(struct vm *vm, int vcpuid, uint64_t info1, uint64_t info2,
1829 uint64_t *retinfo)
1830 {
1831 enum exc_class exc1, exc2;
1832 int type1, vector1;
1833
1834 KASSERT(info1 & VM_INTINFO_VALID, ("info1 %#lx is not valid", info1));
1835 KASSERT(info2 & VM_INTINFO_VALID, ("info2 %#lx is not valid", info2));
1836
1837 /*
1838 * If an exception occurs while attempting to call the double-fault
1839 * handler the processor enters shutdown mode (aka triple fault).
1840 */
1841 type1 = info1 & VM_INTINFO_TYPE;
1842 vector1 = info1 & 0xff;
1843 if (type1 == VM_INTINFO_HWEXCEPTION && vector1 == IDT_DF) {
1844 VCPU_CTR2(vm, vcpuid, "triple fault: info1(%#lx), info2(%#lx)",
1845 info1, info2);
1846 vm_suspend(vm, VM_SUSPEND_TRIPLEFAULT);
1847 *retinfo = 0;
1848 return (0);
1849 }
1850
1851 /*
1852 * Table 6-5 "Conditions for Generating a Double Fault", Intel SDM, Vol3
1853 */
1854 exc1 = exception_class(info1);
1855 exc2 = exception_class(info2);
1856 if ((exc1 == EXC_CONTRIBUTORY && exc2 == EXC_CONTRIBUTORY) ||
1857 (exc1 == EXC_PAGEFAULT && exc2 != EXC_BENIGN)) {
1858 /* Convert nested fault into a double fault. */
1859 *retinfo = IDT_DF;
1860 *retinfo |= VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION;
1861 *retinfo |= VM_INTINFO_DEL_ERRCODE;
1862 } else {
1863 /* Handle exceptions serially */
1864 *retinfo = info2;
1865 }
1866 return (1);
1867 }
1868
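/*
 * Pack a pending exception into the 64-bit intinfo format used above:
 * vector in bits 7:0, VM_INTINFO_HWEXCEPTION and VM_INTINFO_VALID type
 * and valid bits, and, when present, the error code in bits 63:32.
 */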
1869 static uint64_t
1870 vcpu_exception_intinfo(struct vcpu *vcpu)
1871 {
1872 uint64_t info = 0;
1873
1874 if (vcpu->exception_pending) {
1875 info = vcpu->exc_vector & 0xff;
1876 info |= VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION;
1877 if (vcpu->exc_errcode_valid) {
1878 info |= VM_INTINFO_DEL_ERRCODE;
1879 info |= (uint64_t)vcpu->exc_errcode << 32;
1880 }
1881 }
1882 return (info);
1883 }
1884
1885 int
1886 vm_entry_intinfo(struct vm *vm, int vcpuid, uint64_t *retinfo)
1887 {
1888 struct vcpu *vcpu;
1889 uint64_t info1, info2;
1890 int valid;
1891
1892 KASSERT(vcpuid >= 0 && vcpuid < VM_MAXCPU, ("invalid vcpu %d", vcpuid));
1893
1894 vcpu = &vm->vcpu[vcpuid];
1895
1896 info1 = vcpu->exitintinfo;
1897 vcpu->exitintinfo = 0;
1898
1899 info2 = 0;
1900 if (vcpu->exception_pending) {
1901 info2 = vcpu_exception_intinfo(vcpu);
1902 vcpu->exception_pending = 0;
1903 VCPU_CTR2(vm, vcpuid, "Exception %d delivered: %#lx",
1904 vcpu->exc_vector, info2);
1905 }
1906
1907 if ((info1 & VM_INTINFO_VALID) && (info2 & VM_INTINFO_VALID)) {
1908 valid = nested_fault(vm, vcpuid, info1, info2, retinfo);
1909 } else if (info1 & VM_INTINFO_VALID) {
1910 *retinfo = info1;
1911 valid = 1;
1912 } else if (info2 & VM_INTINFO_VALID) {
1913 *retinfo = info2;
1914 valid = 1;
1915 } else {
1916 valid = 0;
1917 }
1918
1919 if (valid) {
1920 VCPU_CTR4(vm, vcpuid, "%s: info1(%#lx), info2(%#lx), "
1921 "retinfo(%#lx)", __func__, info1, info2, *retinfo);
1922 }
1923
1924 return (valid);
1925 }
1926
1927 int
1928 vm_get_intinfo(struct vm *vm, int vcpuid, uint64_t *info1, uint64_t *info2)
1929 {
1930 struct vcpu *vcpu;
1931
1932 if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
1933 return (EINVAL);
1934
1935 vcpu = &vm->vcpu[vcpuid];
1936 *info1 = vcpu->exitintinfo;
1937 *info2 = vcpu_exception_intinfo(vcpu);
1938 return (0);
1939 }
1940
1941 int
1942 vm_inject_exception(struct vm *vm, int vcpuid, int vector, int errcode_valid,
1943 uint32_t errcode, int restart_instruction)
1944 {
1945 struct vcpu *vcpu;
1946 uint64_t regval;
1947 int error;
1948
1949 if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
1950 return (EINVAL);
1951
1952 if (vector < 0 || vector >= 32)
1953 return (EINVAL);
1954
1955 /*
1956 * A double fault exception should never be injected directly into
1957 * the guest. It is a derived exception that results from specific
1958 * combinations of nested faults.
1959 */
1960 if (vector == IDT_DF)
1961 return (EINVAL);
1962
1963 vcpu = &vm->vcpu[vcpuid];
1964
1965 if (vcpu->exception_pending) {
1966 VCPU_CTR2(vm, vcpuid, "Unable to inject exception %d due to "
1967 "pending exception %d", vector, vcpu->exc_vector);
1968 return (EBUSY);
1969 }
1970
1971 if (errcode_valid) {
1972 /*
1973 * Exceptions don't deliver an error code in real mode.
1974 */
1975 		error = vm_get_register(vm, vcpuid, VM_REG_GUEST_CR0, &regval);
1976 KASSERT(!error, ("%s: error %d getting CR0", __func__, error));
1977 if (!(regval & CR0_PE))
1978 errcode_valid = 0;
1979 }
1980
1981 /*
1982 * From section 26.6.1 "Interruptibility State" in Intel SDM:
1983 *
1984 * Event blocking by "STI" or "MOV SS" is cleared after guest executes
1985 * one instruction or incurs an exception.
1986 */
1987 error = vm_set_register(vm, vcpuid, VM_REG_GUEST_INTR_SHADOW, 0);
1988 KASSERT(error == 0, ("%s: error %d clearing interrupt shadow",
1989 __func__, error));
1990
1991 if (restart_instruction)
1992 vm_restart_instruction(vm, vcpuid);
1993
1994 vcpu->exception_pending = 1;
1995 vcpu->exc_vector = vector;
1996 vcpu->exc_errcode = errcode;
1997 vcpu->exc_errcode_valid = errcode_valid;
1998 VCPU_CTR1(vm, vcpuid, "Exception %d pending", vector);
1999 return (0);
2000 }
2001
2002 void
2003 vm_inject_fault(void *vmarg, int vcpuid, int vector, int errcode_valid,
2004 int errcode)
2005 {
2006 struct vm *vm;
2007 int error, restart_instruction;
2008
2009 vm = vmarg;
2010 restart_instruction = 1;
2011
2012 error = vm_inject_exception(vm, vcpuid, vector, errcode_valid,
2013 errcode, restart_instruction);
2014 KASSERT(error == 0, ("vm_inject_exception error %d", error));
2015 }
2016
2017 void
2018 vm_inject_pf(void *vmarg, int vcpuid, int error_code, uint64_t cr2)
2019 {
2020 struct vm *vm;
2021 int error;
2022
2023 vm = vmarg;
2024 VCPU_CTR2(vm, vcpuid, "Injecting page fault: error_code %#x, cr2 %#lx",
2025 error_code, cr2);
2026
2027 error = vm_set_register(vm, vcpuid, VM_REG_GUEST_CR2, cr2);
2028 KASSERT(error == 0, ("vm_set_register(cr2) error %d", error));
2029
2030 vm_inject_fault(vm, vcpuid, IDT_PF, 1, error_code);
2031 }
2032
2033 static VMM_STAT(VCPU_NMI_COUNT, "number of NMIs delivered to vcpu");
2034
2035 int
2036 vm_inject_nmi(struct vm *vm, int vcpuid)
2037 {
2038 struct vcpu *vcpu;
2039
2040 if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
2041 return (EINVAL);
2042
2043 vcpu = &vm->vcpu[vcpuid];
2044
2045 vcpu->nmi_pending = 1;
2046 vcpu_notify_event(vm, vcpuid, false);
2047 return (0);
2048 }
2049
2050 int
2051 vm_nmi_pending(struct vm *vm, int vcpuid)
2052 {
2053 struct vcpu *vcpu;
2054
2055 if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
2056 panic("vm_nmi_pending: invalid vcpuid %d", vcpuid);
2057
2058 vcpu = &vm->vcpu[vcpuid];
2059
2060 return (vcpu->nmi_pending);
2061 }
2062
2063 void
2064 vm_nmi_clear(struct vm *vm, int vcpuid)
2065 {
2066 struct vcpu *vcpu;
2067
2068 if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
2069 panic("vm_nmi_pending: invalid vcpuid %d", vcpuid);
2070
2071 vcpu = &vm->vcpu[vcpuid];
2072
2073 if (vcpu->nmi_pending == 0)
2074 panic("vm_nmi_clear: inconsistent nmi_pending state");
2075
2076 vcpu->nmi_pending = 0;
2077 vmm_stat_incr(vm, vcpuid, VCPU_NMI_COUNT, 1);
2078 }
2079
2080 static VMM_STAT(VCPU_EXTINT_COUNT, "number of ExtINTs delivered to vcpu");
2081
2082 int
2083 vm_inject_extint(struct vm *vm, int vcpuid)
2084 {
2085 struct vcpu *vcpu;
2086
2087 if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
2088 return (EINVAL);
2089
2090 vcpu = &vm->vcpu[vcpuid];
2091
2092 vcpu->extint_pending = 1;
2093 vcpu_notify_event(vm, vcpuid, false);
2094 return (0);
2095 }
2096
2097 int
2098 vm_extint_pending(struct vm *vm, int vcpuid)
2099 {
2100 struct vcpu *vcpu;
2101
2102 if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
2103 panic("vm_extint_pending: invalid vcpuid %d", vcpuid);
2104
2105 vcpu = &vm->vcpu[vcpuid];
2106
2107 return (vcpu->extint_pending);
2108 }
2109
2110 void
2111 vm_extint_clear(struct vm *vm, int vcpuid)
2112 {
2113 struct vcpu *vcpu;
2114
2115 if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
2116 panic("vm_extint_pending: invalid vcpuid %d", vcpuid);
2117
2118 vcpu = &vm->vcpu[vcpuid];
2119
2120 if (vcpu->extint_pending == 0)
2121 panic("vm_extint_clear: inconsistent extint_pending state");
2122
2123 vcpu->extint_pending = 0;
2124 vmm_stat_incr(vm, vcpuid, VCPU_EXTINT_COUNT, 1);
2125 }
2126
2127 int
2128 vm_get_capability(struct vm *vm, int vcpu, int type, int *retval)
2129 {
2130 if (vcpu < 0 || vcpu >= VM_MAXCPU)
2131 return (EINVAL);
2132
2133 if (type < 0 || type >= VM_CAP_MAX)
2134 return (EINVAL);
2135
2136 return (VMGETCAP(vm->cookie, vcpu, type, retval));
2137 }
2138
2139 int
2140 vm_set_capability(struct vm *vm, int vcpu, int type, int val)
2141 {
2142 if (vcpu < 0 || vcpu >= VM_MAXCPU)
2143 return (EINVAL);
2144
2145 if (type < 0 || type >= VM_CAP_MAX)
2146 return (EINVAL);
2147
2148 return (VMSETCAP(vm->cookie, vcpu, type, val));
2149 }
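
/*
 * Illustrative sketch (not part of the original source): capabilities
 * are per-vcpu toggles forwarded to the backend via VMSETCAP/VMGETCAP.
 * VM_CAP_HALT_EXIT, for example, makes a guest HLT cause a VM exit.
 */
static int
enable_hlt_exits_sketch(struct vm *vm, int vcpuid)
{
	int enabled, error;

	error = vm_set_capability(vm, vcpuid, VM_CAP_HALT_EXIT, 1);
	if (error == 0)
		error = vm_get_capability(vm, vcpuid, VM_CAP_HALT_EXIT,
		    &enabled);
	return (error);
}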
2150
2151 struct vlapic *
2152 vm_lapic(struct vm *vm, int cpu)
2153 {
2154 return (vm->vcpu[cpu].vlapic);
2155 }
2156
2157 struct vioapic *
2158 vm_ioapic(struct vm *vm)
2159 {
2160
2161 return (vm->vioapic);
2162 }
2163
2164 struct vhpet *
2165 vm_hpet(struct vm *vm)
2166 {
2167
2168 return (vm->vhpet);
2169 }
2170
2171 boolean_t
2172 vmm_is_pptdev(int bus, int slot, int func)
2173 {
2174 int found, i, n;
2175 int b, s, f;
2176 char *val, *cp, *cp2;
2177
2178 /*
2179 * XXX
2180 	 * The length of an environment variable is limited to 128 bytes, which
2181 * puts an upper limit on the number of passthru devices that may be
2182 * specified using a single environment variable.
2183 *
2184 * Work around this by scanning multiple environment variable
2185 * names instead of a single one - yuck!
2186 */
2187 const char *names[] = { "pptdevs", "pptdevs2", "pptdevs3", NULL };
2188
2189 /* set pptdevs="1/2/3 4/5/6 7/8/9 10/11/12" */
2190 found = 0;
2191 for (i = 0; names[i] != NULL && !found; i++) {
2192 cp = val = kern_getenv(names[i]);
2193 while (cp != NULL && *cp != '\0') {
2194 if ((cp2 = strchr(cp, ' ')) != NULL)
2195 *cp2 = '\0';
2196
2197 n = sscanf(cp, "%d/%d/%d", &b, &s, &f);
2198 if (n == 3 && bus == b && slot == s && func == f) {
2199 found = 1;
2200 break;
2201 }
2202
2203 if (cp2 != NULL)
2204 *cp2++ = ' ';
2205
2206 cp = cp2;
2207 }
2208 freeenv(val);
2209 }
2210 return (found);
2211 }
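
/*
 * For reference, the bus/slot/function tuples above come from loader
 * tunables, e.g. in /boot/loader.conf, spilling into "pptdevs2" and
 * "pptdevs3" once a single variable nears the 128-byte limit:
 *
 *	pptdevs="2/0/0 4/9/0"
 *	pptdevs2="10/0/0"
 */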
2212
2213 void *
2214 vm_iommu_domain(struct vm *vm)
2215 {
2216
2217 return (vm->iommu);
2218 }
2219
2220 int
2221 vcpu_set_state(struct vm *vm, int vcpuid, enum vcpu_state newstate,
2222 bool from_idle)
2223 {
2224 int error;
2225 struct vcpu *vcpu;
2226
2227 if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
2228 panic("vm_set_run_state: invalid vcpuid %d", vcpuid);
2229
2230 vcpu = &vm->vcpu[vcpuid];
2231
2232 vcpu_lock(vcpu);
2233 error = vcpu_set_state_locked(vm, vcpuid, newstate, from_idle);
2234 vcpu_unlock(vcpu);
2235
2236 return (error);
2237 }
2238
2239 enum vcpu_state
2240 vcpu_get_state(struct vm *vm, int vcpuid, int *hostcpu)
2241 {
2242 struct vcpu *vcpu;
2243 enum vcpu_state state;
2244
2245 if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
2246 panic("vm_get_run_state: invalid vcpuid %d", vcpuid);
2247
2248 vcpu = &vm->vcpu[vcpuid];
2249
2250 vcpu_lock(vcpu);
2251 state = vcpu->state;
2252 if (hostcpu != NULL)
2253 *hostcpu = vcpu->hostcpu;
2254 vcpu_unlock(vcpu);
2255
2256 return (state);
2257 }
2258
2259 int
2260 vm_activate_cpu(struct vm *vm, int vcpuid)
2261 {
2262
2263 if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
2264 return (EINVAL);
2265
2266 if (CPU_ISSET(vcpuid, &vm->active_cpus))
2267 return (EBUSY);
2268
2269 VCPU_CTR0(vm, vcpuid, "activated");
2270 CPU_SET_ATOMIC(vcpuid, &vm->active_cpus);
2271 return (0);
2272 }
2273
2274 cpuset_t
2275 vm_active_cpus(struct vm *vm)
2276 {
2277
2278 return (vm->active_cpus);
2279 }
2280
2281 cpuset_t
2282 vm_suspended_cpus(struct vm *vm)
2283 {
2284
2285 return (vm->suspended_cpus);
2286 }
2287
2288 void *
2289 vcpu_stats(struct vm *vm, int vcpuid)
2290 {
2291
2292 return (vm->vcpu[vcpuid].stats);
2293 }
2294
2295 int
2296 vm_get_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state *state)
2297 {
2298 if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
2299 return (EINVAL);
2300
2301 *state = vm->vcpu[vcpuid].x2apic_state;
2302
2303 return (0);
2304 }
2305
2306 int
2307 vm_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state state)
2308 {
2309 if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
2310 return (EINVAL);
2311
2312 if (state >= X2APIC_STATE_LAST)
2313 return (EINVAL);
2314
2315 vm->vcpu[vcpuid].x2apic_state = state;
2316
2317 vlapic_set_x2apic_state(vm, vcpuid, state);
2318
2319 return (0);
2320 }
2321
2322 /*
2323 * This function is called to ensure that a vcpu "sees" a pending event
2324 * as soon as possible:
2325 * - If the vcpu thread is sleeping then it is woken up.
2326 * - If the vcpu is running on a different host_cpu then an IPI will be directed
2327 * to the host_cpu to cause the vcpu to trap into the hypervisor.
2328 */
2329 static void
2330 vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr)
2331 {
2332 int hostcpu;
2333
2334 hostcpu = vcpu->hostcpu;
2335 if (vcpu->state == VCPU_RUNNING) {
2336 KASSERT(hostcpu != NOCPU, ("vcpu running on invalid hostcpu"));
2337 if (hostcpu != curcpu) {
2338 if (lapic_intr) {
2339 vlapic_post_intr(vcpu->vlapic, hostcpu,
2340 vmm_ipinum);
2341 } else {
2342 ipi_cpu(hostcpu, vmm_ipinum);
2343 }
2344 } else {
2345 /*
2346 * If the 'vcpu' is running on 'curcpu' then it must
2347 * be sending a notification to itself (e.g. SELF_IPI).
2348 * The pending event will be picked up when the vcpu
2349 * transitions back to guest context.
2350 */
2351 }
2352 } else {
2353 KASSERT(hostcpu == NOCPU, ("vcpu state %d not consistent "
2354 "with hostcpu %d", vcpu->state, hostcpu));
2355 if (vcpu->state == VCPU_SLEEPING)
2356 wakeup_one(vcpu);
2357 }
2358 }
2359
2360 void
2361 vcpu_notify_event(struct vm *vm, int vcpuid, bool lapic_intr)
2362 {
2363 struct vcpu *vcpu = &vm->vcpu[vcpuid];
2364
2365 vcpu_lock(vcpu);
2366 vcpu_notify_event_locked(vcpu, lapic_intr);
2367 vcpu_unlock(vcpu);
2368 }
2369
2370 struct vmspace *
2371 vm_get_vmspace(struct vm *vm)
2372 {
2373
2374 return (vm->vmspace);
2375 }
2376
2377 int
2378 vm_apicid2vcpuid(struct vm *vm, int apicid)
2379 {
2380 /*
2381 * XXX apic id is assumed to be numerically identical to vcpu id
2382 */
2383 return (apicid);
2384 }
2385
2386 void
2387 vm_smp_rendezvous(struct vm *vm, int vcpuid, cpuset_t dest,
2388 vm_rendezvous_func_t func, void *arg)
2389 {
2390 int i;
2391
2392 /*
2393 * Enforce that this function is called without any locks
2394 */
2395 WITNESS_WARN(WARN_PANIC, NULL, "vm_smp_rendezvous");
2396 KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < VM_MAXCPU),
2397 ("vm_smp_rendezvous: invalid vcpuid %d", vcpuid));
2398
2399 restart:
2400 mtx_lock(&vm->rendezvous_mtx);
2401 if (vm->rendezvous_func != NULL) {
2402 /*
2403 * If a rendezvous is already in progress then we need to
2404 * call the rendezvous handler in case this 'vcpuid' is one
2405 * of the targets of the rendezvous.
2406 */
2407 RENDEZVOUS_CTR0(vm, vcpuid, "Rendezvous already in progress");
2408 mtx_unlock(&vm->rendezvous_mtx);
2409 vm_handle_rendezvous(vm, vcpuid);
2410 goto restart;
2411 }
2412 KASSERT(vm->rendezvous_func == NULL, ("vm_smp_rendezvous: previous "
2413 "rendezvous is still in progress"));
2414
2415 RENDEZVOUS_CTR0(vm, vcpuid, "Initiating rendezvous");
2416 vm->rendezvous_req_cpus = dest;
2417 CPU_ZERO(&vm->rendezvous_done_cpus);
2418 vm->rendezvous_arg = arg;
2419 vm_set_rendezvous_func(vm, func);
2420 mtx_unlock(&vm->rendezvous_mtx);
2421
2422 /*
2423 * Wake up any sleeping vcpus and trigger a VM-exit in any running
2424 * vcpus so they handle the rendezvous as soon as possible.
2425 */
2426 for (i = 0; i < VM_MAXCPU; i++) {
2427 if (CPU_ISSET(i, &dest))
2428 vcpu_notify_event(vm, i, false);
2429 }
2430
2431 vm_handle_rendezvous(vm, vcpuid);
2432 }
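
/*
 * Illustrative sketch (not part of the original source): a caller
 * directs a rendezvous at every active vcpu.  'rendezvous_cb_sketch'
 * is a hypothetical vm_rendezvous_func_t; it runs in the context of
 * each targeted vcpu thread.
 */
static void
rendezvous_cb_sketch(struct vm *vm, int vcpuid, void *arg)
{
	/* ... per-vcpu work performed at the rendezvous point ... */
}

static void
rendezvous_all_sketch(struct vm *vm, int vcpuid)
{
	cpuset_t dest;

	dest = vm_active_cpus(vm);
	vm_smp_rendezvous(vm, vcpuid, dest, rendezvous_cb_sketch, NULL);
}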
2433
2434 struct vatpic *
2435 vm_atpic(struct vm *vm)
2436 {
2437 return (vm->vatpic);
2438 }
2439
2440 struct vatpit *
2441 vm_atpit(struct vm *vm)
2442 {
2443 return (vm->vatpit);
2444 }
2445
2446 struct vpmtmr *
2447 vm_pmtmr(struct vm *vm)
2448 {
2449
2450 return (vm->vpmtmr);
2451 }
2452
2453 struct vrtc *
2454 vm_rtc(struct vm *vm)
2455 {
2456
2457 return (vm->vrtc);
2458 }
2459
2460 enum vm_reg_name
2461 vm_segment_name(int seg)
2462 {
2463 static enum vm_reg_name seg_names[] = {
2464 VM_REG_GUEST_ES,
2465 VM_REG_GUEST_CS,
2466 VM_REG_GUEST_SS,
2467 VM_REG_GUEST_DS,
2468 VM_REG_GUEST_FS,
2469 VM_REG_GUEST_GS
2470 };
2471
2472 KASSERT(seg >= 0 && seg < nitems(seg_names),
2473 ("%s: invalid segment encoding %d", __func__, seg));
2474 return (seg_names[seg]);
2475 }
2476
2477 void
2478 vm_copy_teardown(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo,
2479 int num_copyinfo)
2480 {
2481 int idx;
2482
2483 for (idx = 0; idx < num_copyinfo; idx++) {
2484 if (copyinfo[idx].cookie != NULL)
2485 vm_gpa_release(copyinfo[idx].cookie);
2486 }
2487 bzero(copyinfo, num_copyinfo * sizeof(struct vm_copyinfo));
2488 }
2489
2490 int
2491 vm_copy_setup(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
2492 uint64_t gla, size_t len, int prot, struct vm_copyinfo *copyinfo,
2493 int num_copyinfo, int *fault)
2494 {
2495 int error, idx, nused;
2496 size_t n, off, remaining;
2497 void *hva, *cookie;
2498 uint64_t gpa;
2499
2500 bzero(copyinfo, sizeof(struct vm_copyinfo) * num_copyinfo);
2501
2502 nused = 0;
2503 remaining = len;
2504 while (remaining > 0) {
2505 KASSERT(nused < num_copyinfo, ("insufficient vm_copyinfo"));
2506 error = vm_gla2gpa(vm, vcpuid, paging, gla, prot, &gpa, fault);
2507 if (error || *fault)
2508 return (error);
2509 off = gpa & PAGE_MASK;
2510 n = min(remaining, PAGE_SIZE - off);
2511 copyinfo[nused].gpa = gpa;
2512 copyinfo[nused].len = n;
2513 remaining -= n;
2514 gla += n;
2515 nused++;
2516 }
2517
2518 for (idx = 0; idx < nused; idx++) {
2519 hva = vm_gpa_hold(vm, vcpuid, copyinfo[idx].gpa,
2520 copyinfo[idx].len, prot, &cookie);
2521 if (hva == NULL)
2522 break;
2523 copyinfo[idx].hva = hva;
2524 copyinfo[idx].cookie = cookie;
2525 }
2526
2527 if (idx != nused) {
2528 vm_copy_teardown(vm, vcpuid, copyinfo, num_copyinfo);
2529 return (EFAULT);
2530 } else {
2531 *fault = 0;
2532 return (0);
2533 }
2534 }
2535
2536 void
2537 vm_copyin(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo, void *kaddr,
2538 size_t len)
2539 {
2540 char *dst;
2541 int idx;
2542
2543 dst = kaddr;
2544 idx = 0;
2545 while (len > 0) {
2546 bcopy(copyinfo[idx].hva, dst, copyinfo[idx].len);
2547 len -= copyinfo[idx].len;
2548 dst += copyinfo[idx].len;
2549 idx++;
2550 }
2551 }
2552
2553 void
2554 vm_copyout(struct vm *vm, int vcpuid, const void *kaddr,
2555 struct vm_copyinfo *copyinfo, size_t len)
2556 {
2557 const char *src;
2558 int idx;
2559
2560 src = kaddr;
2561 idx = 0;
2562 while (len > 0) {
2563 bcopy(src, copyinfo[idx].hva, copyinfo[idx].len);
2564 len -= copyinfo[idx].len;
2565 src += copyinfo[idx].len;
2566 idx++;
2567 }
2568 }
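
/*
 * Illustrative sketch (not part of the original source): the canonical
 * copy sequence is vm_copy_setup(), then vm_copyin()/vm_copyout(), then
 * vm_copy_teardown().  Two vm_copyinfo slots cover a transfer that
 * spans at most two guest pages.
 */
static int
copy_from_guest_sketch(struct vm *vm, int vcpuid,
    struct vm_guest_paging *paging, uint64_t gla, void *buf, size_t len,
    int *fault)
{
	struct vm_copyinfo copyinfo[2];
	int error;

	error = vm_copy_setup(vm, vcpuid, paging, gla, len, PROT_READ,
	    copyinfo, nitems(copyinfo), fault);
	if (error || *fault)
		return (error);

	vm_copyin(vm, vcpuid, copyinfo, buf, len);
	vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));
	return (0);
}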
2569
2570 /*
2571 * Return the amount of in-use and wired memory for the VM. Since
2572  * these are global stats, only return the values for vCPU 0.
2573 */
2574 VMM_STAT_DECLARE(VMM_MEM_RESIDENT);
2575 VMM_STAT_DECLARE(VMM_MEM_WIRED);
2576
2577 static void
2578 vm_get_rescnt(struct vm *vm, int vcpu, struct vmm_stat_type *stat)
2579 {
2580
2581 if (vcpu == 0) {
2582 vmm_stat_set(vm, vcpu, VMM_MEM_RESIDENT,
2583 PAGE_SIZE * vmspace_resident_count(vm->vmspace));
2584 }
2585 }
2586
2587 static void
2588 vm_get_wiredcnt(struct vm *vm, int vcpu, struct vmm_stat_type *stat)
2589 {
2590
2591 if (vcpu == 0) {
2592 vmm_stat_set(vm, vcpu, VMM_MEM_WIRED,
2593 PAGE_SIZE * pmap_wired_count(vmspace_pmap(vm->vmspace)));
2594 }
2595 }
2596
2597 VMM_STAT_FUNC(VMM_MEM_RESIDENT, "Resident memory", vm_get_rescnt);
2598 VMM_STAT_FUNC(VMM_MEM_WIRED, "Wired memory", vm_get_wiredcnt);