FreeBSD/Linux Kernel Cross Reference
sys/amd64/vmm/vmm.c
1 /*-
2 * Copyright (c) 2011 NetApp, Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 * $FreeBSD$
27 */
28
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/kernel.h>
35 #include <sys/module.h>
36 #include <sys/sysctl.h>
37 #include <sys/malloc.h>
38 #include <sys/pcpu.h>
39 #include <sys/lock.h>
40 #include <sys/mutex.h>
41 #include <sys/proc.h>
42 #include <sys/rwlock.h>
43 #include <sys/sched.h>
44 #include <sys/smp.h>
45 #include <sys/systm.h>
46
47 #include <vm/vm.h>
48 #include <vm/vm_object.h>
49 #include <vm/vm_page.h>
50 #include <vm/pmap.h>
51 #include <vm/vm_map.h>
52 #include <vm/vm_extern.h>
53 #include <vm/vm_param.h>
54
55 #include <machine/cpu.h>
56 #include <machine/vm.h>
57 #include <machine/pcb.h>
58 #include <machine/smp.h>
59 #include <x86/psl.h>
60 #include <x86/apicreg.h>
61 #include <machine/vmparam.h>
62
63 #include <machine/vmm.h>
64 #include <machine/vmm_dev.h>
65 #include <machine/vmm_instruction_emul.h>
66
67 #include "vmm_ioport.h"
68 #include "vmm_ktr.h"
69 #include "vmm_host.h"
70 #include "vmm_mem.h"
71 #include "vmm_util.h"
72 #include "vatpic.h"
73 #include "vatpit.h"
74 #include "vhpet.h"
75 #include "vioapic.h"
76 #include "vlapic.h"
77 #include "vpmtmr.h"
78 #include "vrtc.h"
79 #include "vmm_ipi.h"
80 #include "vmm_stat.h"
81 #include "vmm_lapic.h"
82
83 #include "io/ppt.h"
84 #include "io/iommu.h"
85
86 struct vlapic;
87
88 /*
89 * Initialization:
90 * (a) allocated when vcpu is created
91 * (i) initialized when vcpu is created and when it is reinitialized
92 * (o) initialized the first time the vcpu is created
93 * (x) initialized before use
94 */
95 struct vcpu {
96 struct mtx mtx; /* (o) protects 'state' and 'hostcpu' */
97 enum vcpu_state state; /* (o) vcpu state */
98 int hostcpu; /* (o) vcpu's host cpu */
99 int reqidle; /* (i) request vcpu to idle */
100 struct vlapic *vlapic; /* (i) APIC device model */
101 enum x2apic_state x2apic_state; /* (i) APIC mode */
102 uint64_t exitintinfo; /* (i) events pending at VM exit */
103 int nmi_pending; /* (i) NMI pending */
104 int extint_pending; /* (i) INTR pending */
105 int exception_pending; /* (i) exception pending */
106 int exc_vector; /* (x) exception collateral */
107 int exc_errcode_valid;
108 uint32_t exc_errcode;
109 struct savefpu *guestfpu; /* (a,i) guest fpu state */
110 uint64_t guest_xcr0; /* (i) guest %xcr0 register */
111 void *stats; /* (a,i) statistics */
112 struct vm_exit exitinfo; /* (x) exit reason and collateral */
113 uint64_t nextrip; /* (x) next instruction to execute */
114 };
115
116 #define vcpu_lock_initialized(v) mtx_initialized(&((v)->mtx))
117 #define vcpu_lock_init(v) mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN)
118 #define vcpu_lock(v) mtx_lock_spin(&((v)->mtx))
119 #define vcpu_unlock(v) mtx_unlock_spin(&((v)->mtx))
120 #define vcpu_assert_locked(v) mtx_assert(&((v)->mtx), MA_OWNED)
121
122 struct mem_seg {
123 size_t len;
124 bool sysmem;
125 struct vm_object *object;
126 };
127 #define VM_MAX_MEMSEGS 2
128
129 struct mem_map {
130 vm_paddr_t gpa;
131 size_t len;
132 vm_ooffset_t segoff;
133 int segid;
134 int prot;
135 int flags;
136 };
137 #define VM_MAX_MEMMAPS 4
138
139 /*
140 * Initialization:
141 * (o) initialized the first time the VM is created
142 * (i) initialized when VM is created and when it is reinitialized
143 * (x) initialized before use
144 */
145 struct vm {
146 void *cookie; /* (i) cpu-specific data */
147 void *iommu; /* (x) iommu-specific data */
148 struct vhpet *vhpet; /* (i) virtual HPET */
149 struct vioapic *vioapic; /* (i) virtual ioapic */
150 struct vatpic *vatpic; /* (i) virtual atpic */
151 struct vatpit *vatpit; /* (i) virtual atpit */
152 struct vpmtmr *vpmtmr; /* (i) virtual ACPI PM timer */
153 struct vrtc *vrtc; /* (o) virtual RTC */
154 volatile cpuset_t active_cpus; /* (i) active vcpus */
155 int suspend; /* (i) stop VM execution */
156 volatile cpuset_t suspended_cpus; /* (i) suspended vcpus */
157 volatile cpuset_t halted_cpus; /* (x) cpus in a hard halt */
158 cpuset_t rendezvous_req_cpus; /* (x) rendezvous requested */
159 cpuset_t rendezvous_done_cpus; /* (x) rendezvous finished */
160 void *rendezvous_arg; /* (x) rendezvous func/arg */
161 vm_rendezvous_func_t rendezvous_func;
162 struct mtx rendezvous_mtx; /* (o) rendezvous lock */
163 struct mem_map mem_maps[VM_MAX_MEMMAPS]; /* (i) guest address space */
164 struct mem_seg mem_segs[VM_MAX_MEMSEGS]; /* (o) guest memory regions */
165 struct vmspace *vmspace; /* (o) guest's address space */
166 char name[VM_MAX_NAMELEN]; /* (o) virtual machine name */
167 struct vcpu vcpu[VM_MAXCPU]; /* (i) guest vcpus */
168 };
169
170 static int vmm_initialized;
171
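/*
 * 'ops' points to the hardware-specific backend (VT-x or SVM) selected by
 * vmm_init(). The wrapper macros below fall back to a benign default
 * (0, NULL or ENXIO) if no backend has been installed.
 */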
172 static struct vmm_ops *ops;
173 #define VMM_INIT(num) (ops != NULL ? (*ops->init)(num) : 0)
174 #define VMM_CLEANUP() (ops != NULL ? (*ops->cleanup)() : 0)
175 #define VMM_RESUME() (ops != NULL ? (*ops->resume)() : 0)
176
177 #define VMINIT(vm, pmap) (ops != NULL ? (*ops->vminit)(vm, pmap): NULL)
178 #define VMRUN(vmi, vcpu, rip, pmap, evinfo) \
179 (ops != NULL ? (*ops->vmrun)(vmi, vcpu, rip, pmap, evinfo) : ENXIO)
180 #define VMCLEANUP(vmi) (ops != NULL ? (*ops->vmcleanup)(vmi) : NULL)
181 #define VMSPACE_ALLOC(min, max) \
182 (ops != NULL ? (*ops->vmspace_alloc)(min, max) : NULL)
183 #define VMSPACE_FREE(vmspace) \
184 (ops != NULL ? (*ops->vmspace_free)(vmspace) : ENXIO)
185 #define VMGETREG(vmi, vcpu, num, retval) \
186 (ops != NULL ? (*ops->vmgetreg)(vmi, vcpu, num, retval) : ENXIO)
187 #define VMSETREG(vmi, vcpu, num, val) \
188 (ops != NULL ? (*ops->vmsetreg)(vmi, vcpu, num, val) : ENXIO)
189 #define VMGETDESC(vmi, vcpu, num, desc) \
190 (ops != NULL ? (*ops->vmgetdesc)(vmi, vcpu, num, desc) : ENXIO)
191 #define VMSETDESC(vmi, vcpu, num, desc) \
192 (ops != NULL ? (*ops->vmsetdesc)(vmi, vcpu, num, desc) : ENXIO)
193 #define VMGETCAP(vmi, vcpu, num, retval) \
194 (ops != NULL ? (*ops->vmgetcap)(vmi, vcpu, num, retval) : ENXIO)
195 #define VMSETCAP(vmi, vcpu, num, val) \
196 (ops != NULL ? (*ops->vmsetcap)(vmi, vcpu, num, val) : ENXIO)
197 #define VLAPIC_INIT(vmi, vcpu) \
198 (ops != NULL ? (*ops->vlapic_init)(vmi, vcpu) : NULL)
199 #define VLAPIC_CLEANUP(vmi, vlapic) \
200 (ops != NULL ? (*ops->vlapic_cleanup)(vmi, vlapic) : NULL)
201
202 #define fpu_start_emulating() load_cr0(rcr0() | CR0_TS)
203 #define fpu_stop_emulating() clts()
204
205 static MALLOC_DEFINE(M_VM, "vm", "vm");
206
207 /* statistics */
208 static VMM_STAT(VCPU_TOTAL_RUNTIME, "vcpu total runtime");
209
210 SYSCTL_NODE(_hw, OID_AUTO, vmm, CTLFLAG_RW, NULL, NULL);
211
212 /*
213 * Halt the guest if all vcpus are executing a HLT instruction with
214 * interrupts disabled.
215 */
216 static int halt_detection_enabled = 1;
217 TUNABLE_INT("hw.vmm.halt_detection", &halt_detection_enabled);
218 SYSCTL_INT(_hw_vmm, OID_AUTO, halt_detection, CTLFLAG_RDTUN,
219 &halt_detection_enabled, 0,
220 "Halt VM if all vcpus execute HLT with interrupts disabled");
221
222 static int vmm_ipinum;
223 SYSCTL_INT(_hw_vmm, OID_AUTO, ipinum, CTLFLAG_RD, &vmm_ipinum, 0,
224 "IPI vector used for vcpu notifications");
225
226 static int trace_guest_exceptions;
227 SYSCTL_INT(_hw_vmm, OID_AUTO, trace_guest_exceptions, CTLFLAG_RDTUN,
228 &trace_guest_exceptions, 0,
229 "Trap into hypervisor on all guest exceptions and reflect them back");
230
231 static void vm_free_memmap(struct vm *vm, int ident);
232 static bool sysmem_mapping(struct vm *vm, struct mem_map *mm);
233 static void vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr);
234
235 #ifdef KTR
236 static const char *
237 vcpu_state2str(enum vcpu_state state)
238 {
239
240 switch (state) {
241 case VCPU_IDLE:
242 return ("idle");
243 case VCPU_FROZEN:
244 return ("frozen");
245 case VCPU_RUNNING:
246 return ("running");
247 case VCPU_SLEEPING:
248 return ("sleeping");
249 default:
250 return ("unknown");
251 }
252 }
253 #endif
254
255 static void
256 vcpu_cleanup(struct vm *vm, int i, bool destroy)
257 {
258 struct vcpu *vcpu = &vm->vcpu[i];
259
260 VLAPIC_CLEANUP(vm->cookie, vcpu->vlapic);
261 if (destroy) {
262 vmm_stat_free(vcpu->stats);
263 fpu_save_area_free(vcpu->guestfpu);
264 }
265 }
266
267 static void
268 vcpu_init(struct vm *vm, int vcpu_id, bool create)
269 {
270 struct vcpu *vcpu;
271
272 KASSERT(vcpu_id >= 0 && vcpu_id < VM_MAXCPU,
273 ("vcpu_init: invalid vcpu %d", vcpu_id));
274
275 vcpu = &vm->vcpu[vcpu_id];
276
277 if (create) {
278 KASSERT(!vcpu_lock_initialized(vcpu), ("vcpu %d already "
279 "initialized", vcpu_id));
280 vcpu_lock_init(vcpu);
281 vcpu->state = VCPU_IDLE;
282 vcpu->hostcpu = NOCPU;
283 vcpu->guestfpu = fpu_save_area_alloc();
284 vcpu->stats = vmm_stat_alloc();
285 }
286
287 vcpu->vlapic = VLAPIC_INIT(vm->cookie, vcpu_id);
288 vm_set_x2apic_state(vm, vcpu_id, X2APIC_DISABLED);
289 vcpu->reqidle = 0;
290 vcpu->exitintinfo = 0;
291 vcpu->nmi_pending = 0;
292 vcpu->extint_pending = 0;
293 vcpu->exception_pending = 0;
294 vcpu->guest_xcr0 = XFEATURE_ENABLED_X87;
295 fpu_save_area_reset(vcpu->guestfpu);
296 vmm_stat_init(vcpu->stats);
297 }
298
299 int
300 vcpu_trace_exceptions(struct vm *vm, int vcpuid)
301 {
302
303 return (trace_guest_exceptions);
304 }
305
306 struct vm_exit *
307 vm_exitinfo(struct vm *vm, int cpuid)
308 {
309 struct vcpu *vcpu;
310
311 if (cpuid < 0 || cpuid >= VM_MAXCPU)
312 panic("vm_exitinfo: invalid cpuid %d", cpuid);
313
314 vcpu = &vm->vcpu[cpuid];
315
316 return (&vcpu->exitinfo);
317 }
318
319 static void
320 vmm_resume(void)
321 {
322 VMM_RESUME();
323 }
324
325 static int
326 vmm_init(void)
327 {
328 int error;
329
330 vmm_host_state_init();
331
332 vmm_ipinum = vmm_ipi_alloc();
333 if (vmm_ipinum == 0)
334 vmm_ipinum = IPI_AST;
335
336 error = vmm_mem_init();
337 if (error)
338 return (error);
339
340 if (vmm_is_intel())
341 ops = &vmm_ops_intel;
342 else if (vmm_is_amd())
343 ops = &vmm_ops_amd;
344 else
345 return (ENXIO);
346
347 vmm_resume_p = vmm_resume;
348
349 return (VMM_INIT(vmm_ipinum));
350 }
351
352 static int
353 vmm_handler(module_t mod, int what, void *arg)
354 {
355 int error;
356
357 switch (what) {
358 case MOD_LOAD:
359 vmmdev_init();
360 error = vmm_init();
361 if (error == 0)
362 vmm_initialized = 1;
363 break;
364 case MOD_UNLOAD:
365 error = vmmdev_cleanup();
366 if (error == 0) {
367 vmm_resume_p = NULL;
368 iommu_cleanup();
369 if (vmm_ipinum != IPI_AST)
370 vmm_ipi_free(vmm_ipinum);
371 error = VMM_CLEANUP();
372 /*
373 * Something bad happened - prevent new
374 * VMs from being created
375 */
376 if (error)
377 vmm_initialized = 0;
378 }
379 break;
380 default:
381 error = 0;
382 break;
383 }
384 return (error);
385 }
386
387 static moduledata_t vmm_kmod = {
388 "vmm",
389 vmm_handler,
390 NULL
391 };
392
393 /*
394 * vmm initialization has the following dependencies:
395 *
396 * - VT-x initialization requires smp_rendezvous() and therefore must happen
397 * after SMP is fully functional (after SI_SUB_SMP).
398 */
399 DECLARE_MODULE(vmm, vmm_kmod, SI_SUB_SMP + 1, SI_ORDER_ANY);
400 MODULE_VERSION(vmm, 1);
401
402 static void
403 vm_init(struct vm *vm, bool create)
404 {
405 int i;
406
407 vm->cookie = VMINIT(vm, vmspace_pmap(vm->vmspace));
408 vm->iommu = NULL;
409 vm->vioapic = vioapic_init(vm);
410 vm->vhpet = vhpet_init(vm);
411 vm->vatpic = vatpic_init(vm);
412 vm->vatpit = vatpit_init(vm);
413 vm->vpmtmr = vpmtmr_init(vm);
414 if (create)
415 vm->vrtc = vrtc_init(vm);
416
417 CPU_ZERO(&vm->active_cpus);
418
419 vm->suspend = 0;
420 CPU_ZERO(&vm->suspended_cpus);
421
422 for (i = 0; i < VM_MAXCPU; i++)
423 vcpu_init(vm, i, create);
424 }
425
426 int
427 vm_create(const char *name, struct vm **retvm)
428 {
429 struct vm *vm;
430 struct vmspace *vmspace;
431
432 /*
433 * If vmm.ko could not be successfully initialized then don't attempt
434 * to create the virtual machine.
435 */
436 if (!vmm_initialized)
437 return (ENXIO);
438
439 if (name == NULL || strlen(name) >= VM_MAX_NAMELEN)
440 return (EINVAL);
441
442 vmspace = VMSPACE_ALLOC(0, VM_MAXUSER_ADDRESS);
443 if (vmspace == NULL)
444 return (ENOMEM);
445
446 vm = malloc(sizeof(struct vm), M_VM, M_WAITOK | M_ZERO);
447 strcpy(vm->name, name);
448 vm->vmspace = vmspace;
449 mtx_init(&vm->rendezvous_mtx, "vm rendezvous lock", 0, MTX_DEF);
450
451 vm_init(vm, true);
452
453 *retvm = vm;
454 return (0);
455 }
456
457 static void
458 vm_cleanup(struct vm *vm, bool destroy)
459 {
460 struct mem_map *mm;
461 int i;
462
463 ppt_unassign_all(vm);
464
465 if (vm->iommu != NULL)
466 iommu_destroy_domain(vm->iommu);
467
468 if (destroy)
469 vrtc_cleanup(vm->vrtc);
470 else
471 vrtc_reset(vm->vrtc);
472 vpmtmr_cleanup(vm->vpmtmr);
473 vatpit_cleanup(vm->vatpit);
474 vhpet_cleanup(vm->vhpet);
475 vatpic_cleanup(vm->vatpic);
476 vioapic_cleanup(vm->vioapic);
477
478 for (i = 0; i < VM_MAXCPU; i++)
479 vcpu_cleanup(vm, i, destroy);
480
481 VMCLEANUP(vm->cookie);
482
483 /*
484 * System memory is removed from the guest address space only when
485 * the VM is destroyed. This is because the mapping remains the same
486 * across VM reset.
487 *
488 * Device memory can be relocated by the guest (e.g. using PCI BARs)
489 * so those mappings are removed on a VM reset.
490 */
491 for (i = 0; i < VM_MAX_MEMMAPS; i++) {
492 mm = &vm->mem_maps[i];
493 if (destroy || !sysmem_mapping(vm, mm))
494 vm_free_memmap(vm, i);
495 }
496
497 if (destroy) {
498 for (i = 0; i < VM_MAX_MEMSEGS; i++)
499 vm_free_memseg(vm, i);
500
501 VMSPACE_FREE(vm->vmspace);
502 vm->vmspace = NULL;
503 }
504 }
505
506 void
507 vm_destroy(struct vm *vm)
508 {
509 vm_cleanup(vm, true);
510 free(vm, M_VM);
511 }
512
513 int
514 vm_reinit(struct vm *vm)
515 {
516 int error;
517
518 /*
519 * A virtual machine can be reset only if all vcpus are suspended.
520 */
521 if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
522 vm_cleanup(vm, false);
523 vm_init(vm, false);
524 error = 0;
525 } else {
526 error = EBUSY;
527 }
528
529 return (error);
530 }
531
532 const char *
533 vm_name(struct vm *vm)
534 {
535 return (vm->name);
536 }
537
538 int
539 vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
540 {
541 vm_object_t obj;
542
543 if ((obj = vmm_mmio_alloc(vm->vmspace, gpa, len, hpa)) == NULL)
544 return (ENOMEM);
545 else
546 return (0);
547 }
548
549 int
550 vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len)
551 {
552
553 vmm_mmio_free(vm->vmspace, gpa, len);
554 return (0);
555 }
556
557 /*
558 * Return 'true' if 'gpa' is allocated in the guest address space.
559 *
560 * This function is called in the context of a running vcpu which acts as
561 * an implicit lock on 'vm->mem_maps[]'.
562 */
563 bool
564 vm_mem_allocated(struct vm *vm, int vcpuid, vm_paddr_t gpa)
565 {
566 struct mem_map *mm;
567 int i;
568
569 #ifdef INVARIANTS
570 int hostcpu, state;
571 state = vcpu_get_state(vm, vcpuid, &hostcpu);
572 KASSERT(state == VCPU_RUNNING && hostcpu == curcpu,
573 ("%s: invalid vcpu state %d/%d", __func__, state, hostcpu));
574 #endif
575
576 for (i = 0; i < VM_MAX_MEMMAPS; i++) {
577 mm = &vm->mem_maps[i];
578 if (mm->len != 0 && gpa >= mm->gpa && gpa < mm->gpa + mm->len)
579 return (true); /* 'gpa' is sysmem or devmem */
580 }
581
582 if (ppt_is_mmio(vm, gpa))
583 return (true); /* 'gpa' is pci passthru mmio */
584
585 return (false);
586 }
587
588 int
589 vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem)
590 {
591 struct mem_seg *seg;
592 vm_object_t obj;
593
594 if (ident < 0 || ident >= VM_MAX_MEMSEGS)
595 return (EINVAL);
596
597 if (len == 0 || (len & PAGE_MASK))
598 return (EINVAL);
599
600 seg = &vm->mem_segs[ident];
601 if (seg->object != NULL) {
602 if (seg->len == len && seg->sysmem == sysmem)
603 return (EEXIST);
604 else
605 return (EINVAL);
606 }
607
608 obj = vm_object_allocate(OBJT_DEFAULT, len >> PAGE_SHIFT);
609 if (obj == NULL)
610 return (ENOMEM);
611
612 seg->len = len;
613 seg->object = obj;
614 seg->sysmem = sysmem;
615 return (0);
616 }
617
618 int
619 vm_get_memseg(struct vm *vm, int ident, size_t *len, bool *sysmem,
620 vm_object_t *objptr)
621 {
622 struct mem_seg *seg;
623
624 if (ident < 0 || ident >= VM_MAX_MEMSEGS)
625 return (EINVAL);
626
627 seg = &vm->mem_segs[ident];
628 if (len)
629 *len = seg->len;
630 if (sysmem)
631 *sysmem = seg->sysmem;
632 if (objptr)
633 *objptr = seg->object;
634 return (0);
635 }
636
637 void
638 vm_free_memseg(struct vm *vm, int ident)
639 {
640 struct mem_seg *seg;
641
642 KASSERT(ident >= 0 && ident < VM_MAX_MEMSEGS,
643 ("%s: invalid memseg ident %d", __func__, ident));
644
645 seg = &vm->mem_segs[ident];
646 if (seg->object != NULL) {
647 vm_object_deallocate(seg->object);
648 bzero(seg, sizeof(struct mem_seg));
649 }
650 }
651
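/*
 * Map 'len' bytes of memory segment 'segid', starting at offset 'first'
 * within the segment, into the guest physical address space at 'gpa'.
 * The mapping is recorded in a free slot of 'vm->mem_maps[]' and is
 * optionally wired if VM_MEMMAP_F_WIRED is requested.
 */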
652 int
653 vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t first,
654 size_t len, int prot, int flags)
655 {
656 struct mem_seg *seg;
657 struct mem_map *m, *map;
658 vm_ooffset_t last;
659 int i, error;
660
661 if (prot == 0 || (prot & ~(VM_PROT_ALL)) != 0)
662 return (EINVAL);
663
664 if (flags & ~VM_MEMMAP_F_WIRED)
665 return (EINVAL);
666
667 if (segid < 0 || segid >= VM_MAX_MEMSEGS)
668 return (EINVAL);
669
670 seg = &vm->mem_segs[segid];
671 if (seg->object == NULL)
672 return (EINVAL);
673
674 last = first + len;
675 if (first < 0 || first >= last || last > seg->len)
676 return (EINVAL);
677
678 if ((gpa | first | last) & PAGE_MASK)
679 return (EINVAL);
680
681 map = NULL;
682 for (i = 0; i < VM_MAX_MEMMAPS; i++) {
683 m = &vm->mem_maps[i];
684 if (m->len == 0) {
685 map = m;
686 break;
687 }
688 }
689
690 if (map == NULL)
691 return (ENOSPC);
692
693 error = vm_map_find(&vm->vmspace->vm_map, seg->object, first, &gpa,
694 len, 0, VMFS_NO_SPACE, prot, prot, 0);
695 if (error != KERN_SUCCESS)
696 return (EFAULT);
697
698 vm_object_reference(seg->object);
699
700 if (flags & VM_MEMMAP_F_WIRED) {
701 error = vm_map_wire(&vm->vmspace->vm_map, gpa, gpa + len,
702 VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
703 if (error != KERN_SUCCESS) {
704 vm_map_remove(&vm->vmspace->vm_map, gpa, gpa + len);
705 return (EFAULT);
706 }
707 }
708
709 map->gpa = gpa;
710 map->len = len;
711 map->segoff = first;
712 map->segid = segid;
713 map->prot = prot;
714 map->flags = flags;
715 return (0);
716 }
717
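/*
 * Return the attributes of the memory map with the lowest guest physical
 * address that is greater than or equal to '*gpa', or ENOENT if no such
 * mapping exists. This lets callers iterate over the guest address space
 * one mapping at a time.
 */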
718 int
719 vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid,
720 vm_ooffset_t *segoff, size_t *len, int *prot, int *flags)
721 {
722 struct mem_map *mm, *mmnext;
723 int i;
724
725 mmnext = NULL;
726 for (i = 0; i < VM_MAX_MEMMAPS; i++) {
727 mm = &vm->mem_maps[i];
728 if (mm->len == 0 || mm->gpa < *gpa)
729 continue;
730 if (mmnext == NULL || mm->gpa < mmnext->gpa)
731 mmnext = mm;
732 }
733
734 if (mmnext != NULL) {
735 *gpa = mmnext->gpa;
736 if (segid)
737 *segid = mmnext->segid;
738 if (segoff)
739 *segoff = mmnext->segoff;
740 if (len)
741 *len = mmnext->len;
742 if (prot)
743 *prot = mmnext->prot;
744 if (flags)
745 *flags = mmnext->flags;
746 return (0);
747 } else {
748 return (ENOENT);
749 }
750 }
751
752 static void
753 vm_free_memmap(struct vm *vm, int ident)
754 {
755 struct mem_map *mm;
756 int error;
757
758 mm = &vm->mem_maps[ident];
759 if (mm->len) {
760 error = vm_map_remove(&vm->vmspace->vm_map, mm->gpa,
761 mm->gpa + mm->len);
762 KASSERT(error == KERN_SUCCESS, ("%s: vm_map_remove error %d",
763 __func__, error));
764 bzero(mm, sizeof(struct mem_map));
765 }
766 }
767
768 static __inline bool
769 sysmem_mapping(struct vm *vm, struct mem_map *mm)
770 {
771
772 if (mm->len != 0 && vm->mem_segs[mm->segid].sysmem)
773 return (true);
774 else
775 return (false);
776 }
777
778 static vm_paddr_t
779 sysmem_maxaddr(struct vm *vm)
780 {
781 struct mem_map *mm;
782 vm_paddr_t maxaddr;
783 int i;
784
785 maxaddr = 0;
786 for (i = 0; i < VM_MAX_MEMMAPS; i++) {
787 mm = &vm->mem_maps[i];
788 if (sysmem_mapping(vm, mm)) {
789 if (maxaddr < mm->gpa + mm->len)
790 maxaddr = mm->gpa + mm->len;
791 }
792 }
793 return (maxaddr);
794 }
795
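/*
 * Add or remove IOMMU mappings for all wired system memory mappings.
 * When mapping, each guest page is entered into the VM's IOMMU domain
 * (gpa -> hpa) and removed from the host domain; unmapping reverses
 * this. Stale translations are flushed from the domain that lost pages.
 */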
796 static void
797 vm_iommu_modify(struct vm *vm, boolean_t map)
798 {
799 int i, sz;
800 vm_paddr_t gpa, hpa;
801 struct mem_map *mm;
802 void *vp, *cookie, *host_domain;
803
804 sz = PAGE_SIZE;
805 host_domain = iommu_host_domain();
806
807 for (i = 0; i < VM_MAX_MEMMAPS; i++) {
808 mm = &vm->mem_maps[i];
809 if (!sysmem_mapping(vm, mm))
810 continue;
811
812 if (map) {
813 KASSERT((mm->flags & VM_MEMMAP_F_IOMMU) == 0,
814 ("iommu map found invalid memmap %#lx/%#lx/%#x",
815 mm->gpa, mm->len, mm->flags));
816 if ((mm->flags & VM_MEMMAP_F_WIRED) == 0)
817 continue;
818 mm->flags |= VM_MEMMAP_F_IOMMU;
819 } else {
820 if ((mm->flags & VM_MEMMAP_F_IOMMU) == 0)
821 continue;
822 mm->flags &= ~VM_MEMMAP_F_IOMMU;
823 KASSERT((mm->flags & VM_MEMMAP_F_WIRED) != 0,
824 ("iommu unmap found invalid memmap %#lx/%#lx/%#x",
825 mm->gpa, mm->len, mm->flags));
826 }
827
828 gpa = mm->gpa;
829 while (gpa < mm->gpa + mm->len) {
830 vp = vm_gpa_hold(vm, -1, gpa, PAGE_SIZE, VM_PROT_WRITE,
831 &cookie);
832 KASSERT(vp != NULL, ("vm(%s) could not map gpa %#lx",
833 vm_name(vm), gpa));
834
835 vm_gpa_release(cookie);
836
837 hpa = DMAP_TO_PHYS((uintptr_t)vp);
838 if (map) {
839 iommu_create_mapping(vm->iommu, gpa, hpa, sz);
840 iommu_remove_mapping(host_domain, hpa, sz);
841 } else {
842 iommu_remove_mapping(vm->iommu, gpa, sz);
843 iommu_create_mapping(host_domain, hpa, hpa, sz);
844 }
845
846 gpa += PAGE_SIZE;
847 }
848 }
849
850 /*
851 * Invalidate the cached translations associated with the domain
852 * from which pages were removed.
853 */
854 if (map)
855 iommu_invalidate_tlb(host_domain);
856 else
857 iommu_invalidate_tlb(vm->iommu);
858 }
859
860 #define vm_iommu_unmap(vm) vm_iommu_modify((vm), FALSE)
861 #define vm_iommu_map(vm) vm_iommu_modify((vm), TRUE)
862
863 int
864 vm_unassign_pptdev(struct vm *vm, int bus, int slot, int func)
865 {
866 int error;
867
868 error = ppt_unassign_device(vm, bus, slot, func);
869 if (error)
870 return (error);
871
872 if (ppt_assigned_devices(vm) == 0)
873 vm_iommu_unmap(vm);
874
875 return (0);
876 }
877
878 int
879 vm_assign_pptdev(struct vm *vm, int bus, int slot, int func)
880 {
881 int error;
882 vm_paddr_t maxaddr;
883
884 /* Set up the IOMMU to do the 'gpa' to 'hpa' translation */
885 if (ppt_assigned_devices(vm) == 0) {
886 KASSERT(vm->iommu == NULL,
887 ("vm_assign_pptdev: iommu must be NULL"));
888 maxaddr = sysmem_maxaddr(vm);
889 vm->iommu = iommu_create_domain(maxaddr);
890 if (vm->iommu == NULL)
891 return (ENXIO);
892 vm_iommu_map(vm);
893 }
894
895 error = ppt_assign_device(vm, bus, slot, func);
896 return (error);
897 }
898
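/*
 * Hold the page backing guest physical address 'gpa' and return a host
 * pointer to it via the direct map. The range ['gpa', 'gpa' + 'len') must
 * not cross a page boundary. '*cookie' is set to the held page and must
 * be passed to vm_gpa_release() when the caller is done with the mapping.
 */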
899 void *
900 vm_gpa_hold(struct vm *vm, int vcpuid, vm_paddr_t gpa, size_t len, int reqprot,
901 void **cookie)
902 {
903 int i, count, pageoff;
904 struct mem_map *mm;
905 vm_page_t m;
906 #ifdef INVARIANTS
907 /*
908 * All vcpus are frozen by ioctls that modify the memory map
909 * (e.g. VM_MMAP_MEMSEG). Therefore 'vm->memmap[]' stability is
910 * guaranteed if at least one vcpu is in the VCPU_FROZEN state.
911 */
912 int state;
913 KASSERT(vcpuid >= -1 && vcpuid < VM_MAXCPU, ("%s: invalid vcpuid %d",
914 __func__, vcpuid));
915 for (i = 0; i < VM_MAXCPU; i++) {
916 if (vcpuid != -1 && vcpuid != i)
917 continue;
918 state = vcpu_get_state(vm, i, NULL);
919 KASSERT(state == VCPU_FROZEN, ("%s: invalid vcpu state %d",
920 __func__, state));
921 }
922 #endif
923 pageoff = gpa & PAGE_MASK;
924 if (len > PAGE_SIZE - pageoff)
925 panic("vm_gpa_hold: invalid gpa/len: 0x%016lx/%lu", gpa, len);
926
927 count = 0;
928 for (i = 0; i < VM_MAX_MEMMAPS; i++) {
929 mm = &vm->mem_maps[i];
930 if (sysmem_mapping(vm, mm) && gpa >= mm->gpa &&
931 gpa < mm->gpa + mm->len) {
932 count = vm_fault_quick_hold_pages(&vm->vmspace->vm_map,
933 trunc_page(gpa), PAGE_SIZE, reqprot, &m, 1);
934 break;
935 }
936 }
937
938 if (count == 1) {
939 *cookie = m;
940 return ((void *)(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)) + pageoff));
941 } else {
942 *cookie = NULL;
943 return (NULL);
944 }
945 }
946
947 void
948 vm_gpa_release(void *cookie)
949 {
950 vm_page_t m = cookie;
951
952 vm_page_lock(m);
953 vm_page_unhold(m);
954 vm_page_unlock(m);
955 }
956
957 int
958 vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval)
959 {
960
961 if (vcpu < 0 || vcpu >= VM_MAXCPU)
962 return (EINVAL);
963
964 if (reg >= VM_REG_LAST)
965 return (EINVAL);
966
967 return (VMGETREG(vm->cookie, vcpu, reg, retval));
968 }
969
970 int
971 vm_set_register(struct vm *vm, int vcpuid, int reg, uint64_t val)
972 {
973 struct vcpu *vcpu;
974 int error;
975
976 if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
977 return (EINVAL);
978
979 if (reg >= VM_REG_LAST)
980 return (EINVAL);
981
982 error = VMSETREG(vm->cookie, vcpuid, reg, val);
983 if (error || reg != VM_REG_GUEST_RIP)
984 return (error);
985
986 /* Set 'nextrip' to match the value of %rip */
987 VCPU_CTR1(vm, vcpuid, "Setting nextrip to %#lx", val);
988 vcpu = &vm->vcpu[vcpuid];
989 vcpu->nextrip = val;
990 return (0);
991 }
992
993 static boolean_t
994 is_descriptor_table(int reg)
995 {
996
997 switch (reg) {
998 case VM_REG_GUEST_IDTR:
999 case VM_REG_GUEST_GDTR:
1000 return (TRUE);
1001 default:
1002 return (FALSE);
1003 }
1004 }
1005
1006 static boolean_t
1007 is_segment_register(int reg)
1008 {
1009
1010 switch (reg) {
1011 case VM_REG_GUEST_ES:
1012 case VM_REG_GUEST_CS:
1013 case VM_REG_GUEST_SS:
1014 case VM_REG_GUEST_DS:
1015 case VM_REG_GUEST_FS:
1016 case VM_REG_GUEST_GS:
1017 case VM_REG_GUEST_TR:
1018 case VM_REG_GUEST_LDTR:
1019 return (TRUE);
1020 default:
1021 return (FALSE);
1022 }
1023 }
1024
1025 int
1026 vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
1027 struct seg_desc *desc)
1028 {
1029
1030 if (vcpu < 0 || vcpu >= VM_MAXCPU)
1031 return (EINVAL);
1032
1033 if (!is_segment_register(reg) && !is_descriptor_table(reg))
1034 return (EINVAL);
1035
1036 return (VMGETDESC(vm->cookie, vcpu, reg, desc));
1037 }
1038
1039 int
1040 vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
1041 struct seg_desc *desc)
1042 {
1043 if (vcpu < 0 || vcpu >= VM_MAXCPU)
1044 return (EINVAL);
1045
1046 if (!is_segment_register(reg) && !is_descriptor_table(reg))
1047 return (EINVAL);
1048
1049 return (VMSETDESC(vm->cookie, vcpu, reg, desc));
1050 }
1051
1052 static void
1053 restore_guest_fpustate(struct vcpu *vcpu)
1054 {
1055
1056 /* flush host state to the pcb */
1057 fpuexit(curthread);
1058
1059 /* restore guest FPU state */
1060 fpu_stop_emulating();
1061 fpurestore(vcpu->guestfpu);
1062
1063 /* restore guest XCR0 if XSAVE is enabled in the host */
1064 if (rcr4() & CR4_XSAVE)
1065 load_xcr(0, vcpu->guest_xcr0);
1066
1067 /*
1068 * The FPU is now "dirty" with the guest's state so turn on emulation
1069 * to trap any access to the FPU by the host.
1070 */
1071 fpu_start_emulating();
1072 }
1073
1074 static void
1075 save_guest_fpustate(struct vcpu *vcpu)
1076 {
1077
1078 if ((rcr0() & CR0_TS) == 0)
1079 panic("fpu emulation not enabled in host!");
1080
1081 /* save guest XCR0 and restore host XCR0 */
1082 if (rcr4() & CR4_XSAVE) {
1083 vcpu->guest_xcr0 = rxcr(0);
1084 load_xcr(0, vmm_get_host_xcr0());
1085 }
1086
1087 /* save guest FPU state */
1088 fpu_stop_emulating();
1089 fpusave(vcpu->guestfpu);
1090 fpu_start_emulating();
1091 }
1092
1093 static VMM_STAT(VCPU_IDLE_TICKS, "number of ticks vcpu was idle");
1094
1095 static int
1096 vcpu_set_state_locked(struct vm *vm, int vcpuid, enum vcpu_state newstate,
1097 bool from_idle)
1098 {
1099 struct vcpu *vcpu;
1100 int error;
1101
1102 vcpu = &vm->vcpu[vcpuid];
1103 vcpu_assert_locked(vcpu);
1104
1105 /*
1106 * State transitions from the vmmdev_ioctl() must always begin from
1107 * the VCPU_IDLE state. This guarantees that there is only a single
1108 * ioctl() operating on a vcpu at any point.
1109 */
1110 if (from_idle) {
1111 while (vcpu->state != VCPU_IDLE) {
1112 vcpu->reqidle = 1;
1113 vcpu_notify_event_locked(vcpu, false);
1114 VCPU_CTR1(vm, vcpuid, "vcpu state change from %s to "
1115 "idle requested", vcpu_state2str(vcpu->state));
1116 msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz);
1117 }
1118 } else {
1119 KASSERT(vcpu->state != VCPU_IDLE, ("invalid transition from "
1120 "vcpu idle state"));
1121 }
1122
1123 if (vcpu->state == VCPU_RUNNING) {
1124 KASSERT(vcpu->hostcpu == curcpu, ("curcpu %d and hostcpu %d "
1125 "mismatch for running vcpu", curcpu, vcpu->hostcpu));
1126 } else {
1127 KASSERT(vcpu->hostcpu == NOCPU, ("Invalid hostcpu %d for a "
1128 "vcpu that is not running", vcpu->hostcpu));
1129 }
1130
1131 /*
1132 * The following state transitions are allowed:
1133 * IDLE -> FROZEN -> IDLE
1134 * FROZEN -> RUNNING -> FROZEN
1135 * FROZEN -> SLEEPING -> FROZEN
1136 */
1137 switch (vcpu->state) {
1138 case VCPU_IDLE:
1139 case VCPU_RUNNING:
1140 case VCPU_SLEEPING:
1141 error = (newstate != VCPU_FROZEN);
1142 break;
1143 case VCPU_FROZEN:
1144 error = (newstate == VCPU_FROZEN);
1145 break;
1146 default:
1147 error = 1;
1148 break;
1149 }
1150
1151 if (error)
1152 return (EBUSY);
1153
1154 VCPU_CTR2(vm, vcpuid, "vcpu state changed from %s to %s",
1155 vcpu_state2str(vcpu->state), vcpu_state2str(newstate));
1156
1157 vcpu->state = newstate;
1158 if (newstate == VCPU_RUNNING)
1159 vcpu->hostcpu = curcpu;
1160 else
1161 vcpu->hostcpu = NOCPU;
1162
1163 if (newstate == VCPU_IDLE)
1164 wakeup(&vcpu->state);
1165
1166 return (0);
1167 }
1168
1169 static void
1170 vcpu_require_state(struct vm *vm, int vcpuid, enum vcpu_state newstate)
1171 {
1172 int error;
1173
1174 if ((error = vcpu_set_state(vm, vcpuid, newstate, false)) != 0)
1175 panic("Error %d setting state to %d\n", error, newstate);
1176 }
1177
1178 static void
1179 vcpu_require_state_locked(struct vm *vm, int vcpuid, enum vcpu_state newstate)
1180 {
1181 int error;
1182
1183 if ((error = vcpu_set_state_locked(vm, vcpuid, newstate, false)) != 0)
1184 panic("Error %d setting state to %d", error, newstate);
1185 }
1186
1187 static void
1188 vm_set_rendezvous_func(struct vm *vm, vm_rendezvous_func_t func)
1189 {
1190
1191 KASSERT(mtx_owned(&vm->rendezvous_mtx), ("rendezvous_mtx not locked"));
1192
1193 /*
1194 * Update 'rendezvous_func' and execute a write memory barrier to
1195 * ensure that it is visible across all host cpus. This is not needed
1196 * for correctness but it does ensure that all the vcpus will notice
1197 * that the rendezvous is requested immediately.
1198 */
1199 vm->rendezvous_func = func;
1200 wmb();
1201 }
1202
1203 #define RENDEZVOUS_CTR0(vm, vcpuid, fmt) \
1204 do { \
1205 if (vcpuid >= 0) \
1206 VCPU_CTR0(vm, vcpuid, fmt); \
1207 else \
1208 VM_CTR0(vm, fmt); \
1209 } while (0)
1210
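/*
 * Take part in an active rendezvous: invoke the rendezvous function on
 * behalf of this vcpu if it is a target that has not yet checked in, then
 * sleep until every targeted vcpu has completed the rendezvous. A 'vcpuid'
 * of -1 indicates a caller that is not a vcpu thread and only needs to
 * wait for completion.
 */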
1211 static void
1212 vm_handle_rendezvous(struct vm *vm, int vcpuid)
1213 {
1214
1215 KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < VM_MAXCPU),
1216 ("vm_handle_rendezvous: invalid vcpuid %d", vcpuid));
1217
1218 mtx_lock(&vm->rendezvous_mtx);
1219 while (vm->rendezvous_func != NULL) {
1220 /* 'rendezvous_req_cpus' must be a subset of 'active_cpus' */
1221 CPU_AND(&vm->rendezvous_req_cpus, &vm->active_cpus);
1222
1223 if (vcpuid != -1 &&
1224 CPU_ISSET(vcpuid, &vm->rendezvous_req_cpus) &&
1225 !CPU_ISSET(vcpuid, &vm->rendezvous_done_cpus)) {
1226 VCPU_CTR0(vm, vcpuid, "Calling rendezvous func");
1227 (*vm->rendezvous_func)(vm, vcpuid, vm->rendezvous_arg);
1228 CPU_SET(vcpuid, &vm->rendezvous_done_cpus);
1229 }
1230 if (CPU_CMP(&vm->rendezvous_req_cpus,
1231 &vm->rendezvous_done_cpus) == 0) {
1232 VCPU_CTR0(vm, vcpuid, "Rendezvous completed");
1233 vm_set_rendezvous_func(vm, NULL);
1234 wakeup(&vm->rendezvous_func);
1235 break;
1236 }
1237 RENDEZVOUS_CTR0(vm, vcpuid, "Wait for rendezvous completion");
1238 mtx_sleep(&vm->rendezvous_func, &vm->rendezvous_mtx, 0,
1239 "vmrndv", 0);
1240 }
1241 mtx_unlock(&vm->rendezvous_mtx);
1242 }
1243
1244 /*
1245 * Emulate a guest 'hlt' by sleeping until the vcpu is ready to run.
1246 */
1247 static int
1248 vm_handle_hlt(struct vm *vm, int vcpuid, bool intr_disabled, bool *retu)
1249 {
1250 struct vcpu *vcpu;
1251 const char *wmesg;
1252 int t, vcpu_halted, vm_halted;
1253
1254 KASSERT(!CPU_ISSET(vcpuid, &vm->halted_cpus), ("vcpu already halted"));
1255
1256 vcpu = &vm->vcpu[vcpuid];
1257 vcpu_halted = 0;
1258 vm_halted = 0;
1259
1260 vcpu_lock(vcpu);
1261 while (1) {
1262 /*
1263 * Do a final check for pending NMI or interrupts before
1264 * really putting this thread to sleep. Also check for
1265 * software events that would cause this vcpu to wakeup.
1266 *
1267 * These interrupts/events could have happened after the
1268 * vcpu returned from VMRUN() and before it acquired the
1269 * vcpu lock above.
1270 */
1271 if (vm->rendezvous_func != NULL || vm->suspend || vcpu->reqidle)
1272 break;
1273 if (vm_nmi_pending(vm, vcpuid))
1274 break;
1275 if (!intr_disabled) {
1276 if (vm_extint_pending(vm, vcpuid) ||
1277 vlapic_pending_intr(vcpu->vlapic, NULL)) {
1278 break;
1279 }
1280 }
1281
1282 /* Don't go to sleep if the vcpu thread needs to yield */
1283 if (vcpu_should_yield(vm, vcpuid))
1284 break;
1285
1286 /*
1287 * Some Linux guests implement "halt" by having all vcpus
1288 * execute HLT with interrupts disabled. 'halted_cpus' keeps
1289 * track of the vcpus that have entered this state. When all
1290 * vcpus enter the halted state the virtual machine is halted.
1291 */
1292 if (intr_disabled) {
1293 wmesg = "vmhalt";
1294 VCPU_CTR0(vm, vcpuid, "Halted");
1295 if (!vcpu_halted && halt_detection_enabled) {
1296 vcpu_halted = 1;
1297 CPU_SET_ATOMIC(vcpuid, &vm->halted_cpus);
1298 }
1299 if (CPU_CMP(&vm->halted_cpus, &vm->active_cpus) == 0) {
1300 vm_halted = 1;
1301 break;
1302 }
1303 } else {
1304 wmesg = "vmidle";
1305 }
1306
1307 t = ticks;
1308 vcpu_require_state_locked(vm, vcpuid, VCPU_SLEEPING);
1309 /*
1310 * XXX msleep_spin() cannot be interrupted by signals so
1311 * wake up periodically to check pending signals.
1312 */
1313 msleep_spin(vcpu, &vcpu->mtx, wmesg, hz);
1314 vcpu_require_state_locked(vm, vcpuid, VCPU_FROZEN);
1315 vmm_stat_incr(vm, vcpuid, VCPU_IDLE_TICKS, ticks - t);
1316 }
1317
1318 if (vcpu_halted)
1319 CPU_CLR_ATOMIC(vcpuid, &vm->halted_cpus);
1320
1321 vcpu_unlock(vcpu);
1322
1323 if (vm_halted)
1324 vm_suspend(vm, VM_SUSPEND_HALT);
1325
1326 return (0);
1327 }
1328
1329 static int
1330 vm_handle_paging(struct vm *vm, int vcpuid, bool *retu)
1331 {
1332 int rv, ftype;
1333 struct vm_map *map;
1334 struct vcpu *vcpu;
1335 struct vm_exit *vme;
1336
1337 vcpu = &vm->vcpu[vcpuid];
1338 vme = &vcpu->exitinfo;
1339
1340 KASSERT(vme->inst_length == 0, ("%s: invalid inst_length %d",
1341 __func__, vme->inst_length));
1342
1343 ftype = vme->u.paging.fault_type;
1344 KASSERT(ftype == VM_PROT_READ ||
1345 ftype == VM_PROT_WRITE || ftype == VM_PROT_EXECUTE,
1346 ("vm_handle_paging: invalid fault_type %d", ftype));
1347
1348 if (ftype == VM_PROT_READ || ftype == VM_PROT_WRITE) {
1349 rv = pmap_emulate_accessed_dirty(vmspace_pmap(vm->vmspace),
1350 vme->u.paging.gpa, ftype);
1351 if (rv == 0) {
1352 VCPU_CTR2(vm, vcpuid, "%s bit emulation for gpa %#lx",
1353 ftype == VM_PROT_READ ? "accessed" : "dirty",
1354 vme->u.paging.gpa);
1355 goto done;
1356 }
1357 }
1358
1359 map = &vm->vmspace->vm_map;
1360 rv = vm_fault(map, vme->u.paging.gpa, ftype, VM_FAULT_NORMAL);
1361
1362 VCPU_CTR3(vm, vcpuid, "vm_handle_paging rv = %d, gpa = %#lx, "
1363 "ftype = %d", rv, vme->u.paging.gpa, ftype);
1364
1365 if (rv != KERN_SUCCESS)
1366 return (EFAULT);
1367 done:
1368 return (0);
1369 }
1370
1371 static int
1372 vm_handle_inst_emul(struct vm *vm, int vcpuid, bool *retu)
1373 {
1374 struct vie *vie;
1375 struct vcpu *vcpu;
1376 struct vm_exit *vme;
1377 uint64_t gla, gpa, cs_base;
1378 struct vm_guest_paging *paging;
1379 mem_region_read_t mread;
1380 mem_region_write_t mwrite;
1381 enum vm_cpu_mode cpu_mode;
1382 int cs_d, error, fault;
1383
1384 vcpu = &vm->vcpu[vcpuid];
1385 vme = &vcpu->exitinfo;
1386
1387 KASSERT(vme->inst_length == 0, ("%s: invalid inst_length %d",
1388 __func__, vme->inst_length));
1389
1390 gla = vme->u.inst_emul.gla;
1391 gpa = vme->u.inst_emul.gpa;
1392 cs_base = vme->u.inst_emul.cs_base;
1393 cs_d = vme->u.inst_emul.cs_d;
1394 vie = &vme->u.inst_emul.vie;
1395 paging = &vme->u.inst_emul.paging;
1396 cpu_mode = paging->cpu_mode;
1397
1398 VCPU_CTR1(vm, vcpuid, "inst_emul fault accessing gpa %#lx", gpa);
1399
1400 /* Fetch, decode and emulate the faulting instruction */
1401 if (vie->num_valid == 0) {
1402 error = vmm_fetch_instruction(vm, vcpuid, paging, vme->rip +
1403 cs_base, VIE_INST_SIZE, vie, &fault);
1404 } else {
1405 /*
1406 * The instruction bytes have already been copied into 'vie'
1407 */
1408 error = fault = 0;
1409 }
1410 if (error || fault)
1411 return (error);
1412
1413 if (vmm_decode_instruction(vm, vcpuid, gla, cpu_mode, cs_d, vie) != 0) {
1414 VCPU_CTR1(vm, vcpuid, "Error decoding instruction at %#lx",
1415 vme->rip + cs_base);
1416 *retu = true; /* dump instruction bytes in userspace */
1417 return (0);
1418 }
1419
1420 /*
1421 * Update 'nextrip' based on the length of the emulated instruction.
1422 */
1423 vme->inst_length = vie->num_processed;
1424 vcpu->nextrip += vie->num_processed;
1425 VCPU_CTR1(vm, vcpuid, "nextrip updated to %#lx after instruction "
1426 "decoding", vcpu->nextrip);
1427
1428 /* return to userland unless this is an in-kernel emulated device */
1429 if (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE) {
1430 mread = lapic_mmio_read;
1431 mwrite = lapic_mmio_write;
1432 } else if (gpa >= VIOAPIC_BASE && gpa < VIOAPIC_BASE + VIOAPIC_SIZE) {
1433 mread = vioapic_mmio_read;
1434 mwrite = vioapic_mmio_write;
1435 } else if (gpa >= VHPET_BASE && gpa < VHPET_BASE + VHPET_SIZE) {
1436 mread = vhpet_mmio_read;
1437 mwrite = vhpet_mmio_write;
1438 } else {
1439 *retu = true;
1440 return (0);
1441 }
1442
1443 error = vmm_emulate_instruction(vm, vcpuid, gpa, vie, paging,
1444 mread, mwrite, retu);
1445
1446 return (error);
1447 }
1448
1449 static int
1450 vm_handle_suspend(struct vm *vm, int vcpuid, bool *retu)
1451 {
1452 int i, done;
1453 struct vcpu *vcpu;
1454
1455 done = 0;
1456 vcpu = &vm->vcpu[vcpuid];
1457
1458 CPU_SET_ATOMIC(vcpuid, &vm->suspended_cpus);
1459
1460 /*
1461 * Wait until all 'active_cpus' have suspended themselves.
1462 *
1463 * Since a VM may be suspended at any time including when one or
1464 * more vcpus are doing a rendezvous we need to call the rendezvous
1465 * handler while we are waiting to prevent a deadlock.
1466 */
1467 vcpu_lock(vcpu);
1468 while (1) {
1469 if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
1470 VCPU_CTR0(vm, vcpuid, "All vcpus suspended");
1471 break;
1472 }
1473
1474 if (vm->rendezvous_func == NULL) {
1475 VCPU_CTR0(vm, vcpuid, "Sleeping during suspend");
1476 vcpu_require_state_locked(vm, vcpuid, VCPU_SLEEPING);
1477 msleep_spin(vcpu, &vcpu->mtx, "vmsusp", hz);
1478 vcpu_require_state_locked(vm, vcpuid, VCPU_FROZEN);
1479 } else {
1480 VCPU_CTR0(vm, vcpuid, "Rendezvous during suspend");
1481 vcpu_unlock(vcpu);
1482 vm_handle_rendezvous(vm, vcpuid);
1483 vcpu_lock(vcpu);
1484 }
1485 }
1486 vcpu_unlock(vcpu);
1487
1488 /*
1489 * Wakeup the other sleeping vcpus and return to userspace.
1490 */
1491 for (i = 0; i < VM_MAXCPU; i++) {
1492 if (CPU_ISSET(i, &vm->suspended_cpus)) {
1493 vcpu_notify_event(vm, i, false);
1494 }
1495 }
1496
1497 *retu = true;
1498 return (0);
1499 }
1500
1501 static int
1502 vm_handle_reqidle(struct vm *vm, int vcpuid, bool *retu)
1503 {
1504 struct vcpu *vcpu = &vm->vcpu[vcpuid];
1505
1506 vcpu_lock(vcpu);
1507 KASSERT(vcpu->reqidle, ("invalid vcpu reqidle %d", vcpu->reqidle));
1508 vcpu->reqidle = 0;
1509 vcpu_unlock(vcpu);
1510 *retu = true;
1511 return (0);
1512 }
1513
1514 int
1515 vm_suspend(struct vm *vm, enum vm_suspend_how how)
1516 {
1517 int i;
1518
1519 if (how <= VM_SUSPEND_NONE || how >= VM_SUSPEND_LAST)
1520 return (EINVAL);
1521
1522 if (atomic_cmpset_int(&vm->suspend, 0, how) == 0) {
1523 VM_CTR2(vm, "virtual machine already suspended %d/%d",
1524 vm->suspend, how);
1525 return (EALREADY);
1526 }
1527
1528 VM_CTR1(vm, "virtual machine successfully suspended %d", how);
1529
1530 /*
1531 * Notify all active vcpus that they are now suspended.
1532 */
1533 for (i = 0; i < VM_MAXCPU; i++) {
1534 if (CPU_ISSET(i, &vm->active_cpus))
1535 vcpu_notify_event(vm, i, false);
1536 }
1537
1538 return (0);
1539 }
1540
1541 void
1542 vm_exit_suspended(struct vm *vm, int vcpuid, uint64_t rip)
1543 {
1544 struct vm_exit *vmexit;
1545
1546 KASSERT(vm->suspend > VM_SUSPEND_NONE && vm->suspend < VM_SUSPEND_LAST,
1547 ("vm_exit_suspended: invalid suspend type %d", vm->suspend));
1548
1549 vmexit = vm_exitinfo(vm, vcpuid);
1550 vmexit->rip = rip;
1551 vmexit->inst_length = 0;
1552 vmexit->exitcode = VM_EXITCODE_SUSPENDED;
1553 vmexit->u.suspended.how = vm->suspend;
1554 }
1555
1556 void
1557 vm_exit_rendezvous(struct vm *vm, int vcpuid, uint64_t rip)
1558 {
1559 struct vm_exit *vmexit;
1560
1561 KASSERT(vm->rendezvous_func != NULL, ("rendezvous not in progress"));
1562
1563 vmexit = vm_exitinfo(vm, vcpuid);
1564 vmexit->rip = rip;
1565 vmexit->inst_length = 0;
1566 vmexit->exitcode = VM_EXITCODE_RENDEZVOUS;
1567 vmm_stat_incr(vm, vcpuid, VMEXIT_RENDEZVOUS, 1);
1568 }
1569
1570 void
1571 vm_exit_reqidle(struct vm *vm, int vcpuid, uint64_t rip)
1572 {
1573 struct vm_exit *vmexit;
1574
1575 vmexit = vm_exitinfo(vm, vcpuid);
1576 vmexit->rip = rip;
1577 vmexit->inst_length = 0;
1578 vmexit->exitcode = VM_EXITCODE_REQIDLE;
1579 vmm_stat_incr(vm, vcpuid, VMEXIT_REQIDLE, 1);
1580 }
1581
1582 void
1583 vm_exit_astpending(struct vm *vm, int vcpuid, uint64_t rip)
1584 {
1585 struct vm_exit *vmexit;
1586
1587 vmexit = vm_exitinfo(vm, vcpuid);
1588 vmexit->rip = rip;
1589 vmexit->inst_length = 0;
1590 vmexit->exitcode = VM_EXITCODE_BOGUS;
1591 vmm_stat_incr(vm, vcpuid, VMEXIT_ASTPENDING, 1);
1592 }
1593
1594 int
1595 vm_run(struct vm *vm, struct vm_run *vmrun)
1596 {
1597 struct vm_eventinfo evinfo;
1598 int error, vcpuid;
1599 struct vcpu *vcpu;
1600 struct pcb *pcb;
1601 uint64_t tscval;
1602 struct vm_exit *vme;
1603 bool retu, intr_disabled;
1604 pmap_t pmap;
1605
1606 vcpuid = vmrun->cpuid;
1607
1608 if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
1609 return (EINVAL);
1610
1611 if (!CPU_ISSET(vcpuid, &vm->active_cpus))
1612 return (EINVAL);
1613
1614 if (CPU_ISSET(vcpuid, &vm->suspended_cpus))
1615 return (EINVAL);
1616
1617 pmap = vmspace_pmap(vm->vmspace);
1618 vcpu = &vm->vcpu[vcpuid];
1619 vme = &vcpu->exitinfo;
1620 evinfo.rptr = &vm->rendezvous_func;
1621 evinfo.sptr = &vm->suspend;
1622 evinfo.iptr = &vcpu->reqidle;
1623 restart:
1624 critical_enter();
1625
1626 KASSERT(!CPU_ISSET(curcpu, &pmap->pm_active),
1627 ("vm_run: absurd pm_active"));
1628
1629 tscval = rdtsc();
1630
1631 pcb = PCPU_GET(curpcb);
1632 set_pcb_flags(pcb, PCB_FULL_IRET);
1633
1634 restore_guest_fpustate(vcpu);
1635
1636 vcpu_require_state(vm, vcpuid, VCPU_RUNNING);
1637 error = VMRUN(vm->cookie, vcpuid, vcpu->nextrip, pmap, &evinfo);
1638 vcpu_require_state(vm, vcpuid, VCPU_FROZEN);
1639
1640 save_guest_fpustate(vcpu);
1641
1642 vmm_stat_incr(vm, vcpuid, VCPU_TOTAL_RUNTIME, rdtsc() - tscval);
1643
1644 critical_exit();
1645
1646 if (error == 0) {
1647 retu = false;
1648 vcpu->nextrip = vme->rip + vme->inst_length;
1649 switch (vme->exitcode) {
1650 case VM_EXITCODE_REQIDLE:
1651 error = vm_handle_reqidle(vm, vcpuid, &retu);
1652 break;
1653 case VM_EXITCODE_SUSPENDED:
1654 error = vm_handle_suspend(vm, vcpuid, &retu);
1655 break;
1656 case VM_EXITCODE_IOAPIC_EOI:
1657 vioapic_process_eoi(vm, vcpuid,
1658 vme->u.ioapic_eoi.vector);
1659 break;
1660 case VM_EXITCODE_RENDEZVOUS:
1661 vm_handle_rendezvous(vm, vcpuid);
1662 error = 0;
1663 break;
1664 case VM_EXITCODE_HLT:
1665 intr_disabled = ((vme->u.hlt.rflags & PSL_I) == 0);
1666 error = vm_handle_hlt(vm, vcpuid, intr_disabled, &retu);
1667 break;
1668 case VM_EXITCODE_PAGING:
1669 error = vm_handle_paging(vm, vcpuid, &retu);
1670 break;
1671 case VM_EXITCODE_INST_EMUL:
1672 error = vm_handle_inst_emul(vm, vcpuid, &retu);
1673 break;
1674 case VM_EXITCODE_INOUT:
1675 case VM_EXITCODE_INOUT_STR:
1676 error = vm_handle_inout(vm, vcpuid, vme, &retu);
1677 break;
1678 case VM_EXITCODE_MONITOR:
1679 case VM_EXITCODE_MWAIT:
1680 vm_inject_ud(vm, vcpuid);
1681 break;
1682 default:
1683 retu = true; /* handled in userland */
1684 break;
1685 }
1686 }
1687
1688 if (error == 0 && retu == false)
1689 goto restart;
1690
1691 VCPU_CTR2(vm, vcpuid, "retu %d/%d", error, vme->exitcode);
1692
1693 /* copy the exit information */
1694 bcopy(vme, &vmrun->vm_exit, sizeof(struct vm_exit));
1695 return (error);
1696 }
1697
1698 int
1699 vm_restart_instruction(void *arg, int vcpuid)
1700 {
1701 struct vm *vm;
1702 struct vcpu *vcpu;
1703 enum vcpu_state state;
1704 uint64_t rip;
1705 int error;
1706
1707 vm = arg;
1708 if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
1709 return (EINVAL);
1710
1711 vcpu = &vm->vcpu[vcpuid];
1712 state = vcpu_get_state(vm, vcpuid, NULL);
1713 if (state == VCPU_RUNNING) {
1714 /*
1715 * When a vcpu is "running" the next instruction is determined
1716 * by adding 'rip' and 'inst_length' in the vcpu's 'exitinfo'.
1717 * Thus setting 'inst_length' to zero will cause the current
1718 * instruction to be restarted.
1719 */
1720 vcpu->exitinfo.inst_length = 0;
1721 VCPU_CTR1(vm, vcpuid, "restarting instruction at %#lx by "
1722 "setting inst_length to zero", vcpu->exitinfo.rip);
1723 } else if (state == VCPU_FROZEN) {
1724 /*
1725 * When a vcpu is "frozen" it is outside the critical section
1726 * around VMRUN() and 'nextrip' points to the next instruction.
1727 * Thus instruction restart is achieved by setting 'nextrip'
1728 * to the vcpu's %rip.
1729 */
1730 error = vm_get_register(vm, vcpuid, VM_REG_GUEST_RIP, &rip);
1731 KASSERT(!error, ("%s: error %d getting rip", __func__, error));
1732 VCPU_CTR2(vm, vcpuid, "restarting instruction by updating "
1733 "nextrip from %#lx to %#lx", vcpu->nextrip, rip);
1734 vcpu->nextrip = rip;
1735 } else {
1736 panic("%s: invalid state %d", __func__, state);
1737 }
1738 return (0);
1739 }
1740
1741 int
1742 vm_exit_intinfo(struct vm *vm, int vcpuid, uint64_t info)
1743 {
1744 struct vcpu *vcpu;
1745 int type, vector;
1746
1747 if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
1748 return (EINVAL);
1749
1750 vcpu = &vm->vcpu[vcpuid];
1751
1752 if (info & VM_INTINFO_VALID) {
1753 type = info & VM_INTINFO_TYPE;
1754 vector = info & 0xff;
1755 if (type == VM_INTINFO_NMI && vector != IDT_NMI)
1756 return (EINVAL);
1757 if (type == VM_INTINFO_HWEXCEPTION && vector >= 32)
1758 return (EINVAL);
1759 if (info & VM_INTINFO_RSVD)
1760 return (EINVAL);
1761 } else {
1762 info = 0;
1763 }
1764 VCPU_CTR2(vm, vcpuid, "%s: info1(%#lx)", __func__, info);
1765 vcpu->exitintinfo = info;
1766 return (0);
1767 }
1768
1769 enum exc_class {
1770 EXC_BENIGN,
1771 EXC_CONTRIBUTORY,
1772 EXC_PAGEFAULT
1773 };
1774
1775 #define IDT_VE 20 /* Virtualization Exception (Intel specific) */
1776
1777 static enum exc_class
1778 exception_class(uint64_t info)
1779 {
1780 int type, vector;
1781
1782 KASSERT(info & VM_INTINFO_VALID, ("intinfo must be valid: %#lx", info));
1783 type = info & VM_INTINFO_TYPE;
1784 vector = info & 0xff;
1785
1786 /* Table 6-4, "Interrupt and Exception Classes", Intel SDM, Vol 3 */
1787 switch (type) {
1788 case VM_INTINFO_HWINTR:
1789 case VM_INTINFO_SWINTR:
1790 case VM_INTINFO_NMI:
1791 return (EXC_BENIGN);
1792 default:
1793 /*
1794 * Hardware exception.
1795 *
1796 * SVM and VT-x use identical type values to represent NMI,
1797 * hardware interrupt and software interrupt.
1798 *
1799 * SVM uses type '3' for all exceptions. VT-x uses type '3'
1800 * for exceptions except #BP and #OF. #BP and #OF use a type
1801 * value of '5' or '6'. Therefore we don't check for explicit
1802 * values of 'type' to classify 'intinfo' into a hardware
1803 * exception.
1804 */
1805 break;
1806 }
1807
1808 switch (vector) {
1809 case IDT_PF:
1810 case IDT_VE:
1811 return (EXC_PAGEFAULT);
1812 case IDT_DE:
1813 case IDT_TS:
1814 case IDT_NP:
1815 case IDT_SS:
1816 case IDT_GP:
1817 return (EXC_CONTRIBUTORY);
1818 default:
1819 return (EXC_BENIGN);
1820 }
1821 }
1822
1823 static int
1824 nested_fault(struct vm *vm, int vcpuid, uint64_t info1, uint64_t info2,
1825 uint64_t *retinfo)
1826 {
1827 enum exc_class exc1, exc2;
1828 int type1, vector1;
1829
1830 KASSERT(info1 & VM_INTINFO_VALID, ("info1 %#lx is not valid", info1));
1831 KASSERT(info2 & VM_INTINFO_VALID, ("info2 %#lx is not valid", info2));
1832
1833 /*
1834 * If an exception occurs while attempting to call the double-fault
1835 * handler the processor enters shutdown mode (aka triple fault).
1836 */
1837 type1 = info1 & VM_INTINFO_TYPE;
1838 vector1 = info1 & 0xff;
1839 if (type1 == VM_INTINFO_HWEXCEPTION && vector1 == IDT_DF) {
1840 VCPU_CTR2(vm, vcpuid, "triple fault: info1(%#lx), info2(%#lx)",
1841 info1, info2);
1842 vm_suspend(vm, VM_SUSPEND_TRIPLEFAULT);
1843 *retinfo = 0;
1844 return (0);
1845 }
1846
1847 /*
1848 * Table 6-5 "Conditions for Generating a Double Fault", Intel SDM, Vol3
1849 */
1850 exc1 = exception_class(info1);
1851 exc2 = exception_class(info2);
1852 if ((exc1 == EXC_CONTRIBUTORY && exc2 == EXC_CONTRIBUTORY) ||
1853 (exc1 == EXC_PAGEFAULT && exc2 != EXC_BENIGN)) {
1854 /* Convert nested fault into a double fault. */
1855 *retinfo = IDT_DF;
1856 *retinfo |= VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION;
1857 *retinfo |= VM_INTINFO_DEL_ERRCODE;
1858 } else {
1859 /* Handle exceptions serially */
1860 *retinfo = info2;
1861 }
1862 return (1);
1863 }
1864
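/*
 * Encode the vcpu's pending exception, if any, in the interrupt
 * information format used for event injection.
 */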
1865 static uint64_t
1866 vcpu_exception_intinfo(struct vcpu *vcpu)
1867 {
1868 uint64_t info = 0;
1869
1870 if (vcpu->exception_pending) {
1871 info = vcpu->exc_vector & 0xff;
1872 info |= VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION;
1873 if (vcpu->exc_errcode_valid) {
1874 info |= VM_INTINFO_DEL_ERRCODE;
1875 info |= (uint64_t)vcpu->exc_errcode << 32;
1876 }
1877 }
1878 return (info);
1879 }
1880
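/*
 * Compute the event, if any, to inject on the next VM entry. An event
 * pending from the previous VM exit is merged with a pending exception;
 * nested_fault() decides whether the pair is delivered serially or is
 * promoted to a double (or triple) fault. Returns non-zero if '*retinfo'
 * contains a valid event.
 */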
1881 int
1882 vm_entry_intinfo(struct vm *vm, int vcpuid, uint64_t *retinfo)
1883 {
1884 struct vcpu *vcpu;
1885 uint64_t info1, info2;
1886 int valid;
1887
1888 KASSERT(vcpuid >= 0 && vcpuid < VM_MAXCPU, ("invalid vcpu %d", vcpuid));
1889
1890 vcpu = &vm->vcpu[vcpuid];
1891
1892 info1 = vcpu->exitintinfo;
1893 vcpu->exitintinfo = 0;
1894
1895 info2 = 0;
1896 if (vcpu->exception_pending) {
1897 info2 = vcpu_exception_intinfo(vcpu);
1898 vcpu->exception_pending = 0;
1899 VCPU_CTR2(vm, vcpuid, "Exception %d delivered: %#lx",
1900 vcpu->exc_vector, info2);
1901 }
1902
1903 if ((info1 & VM_INTINFO_VALID) && (info2 & VM_INTINFO_VALID)) {
1904 valid = nested_fault(vm, vcpuid, info1, info2, retinfo);
1905 } else if (info1 & VM_INTINFO_VALID) {
1906 *retinfo = info1;
1907 valid = 1;
1908 } else if (info2 & VM_INTINFO_VALID) {
1909 *retinfo = info2;
1910 valid = 1;
1911 } else {
1912 valid = 0;
1913 }
1914
1915 if (valid) {
1916 VCPU_CTR4(vm, vcpuid, "%s: info1(%#lx), info2(%#lx), "
1917 "retinfo(%#lx)", __func__, info1, info2, *retinfo);
1918 }
1919
1920 return (valid);
1921 }
1922
1923 int
1924 vm_get_intinfo(struct vm *vm, int vcpuid, uint64_t *info1, uint64_t *info2)
1925 {
1926 struct vcpu *vcpu;
1927
1928 if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
1929 return (EINVAL);
1930
1931 vcpu = &vm->vcpu[vcpuid];
1932 *info1 = vcpu->exitintinfo;
1933 *info2 = vcpu_exception_intinfo(vcpu);
1934 return (0);
1935 }
1936
1937 int
1938 vm_inject_exception(struct vm *vm, int vcpuid, int vector, int errcode_valid,
1939 uint32_t errcode, int restart_instruction)
1940 {
1941 struct vcpu *vcpu;
1942 uint64_t regval;
1943 int error;
1944
1945 if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
1946 return (EINVAL);
1947
1948 if (vector < 0 || vector >= 32)
1949 return (EINVAL);
1950
1951 /*
1952 * A double fault exception should never be injected directly into
1953 * the guest. It is a derived exception that results from specific
1954 * combinations of nested faults.
1955 */
1956 if (vector == IDT_DF)
1957 return (EINVAL);
1958
1959 vcpu = &vm->vcpu[vcpuid];
1960
1961 if (vcpu->exception_pending) {
1962 VCPU_CTR2(vm, vcpuid, "Unable to inject exception %d due to "
1963 "pending exception %d", vector, vcpu->exc_vector);
1964 return (EBUSY);
1965 }
1966
1967 if (errcode_valid) {
1968 /*
1969 * Exceptions don't deliver an error code in real mode.
1970 */
1971 error = vm_get_register(vm, vcpuid, VM_REG_GUEST_CR0, &regval);
1972 KASSERT(!error, ("%s: error %d getting CR0", __func__, error));
1973 if (!(regval & CR0_PE))
1974 errcode_valid = 0;
1975 }
1976
1977 /*
1978 * From section 26.6.1 "Interruptibility State" in Intel SDM:
1979 *
1980 * Event blocking by "STI" or "MOV SS" is cleared after guest executes
1981 * one instruction or incurs an exception.
1982 */
1983 error = vm_set_register(vm, vcpuid, VM_REG_GUEST_INTR_SHADOW, 0);
1984 KASSERT(error == 0, ("%s: error %d clearing interrupt shadow",
1985 __func__, error));
1986
1987 if (restart_instruction)
1988 vm_restart_instruction(vm, vcpuid);
1989
1990 vcpu->exception_pending = 1;
1991 vcpu->exc_vector = vector;
1992 vcpu->exc_errcode = errcode;
1993 vcpu->exc_errcode_valid = errcode_valid;
1994 VCPU_CTR1(vm, vcpuid, "Exception %d pending", vector);
1995 return (0);
1996 }
1997
1998 void
1999 vm_inject_fault(void *vmarg, int vcpuid, int vector, int errcode_valid,
2000 int errcode)
2001 {
2002 struct vm *vm;
2003 int error, restart_instruction;
2004
2005 vm = vmarg;
2006 restart_instruction = 1;
2007
2008 error = vm_inject_exception(vm, vcpuid, vector, errcode_valid,
2009 errcode, restart_instruction);
2010 KASSERT(error == 0, ("vm_inject_exception error %d", error));
2011 }
2012
2013 void
2014 vm_inject_pf(void *vmarg, int vcpuid, int error_code, uint64_t cr2)
2015 {
2016 struct vm *vm;
2017 int error;
2018
2019 vm = vmarg;
2020 VCPU_CTR2(vm, vcpuid, "Injecting page fault: error_code %#x, cr2 %#lx",
2021 error_code, cr2);
2022
2023 error = vm_set_register(vm, vcpuid, VM_REG_GUEST_CR2, cr2);
2024 KASSERT(error == 0, ("vm_set_register(cr2) error %d", error));
2025
2026 vm_inject_fault(vm, vcpuid, IDT_PF, 1, error_code);
2027 }
2028
2029 static VMM_STAT(VCPU_NMI_COUNT, "number of NMIs delivered to vcpu");
2030
2031 int
2032 vm_inject_nmi(struct vm *vm, int vcpuid)
2033 {
2034 struct vcpu *vcpu;
2035
2036 if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
2037 return (EINVAL);
2038
2039 vcpu = &vm->vcpu[vcpuid];
2040
2041 vcpu->nmi_pending = 1;
2042 vcpu_notify_event(vm, vcpuid, false);
2043 return (0);
2044 }
2045
2046 int
2047 vm_nmi_pending(struct vm *vm, int vcpuid)
2048 {
2049 struct vcpu *vcpu;
2050
2051 if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
2052 panic("vm_nmi_pending: invalid vcpuid %d", vcpuid);
2053
2054 vcpu = &vm->vcpu[vcpuid];
2055
2056 return (vcpu->nmi_pending);
2057 }
2058
2059 void
2060 vm_nmi_clear(struct vm *vm, int vcpuid)
2061 {
2062 struct vcpu *vcpu;
2063
2064 if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
2065 panic("vm_nmi_pending: invalid vcpuid %d", vcpuid);
2066
2067 vcpu = &vm->vcpu[vcpuid];
2068
2069 if (vcpu->nmi_pending == 0)
2070 panic("vm_nmi_clear: inconsistent nmi_pending state");
2071
2072 vcpu->nmi_pending = 0;
2073 vmm_stat_incr(vm, vcpuid, VCPU_NMI_COUNT, 1);
2074 }
2075
2076 static VMM_STAT(VCPU_EXTINT_COUNT, "number of ExtINTs delivered to vcpu");
2077
2078 int
2079 vm_inject_extint(struct vm *vm, int vcpuid)
2080 {
2081 struct vcpu *vcpu;
2082
2083 if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
2084 return (EINVAL);
2085
2086 vcpu = &vm->vcpu[vcpuid];
2087
2088 vcpu->extint_pending = 1;
2089 vcpu_notify_event(vm, vcpuid, false);
2090 return (0);
2091 }
2092
2093 int
2094 vm_extint_pending(struct vm *vm, int vcpuid)
2095 {
2096 struct vcpu *vcpu;
2097
2098 if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
2099 panic("vm_extint_pending: invalid vcpuid %d", vcpuid);
2100
2101 vcpu = &vm->vcpu[vcpuid];
2102
2103 return (vcpu->extint_pending);
2104 }
2105
2106 void
2107 vm_extint_clear(struct vm *vm, int vcpuid)
2108 {
2109 struct vcpu *vcpu;
2110
2111 if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
2112 panic("vm_extint_pending: invalid vcpuid %d", vcpuid);
2113
2114 vcpu = &vm->vcpu[vcpuid];
2115
2116 if (vcpu->extint_pending == 0)
2117 panic("vm_extint_clear: inconsistent extint_pending state");
2118
2119 vcpu->extint_pending = 0;
2120 vmm_stat_incr(vm, vcpuid, VCPU_EXTINT_COUNT, 1);
2121 }
2122
2123 int
2124 vm_get_capability(struct vm *vm, int vcpu, int type, int *retval)
2125 {
2126 if (vcpu < 0 || vcpu >= VM_MAXCPU)
2127 return (EINVAL);
2128
2129 if (type < 0 || type >= VM_CAP_MAX)
2130 return (EINVAL);
2131
2132 return (VMGETCAP(vm->cookie, vcpu, type, retval));
2133 }
2134
2135 int
2136 vm_set_capability(struct vm *vm, int vcpu, int type, int val)
2137 {
2138 if (vcpu < 0 || vcpu >= VM_MAXCPU)
2139 return (EINVAL);
2140
2141 if (type < 0 || type >= VM_CAP_MAX)
2142 return (EINVAL);
2143
2144 return (VMSETCAP(vm->cookie, vcpu, type, val));
2145 }
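
vm_get_capability() and vm_set_capability() only bounds-check the vcpu and capability type before deferring to the backend through the VMGETCAP/VMSETCAP ops. A minimal caller sketch follows, assuming VM_CAP_HALT_EXIT from vmm.h as the capability being toggled; the wrapper name is hypothetical.

/*
 * Illustrative sketch (editorial): enable HLT exits on a vcpu if the
 * backend supports the capability.  'example_enable_hlt_exit' is a
 * hypothetical wrapper name.
 */
static int
example_enable_hlt_exit(struct vm *vm, int vcpuid)
{
	int cap, error;

	error = vm_get_capability(vm, vcpuid, VM_CAP_HALT_EXIT, &cap);
	if (error)
		return (error);		/* invalid vcpu/type or unsupported */
	return (vm_set_capability(vm, vcpuid, VM_CAP_HALT_EXIT, 1));
}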
2146
2147 struct vlapic *
2148 vm_lapic(struct vm *vm, int cpu)
2149 {
2150 return (vm->vcpu[cpu].vlapic);
2151 }
2152
2153 struct vioapic *
2154 vm_ioapic(struct vm *vm)
2155 {
2156
2157 return (vm->vioapic);
2158 }
2159
2160 struct vhpet *
2161 vm_hpet(struct vm *vm)
2162 {
2163
2164 return (vm->vhpet);
2165 }
2166
2167 boolean_t
2168 vmm_is_pptdev(int bus, int slot, int func)
2169 {
2170 int found, i, n;
2171 int b, s, f;
2172 char *val, *cp, *cp2;
2173
2174 /*
2175 * XXX
2176 * The length of an environment variable is limited to 128 bytes which
2177 * puts an upper limit on the number of passthru devices that may be
2178 * specified using a single environment variable.
2179 *
2180 * Work around this by scanning multiple environment variable
2181 * names instead of a single one - yuck!
2182 */
2183 const char *names[] = { "pptdevs", "pptdevs2", "pptdevs3", NULL };
2184
2185 /* set pptdevs="1/2/3 4/5/6 7/8/9 10/11/12" */
2186 found = 0;
2187 for (i = 0; names[i] != NULL && !found; i++) {
2188 cp = val = getenv(names[i]);
2189 while (cp != NULL && *cp != '\0') {
2190 if ((cp2 = strchr(cp, ' ')) != NULL)
2191 *cp2 = '\0';
2192
2193 n = sscanf(cp, "%d/%d/%d", &b, &s, &f);
2194 if (n == 3 && bus == b && slot == s && func == f) {
2195 found = 1;
2196 break;
2197 }
2198
2199 if (cp2 != NULL)
2200 *cp2++ = ' ';
2201
2202 cp = cp2;
2203 }
2204 freeenv(val);
2205 }
2206 return (found);
2207 }
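
As the comment above notes, a kernel environment variable is capped at 128 bytes, so the passthru device list may be spread across the pptdevs, pptdevs2 and pptdevs3 tunables, each holding space-separated bus/slot/func triples. A hypothetical loader.conf fragment (the device addresses are made up for illustration):

# Hypothetical loader.conf fragment; entries are bus/slot/func.
pptdevs="2/0/0 2/0/1 4/0/0"
pptdevs2="5/0/0 5/0/1"
pptdevs3="6/0/0"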
2208
2209 void *
2210 vm_iommu_domain(struct vm *vm)
2211 {
2212
2213 return (vm->iommu);
2214 }
2215
2216 int
2217 vcpu_set_state(struct vm *vm, int vcpuid, enum vcpu_state newstate,
2218 bool from_idle)
2219 {
2220 int error;
2221 struct vcpu *vcpu;
2222
2223 if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
2224 panic("vm_set_run_state: invalid vcpuid %d", vcpuid);
2225
2226 vcpu = &vm->vcpu[vcpuid];
2227
2228 vcpu_lock(vcpu);
2229 error = vcpu_set_state_locked(vm, vcpuid, newstate, from_idle);
2230 vcpu_unlock(vcpu);
2231
2232 return (error);
2233 }
2234
2235 enum vcpu_state
2236 vcpu_get_state(struct vm *vm, int vcpuid, int *hostcpu)
2237 {
2238 struct vcpu *vcpu;
2239 enum vcpu_state state;
2240
2241 if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
2242 panic("vm_get_run_state: invalid vcpuid %d", vcpuid);
2243
2244 vcpu = &vm->vcpu[vcpuid];
2245
2246 vcpu_lock(vcpu);
2247 state = vcpu->state;
2248 if (hostcpu != NULL)
2249 *hostcpu = vcpu->hostcpu;
2250 vcpu_unlock(vcpu);
2251
2252 return (state);
2253 }
2254
2255 int
2256 vm_activate_cpu(struct vm *vm, int vcpuid)
2257 {
2258
2259 if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
2260 return (EINVAL);
2261
2262 if (CPU_ISSET(vcpuid, &vm->active_cpus))
2263 return (EBUSY);
2264
2265 VCPU_CTR0(vm, vcpuid, "activated");
2266 CPU_SET_ATOMIC(vcpuid, &vm->active_cpus);
2267 return (0);
2268 }
2269
2270 cpuset_t
2271 vm_active_cpus(struct vm *vm)
2272 {
2273
2274 return (vm->active_cpus);
2275 }
2276
2277 cpuset_t
2278 vm_suspended_cpus(struct vm *vm)
2279 {
2280
2281 return (vm->suspended_cpus);
2282 }
2283
2284 void *
2285 vcpu_stats(struct vm *vm, int vcpuid)
2286 {
2287
2288 return (vm->vcpu[vcpuid].stats);
2289 }
2290
2291 int
2292 vm_get_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state *state)
2293 {
2294 if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
2295 return (EINVAL);
2296
2297 *state = vm->vcpu[vcpuid].x2apic_state;
2298
2299 return (0);
2300 }
2301
2302 int
2303 vm_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state state)
2304 {
2305 if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
2306 return (EINVAL);
2307
2308 if (state >= X2APIC_STATE_LAST)
2309 return (EINVAL);
2310
2311 vm->vcpu[vcpuid].x2apic_state = state;
2312
2313 vlapic_set_x2apic_state(vm, vcpuid, state);
2314
2315 return (0);
2316 }
2317
2318 /*
2319 * This function is called to ensure that a vcpu "sees" a pending event
2320 * as soon as possible:
2321 * - If the vcpu thread is sleeping then it is woken up.
2322 * - If the vcpu is running on a different host_cpu then an IPI will be directed
2323 * to the host_cpu to cause the vcpu to trap into the hypervisor.
2324 */
2325 static void
2326 vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr)
2327 {
2328 int hostcpu;
2329
2330 hostcpu = vcpu->hostcpu;
2331 if (vcpu->state == VCPU_RUNNING) {
2332 KASSERT(hostcpu != NOCPU, ("vcpu running on invalid hostcpu"));
2333 if (hostcpu != curcpu) {
2334 if (lapic_intr) {
2335 vlapic_post_intr(vcpu->vlapic, hostcpu,
2336 vmm_ipinum);
2337 } else {
2338 ipi_cpu(hostcpu, vmm_ipinum);
2339 }
2340 } else {
2341 /*
2342 * If the 'vcpu' is running on 'curcpu' then it must
2343 * be sending a notification to itself (e.g. SELF_IPI).
2344 * The pending event will be picked up when the vcpu
2345 * transitions back to guest context.
2346 */
2347 }
2348 } else {
2349 KASSERT(hostcpu == NOCPU, ("vcpu state %d not consistent "
2350 "with hostcpu %d", vcpu->state, hostcpu));
2351 if (vcpu->state == VCPU_SLEEPING)
2352 wakeup_one(vcpu);
2353 }
2354 }
2355
2356 void
2357 vcpu_notify_event(struct vm *vm, int vcpuid, bool lapic_intr)
2358 {
2359 struct vcpu *vcpu = &vm->vcpu[vcpuid];
2360
2361 vcpu_lock(vcpu);
2362 vcpu_notify_event_locked(vcpu, lapic_intr);
2363 vcpu_unlock(vcpu);
2364 }
2365
2366 struct vmspace *
2367 vm_get_vmspace(struct vm *vm)
2368 {
2369
2370 return (vm->vmspace);
2371 }
2372
2373 int
2374 vm_apicid2vcpuid(struct vm *vm, int apicid)
2375 {
2376 /*
2377 * XXX apic id is assumed to be numerically identical to vcpu id
2378 */
2379 return (apicid);
2380 }
2381
2382 void
2383 vm_smp_rendezvous(struct vm *vm, int vcpuid, cpuset_t dest,
2384 vm_rendezvous_func_t func, void *arg)
2385 {
2386 int i;
2387
2388 /*
2389 * Enforce that this function is called without any locks
2390 */
2391 WITNESS_WARN(WARN_PANIC, NULL, "vm_smp_rendezvous");
2392 KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < VM_MAXCPU),
2393 ("vm_smp_rendezvous: invalid vcpuid %d", vcpuid));
2394
2395 restart:
2396 mtx_lock(&vm->rendezvous_mtx);
2397 if (vm->rendezvous_func != NULL) {
2398 /*
2399 * If a rendezvous is already in progress then we need to
2400 * call the rendezvous handler in case this 'vcpuid' is one
2401 * of the targets of the rendezvous.
2402 */
2403 RENDEZVOUS_CTR0(vm, vcpuid, "Rendezvous already in progress");
2404 mtx_unlock(&vm->rendezvous_mtx);
2405 vm_handle_rendezvous(vm, vcpuid);
2406 goto restart;
2407 }
2408 KASSERT(vm->rendezvous_func == NULL, ("vm_smp_rendezvous: previous "
2409 "rendezvous is still in progress"));
2410
2411 RENDEZVOUS_CTR0(vm, vcpuid, "Initiating rendezvous");
2412 vm->rendezvous_req_cpus = dest;
2413 CPU_ZERO(&vm->rendezvous_done_cpus);
2414 vm->rendezvous_arg = arg;
2415 vm_set_rendezvous_func(vm, func);
2416 mtx_unlock(&vm->rendezvous_mtx);
2417
2418 /*
2419 * Wake up any sleeping vcpus and trigger a VM-exit in any running
2420 * vcpus so they handle the rendezvous as soon as possible.
2421 */
2422 for (i = 0; i < VM_MAXCPU; i++) {
2423 if (CPU_ISSET(i, &dest))
2424 vcpu_notify_event(vm, i, false);
2425 }
2426
2427 vm_handle_rendezvous(vm, vcpuid);
2428 }
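
vm_smp_rendezvous() first drains any rendezvous that is already in progress, then publishes the target cpuset, argument and handler, notifies every target vcpu, and finally takes part itself via vm_handle_rendezvous(). The sketch below shows a hypothetical caller; the callback and wrapper names are assumptions, and the callback signature is assumed to match vm_rendezvous_func_t from vmm.h.

/*
 * Illustrative sketch (editorial): run a callback on every active
 * vcpu.  'example_percpu_cb' and 'example_rendezvous' are
 * hypothetical names.
 */
static void
example_percpu_cb(struct vm *vm, int vcpuid, void *arg)
{
	/* Per-vcpu work, performed as each target vcpu checks in. */
}

static void
example_rendezvous(struct vm *vm, int initiator_vcpuid)
{
	cpuset_t dest;

	dest = vm_active_cpus(vm);
	vm_smp_rendezvous(vm, initiator_vcpuid, dest, example_percpu_cb, NULL);
}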
2429
2430 struct vatpic *
2431 vm_atpic(struct vm *vm)
2432 {
2433 return (vm->vatpic);
2434 }
2435
2436 struct vatpit *
2437 vm_atpit(struct vm *vm)
2438 {
2439 return (vm->vatpit);
2440 }
2441
2442 struct vpmtmr *
2443 vm_pmtmr(struct vm *vm)
2444 {
2445
2446 return (vm->vpmtmr);
2447 }
2448
2449 struct vrtc *
2450 vm_rtc(struct vm *vm)
2451 {
2452
2453 return (vm->vrtc);
2454 }
2455
2456 enum vm_reg_name
2457 vm_segment_name(int seg)
2458 {
2459 static enum vm_reg_name seg_names[] = {
2460 VM_REG_GUEST_ES,
2461 VM_REG_GUEST_CS,
2462 VM_REG_GUEST_SS,
2463 VM_REG_GUEST_DS,
2464 VM_REG_GUEST_FS,
2465 VM_REG_GUEST_GS
2466 };
2467
2468 KASSERT(seg >= 0 && seg < nitems(seg_names),
2469 ("%s: invalid segment encoding %d", __func__, seg));
2470 return (seg_names[seg]);
2471 }
2472
2473 void
2474 vm_copy_teardown(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo,
2475 int num_copyinfo)
2476 {
2477 int idx;
2478
2479 for (idx = 0; idx < num_copyinfo; idx++) {
2480 if (copyinfo[idx].cookie != NULL)
2481 vm_gpa_release(copyinfo[idx].cookie);
2482 }
2483 bzero(copyinfo, num_copyinfo * sizeof(struct vm_copyinfo));
2484 }
2485
2486 int
2487 vm_copy_setup(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
2488 uint64_t gla, size_t len, int prot, struct vm_copyinfo *copyinfo,
2489 int num_copyinfo, int *fault)
2490 {
2491 int error, idx, nused;
2492 size_t n, off, remaining;
2493 void *hva, *cookie;
2494 uint64_t gpa;
2495
2496 bzero(copyinfo, sizeof(struct vm_copyinfo) * num_copyinfo);
2497
2498 nused = 0;
2499 remaining = len;
2500 while (remaining > 0) {
2501 KASSERT(nused < num_copyinfo, ("insufficient vm_copyinfo"));
2502 error = vm_gla2gpa(vm, vcpuid, paging, gla, prot, &gpa, fault);
2503 if (error || *fault)
2504 return (error);
2505 off = gpa & PAGE_MASK;
2506 n = min(remaining, PAGE_SIZE - off);
2507 copyinfo[nused].gpa = gpa;
2508 copyinfo[nused].len = n;
2509 remaining -= n;
2510 gla += n;
2511 nused++;
2512 }
2513
2514 for (idx = 0; idx < nused; idx++) {
2515 hva = vm_gpa_hold(vm, vcpuid, copyinfo[idx].gpa,
2516 copyinfo[idx].len, prot, &cookie);
2517 if (hva == NULL)
2518 break;
2519 copyinfo[idx].hva = hva;
2520 copyinfo[idx].cookie = cookie;
2521 }
2522
2523 if (idx != nused) {
2524 vm_copy_teardown(vm, vcpuid, copyinfo, num_copyinfo);
2525 return (EFAULT);
2526 } else {
2527 *fault = 0;
2528 return (0);
2529 }
2530 }
2531
2532 void
2533 vm_copyin(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo, void *kaddr,
2534 size_t len)
2535 {
2536 char *dst;
2537 int idx;
2538
2539 dst = kaddr;
2540 idx = 0;
2541 while (len > 0) {
2542 bcopy(copyinfo[idx].hva, dst, copyinfo[idx].len);
2543 len -= copyinfo[idx].len;
2544 dst += copyinfo[idx].len;
2545 idx++;
2546 }
2547 }
2548
2549 void
2550 vm_copyout(struct vm *vm, int vcpuid, const void *kaddr,
2551 struct vm_copyinfo *copyinfo, size_t len)
2552 {
2553 const char *src;
2554 int idx;
2555
2556 src = kaddr;
2557 idx = 0;
2558 while (len > 0) {
2559 bcopy(src, copyinfo[idx].hva, copyinfo[idx].len);
2560 len -= copyinfo[idx].len;
2561 src += copyinfo[idx].len;
2562 idx++;
2563 }
2564 }
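
vm_copy_setup() walks the guest linear range one page-bounded chunk at a time, translating each chunk with vm_gla2gpa() and pinning the backing host page with vm_gpa_hold(); vm_copyin()/vm_copyout() then copy through the held host mappings and vm_copy_teardown() releases them. A minimal sketch of that calling sequence follows; the wrapper name is hypothetical, and the two-element copyinfo array assumes len never exceeds a page (so at most one page boundary is crossed).

/*
 * Illustrative sketch (editorial): read 'len' bytes from a guest
 * linear address into a kernel buffer.  'example_copy_from_guest'
 * is a hypothetical name; two vm_copyinfo slots are enough only
 * while len <= PAGE_SIZE.
 */
static int
example_copy_from_guest(struct vm *vm, int vcpuid,
    struct vm_guest_paging *paging, uint64_t gla, void *buf, size_t len)
{
	struct vm_copyinfo copyinfo[2];
	int error, fault;

	error = vm_copy_setup(vm, vcpuid, paging, gla, len, VM_PROT_READ,
	    copyinfo, nitems(copyinfo), &fault);
	if (error || fault) {
		/*
		 * A set 'fault' means the translation faulted and the
		 * exception has already been queued for the guest.
		 */
		return (error);
	}

	vm_copyin(vm, vcpuid, copyinfo, buf, len);
	vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));
	return (0);
}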
2565
2566 /*
2567 * Return the amount of in-use and wired memory for the VM. Since
2568 * these are global stats, only return the values for vCPU 0.
2569 */
2570 VMM_STAT_DECLARE(VMM_MEM_RESIDENT);
2571 VMM_STAT_DECLARE(VMM_MEM_WIRED);
2572
2573 static void
2574 vm_get_rescnt(struct vm *vm, int vcpu, struct vmm_stat_type *stat)
2575 {
2576
2577 if (vcpu == 0) {
2578 vmm_stat_set(vm, vcpu, VMM_MEM_RESIDENT,
2579 PAGE_SIZE * vmspace_resident_count(vm->vmspace));
2580 }
2581 }
2582
2583 static void
2584 vm_get_wiredcnt(struct vm *vm, int vcpu, struct vmm_stat_type *stat)
2585 {
2586
2587 if (vcpu == 0) {
2588 vmm_stat_set(vm, vcpu, VMM_MEM_WIRED,
2589 PAGE_SIZE * pmap_wired_count(vmspace_pmap(vm->vmspace)));
2590 }
2591 }
2592
2593 VMM_STAT_FUNC(VMM_MEM_RESIDENT, "Resident memory", vm_get_rescnt);
2594 VMM_STAT_FUNC(VMM_MEM_WIRED, "Wired memory", vm_get_wiredcnt);