sys/amd64/vmm/vmm.c
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 2011 NetApp, Inc.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
28 * $FreeBSD$
29 */
30
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/kernel.h>
37 #include <sys/module.h>
38 #include <sys/sysctl.h>
39 #include <sys/malloc.h>
40 #include <sys/pcpu.h>
41 #include <sys/lock.h>
42 #include <sys/mutex.h>
43 #include <sys/proc.h>
44 #include <sys/rwlock.h>
45 #include <sys/sched.h>
46 #include <sys/smp.h>
48
49 #include <vm/vm.h>
50 #include <vm/vm_object.h>
51 #include <vm/vm_page.h>
52 #include <vm/pmap.h>
53 #include <vm/vm_map.h>
54 #include <vm/vm_extern.h>
55 #include <vm/vm_param.h>
56
57 #include <machine/cpu.h>
58 #include <machine/pcb.h>
59 #include <machine/smp.h>
60 #include <machine/md_var.h>
61 #include <x86/psl.h>
62 #include <x86/apicreg.h>
63
64 #include <machine/vmm.h>
65 #include <machine/vmm_dev.h>
66 #include <machine/vmm_instruction_emul.h>
67
68 #include "vmm_ioport.h"
69 #include "vmm_ktr.h"
70 #include "vmm_host.h"
71 #include "vmm_mem.h"
72 #include "vmm_util.h"
73 #include "vatpic.h"
74 #include "vatpit.h"
75 #include "vhpet.h"
76 #include "vioapic.h"
77 #include "vlapic.h"
78 #include "vpmtmr.h"
79 #include "vrtc.h"
80 #include "vmm_stat.h"
81 #include "vmm_lapic.h"
82
83 #include "io/ppt.h"
84 #include "io/iommu.h"
85
86 struct vlapic;
87
88 /*
89 * Initialization:
90 * (a) allocated when vcpu is created
91 * (i) initialized when vcpu is created and when it is reinitialized
92 * (o) initialized the first time the vcpu is created
93 * (x) initialized before use
94 */
95 struct vcpu {
96 struct mtx mtx; /* (o) protects 'state' and 'hostcpu' */
97 enum vcpu_state state; /* (o) vcpu state */
98 int hostcpu; /* (o) vcpu's host cpu */
99 int reqidle; /* (i) request vcpu to idle */
100 struct vlapic *vlapic; /* (i) APIC device model */
101 enum x2apic_state x2apic_state; /* (i) APIC mode */
102 uint64_t exitintinfo; /* (i) events pending at VM exit */
103 int nmi_pending; /* (i) NMI pending */
104 int extint_pending; /* (i) INTR pending */
105 int exception_pending; /* (i) exception pending */
106 int exc_vector; /* (x) exception collateral */
107 int exc_errcode_valid;
108 uint32_t exc_errcode;
109 struct savefpu *guestfpu; /* (a,i) guest fpu state */
110 uint64_t guest_xcr0; /* (i) guest %xcr0 register */
111 void *stats; /* (a,i) statistics */
112 struct vm_exit exitinfo; /* (x) exit reason and collateral */
113 uint64_t nextrip; /* (x) next instruction to execute */
114 };
115
116 #define vcpu_lock_initialized(v) mtx_initialized(&((v)->mtx))
117 #define vcpu_lock_init(v) mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN)
118 #define vcpu_lock(v) mtx_lock_spin(&((v)->mtx))
119 #define vcpu_unlock(v) mtx_unlock_spin(&((v)->mtx))
120 #define vcpu_assert_locked(v) mtx_assert(&((v)->mtx), MA_OWNED)
121
122 struct mem_seg {
123 size_t len;
124 bool sysmem;
125 struct vm_object *object;
126 };
127 #define VM_MAX_MEMSEGS 3
128
129 struct mem_map {
130 vm_paddr_t gpa;
131 size_t len;
132 vm_ooffset_t segoff;
133 int segid;
134 int prot;
135 int flags;
136 };
137 #define VM_MAX_MEMMAPS 4
138
139 /*
140 * Initialization:
141 * (o) initialized the first time the VM is created
142 * (i) initialized when VM is created and when it is reinitialized
143 * (x) initialized before use
144 */
145 struct vm {
146 void *cookie; /* (i) cpu-specific data */
147 void *iommu; /* (x) iommu-specific data */
148 struct vhpet *vhpet; /* (i) virtual HPET */
149 struct vioapic *vioapic; /* (i) virtual ioapic */
150 struct vatpic *vatpic; /* (i) virtual atpic */
151 struct vatpit *vatpit; /* (i) virtual atpit */
152 struct vpmtmr *vpmtmr; /* (i) virtual ACPI PM timer */
153 struct vrtc *vrtc; /* (o) virtual RTC */
154 volatile cpuset_t active_cpus; /* (i) active vcpus */
155 volatile cpuset_t debug_cpus; /* (i) vcpus stopped for debug */
156 int suspend; /* (i) stop VM execution */
157 volatile cpuset_t suspended_cpus; /* (i) suspended vcpus */
158 volatile cpuset_t halted_cpus; /* (x) cpus in a hard halt */
159 cpuset_t rendezvous_req_cpus; /* (x) rendezvous requested */
160 cpuset_t rendezvous_done_cpus; /* (x) rendezvous finished */
161 void *rendezvous_arg; /* (x) rendezvous func/arg */
162 vm_rendezvous_func_t rendezvous_func;
163 struct mtx rendezvous_mtx; /* (o) rendezvous lock */
164 struct mem_map mem_maps[VM_MAX_MEMMAPS]; /* (i) guest address space */
165 struct mem_seg mem_segs[VM_MAX_MEMSEGS]; /* (o) guest memory regions */
166 struct vmspace *vmspace; /* (o) guest's address space */
167 char name[VM_MAX_NAMELEN+1]; /* (o) virtual machine name */
168 struct vcpu vcpu[VM_MAXCPU]; /* (i) guest vcpus */
169 /* The following describe the vm cpu topology */
170 uint16_t sockets; /* (o) num of sockets */
171 uint16_t cores; /* (o) num of cores/socket */
172 uint16_t threads; /* (o) num of threads/core */
173 uint16_t maxcpus; /* (o) max pluggable cpus */
174 };
175
176 static int vmm_initialized;
177
178 static struct vmm_ops *ops;
179 #define VMM_INIT(num) (ops != NULL ? (*ops->init)(num) : 0)
180 #define VMM_CLEANUP() (ops != NULL ? (*ops->cleanup)() : 0)
181 #define VMM_RESUME() (ops != NULL ? (*ops->resume)() : 0)
182
183 #define VMINIT(vm, pmap) (ops != NULL ? (*ops->vminit)(vm, pmap): NULL)
184 #define VMRUN(vmi, vcpu, rip, pmap, evinfo) \
185 (ops != NULL ? (*ops->vmrun)(vmi, vcpu, rip, pmap, evinfo) : ENXIO)
186 #define VMCLEANUP(vmi) (ops != NULL ? (*ops->vmcleanup)(vmi) : NULL)
187 #define VMSPACE_ALLOC(min, max) \
188 (ops != NULL ? (*ops->vmspace_alloc)(min, max) : NULL)
189 #define VMSPACE_FREE(vmspace) \
190 (ops != NULL ? (*ops->vmspace_free)(vmspace) : ENXIO)
191 #define VMGETREG(vmi, vcpu, num, retval) \
192 (ops != NULL ? (*ops->vmgetreg)(vmi, vcpu, num, retval) : ENXIO)
193 #define VMSETREG(vmi, vcpu, num, val) \
194 (ops != NULL ? (*ops->vmsetreg)(vmi, vcpu, num, val) : ENXIO)
195 #define VMGETDESC(vmi, vcpu, num, desc) \
196 (ops != NULL ? (*ops->vmgetdesc)(vmi, vcpu, num, desc) : ENXIO)
197 #define VMSETDESC(vmi, vcpu, num, desc) \
198 (ops != NULL ? (*ops->vmsetdesc)(vmi, vcpu, num, desc) : ENXIO)
199 #define VMGETCAP(vmi, vcpu, num, retval) \
200 (ops != NULL ? (*ops->vmgetcap)(vmi, vcpu, num, retval) : ENXIO)
201 #define VMSETCAP(vmi, vcpu, num, val) \
202 (ops != NULL ? (*ops->vmsetcap)(vmi, vcpu, num, val) : ENXIO)
203 #define VLAPIC_INIT(vmi, vcpu) \
204 (ops != NULL ? (*ops->vlapic_init)(vmi, vcpu) : NULL)
205 #define VLAPIC_CLEANUP(vmi, vlapic) \
206 (ops != NULL ? (*ops->vlapic_cleanup)(vmi, vlapic) : NULL)
207
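/*
 * Illustrative sketch only: the macros above indirect through a
 * 'struct vmm_ops' table supplied by the hardware backend
 * (vmm_ops_intel or vmm_ops_amd, selected in vmm_init() below).
 * The field names are inferred from the macro bodies; the
 * 'example_*' handlers are hypothetical placeholders.
 */
#if 0
static struct vmm_ops vmm_ops_example = {
	.init		= example_init,		/* VMM_INIT() */
	.cleanup	= example_cleanup,	/* VMM_CLEANUP() */
	.resume		= example_resume,	/* VMM_RESUME() */
	.vminit		= example_vminit,	/* VMINIT() */
	.vmrun		= example_vmrun,	/* VMRUN() */
	.vmcleanup	= example_vmcleanup,	/* VMCLEANUP() */
	.vmspace_alloc	= example_vmspace_alloc,
	.vmspace_free	= example_vmspace_free,
	.vmgetreg	= example_vmgetreg,
	.vmsetreg	= example_vmsetreg,
	.vmgetdesc	= example_vmgetdesc,
	.vmsetdesc	= example_vmsetdesc,
	.vmgetcap	= example_vmgetcap,
	.vmsetcap	= example_vmsetcap,
	.vlapic_init	= example_vlapic_init,
	.vlapic_cleanup	= example_vlapic_cleanup,
};
#endif
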
208 #define fpu_start_emulating() load_cr0(rcr0() | CR0_TS)
209 #define fpu_stop_emulating() clts()
210
211 SDT_PROVIDER_DEFINE(vmm);
212
213 static MALLOC_DEFINE(M_VM, "vm", "vm");
214
215 /* statistics */
216 static VMM_STAT(VCPU_TOTAL_RUNTIME, "vcpu total runtime");
217
218 SYSCTL_NODE(_hw, OID_AUTO, vmm, CTLFLAG_RW, NULL, NULL);
219
220 /*
221 * Halt the guest if all vcpus are executing a HLT instruction with
222 * interrupts disabled.
223 */
224 static int halt_detection_enabled = 1;
225 SYSCTL_INT(_hw_vmm, OID_AUTO, halt_detection, CTLFLAG_RDTUN,
226 &halt_detection_enabled, 0,
227 "Halt VM if all vcpus execute HLT with interrupts disabled");
228
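/*
 * Since the knob above is CTLFLAG_RDTUN it is read-only at runtime
 * and is set as a loader tunable, e.g. (illustrative):
 *
 *	hw.vmm.halt_detection="0"	(in /boot/loader.conf)
 */
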
229 static int vmm_ipinum;
230 SYSCTL_INT(_hw_vmm, OID_AUTO, ipinum, CTLFLAG_RD, &vmm_ipinum, 0,
231 "IPI vector used for vcpu notifications");
232
233 static int trace_guest_exceptions;
234 SYSCTL_INT(_hw_vmm, OID_AUTO, trace_guest_exceptions, CTLFLAG_RDTUN,
235 &trace_guest_exceptions, 0,
236 "Trap into hypervisor on all guest exceptions and reflect them back");
237
238 static void vm_free_memmap(struct vm *vm, int ident);
239 static bool sysmem_mapping(struct vm *vm, struct mem_map *mm);
240 static void vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr);
241
242 #ifdef KTR
243 static const char *
244 vcpu_state2str(enum vcpu_state state)
245 {
246
247 switch (state) {
248 case VCPU_IDLE:
249 return ("idle");
250 case VCPU_FROZEN:
251 return ("frozen");
252 case VCPU_RUNNING:
253 return ("running");
254 case VCPU_SLEEPING:
255 return ("sleeping");
256 default:
257 return ("unknown");
258 }
259 }
260 #endif
261
262 static void
263 vcpu_cleanup(struct vm *vm, int i, bool destroy)
264 {
265 struct vcpu *vcpu = &vm->vcpu[i];
266
267 VLAPIC_CLEANUP(vm->cookie, vcpu->vlapic);
268 if (destroy) {
269 vmm_stat_free(vcpu->stats);
270 fpu_save_area_free(vcpu->guestfpu);
271 }
272 }
273
274 static void
275 vcpu_init(struct vm *vm, int vcpu_id, bool create)
276 {
277 struct vcpu *vcpu;
278
279 KASSERT(vcpu_id >= 0 && vcpu_id < vm->maxcpus,
280 ("vcpu_init: invalid vcpu %d", vcpu_id));
281
282 vcpu = &vm->vcpu[vcpu_id];
283
284 if (create) {
285 KASSERT(!vcpu_lock_initialized(vcpu), ("vcpu %d already "
286 "initialized", vcpu_id));
287 vcpu_lock_init(vcpu);
288 vcpu->state = VCPU_IDLE;
289 vcpu->hostcpu = NOCPU;
290 vcpu->guestfpu = fpu_save_area_alloc();
291 vcpu->stats = vmm_stat_alloc();
292 }
293
294 vcpu->vlapic = VLAPIC_INIT(vm->cookie, vcpu_id);
295 vm_set_x2apic_state(vm, vcpu_id, X2APIC_DISABLED);
296 vcpu->reqidle = 0;
297 vcpu->exitintinfo = 0;
298 vcpu->nmi_pending = 0;
299 vcpu->extint_pending = 0;
300 vcpu->exception_pending = 0;
301 vcpu->guest_xcr0 = XFEATURE_ENABLED_X87;
302 fpu_save_area_reset(vcpu->guestfpu);
303 vmm_stat_init(vcpu->stats);
304 }
305
306 int
307 vcpu_trace_exceptions(struct vm *vm, int vcpuid)
308 {
309
310 return (trace_guest_exceptions);
311 }
312
313 struct vm_exit *
314 vm_exitinfo(struct vm *vm, int cpuid)
315 {
316 struct vcpu *vcpu;
317
318 if (cpuid < 0 || cpuid >= vm->maxcpus)
319 panic("vm_exitinfo: invalid cpuid %d", cpuid);
320
321 vcpu = &vm->vcpu[cpuid];
322
323 return (&vcpu->exitinfo);
324 }
325
326 static void
327 vmm_resume(void)
328 {
329 VMM_RESUME();
330 }
331
332 static int
333 vmm_init(void)
334 {
335 int error;
336
337 vmm_host_state_init();
338
339 vmm_ipinum = lapic_ipi_alloc(pti ? &IDTVEC(justreturn1_pti) :
340 &IDTVEC(justreturn));
341 if (vmm_ipinum < 0)
342 vmm_ipinum = IPI_AST;
343
344 error = vmm_mem_init();
345 if (error)
346 return (error);
347
348 if (vmm_is_intel())
349 ops = &vmm_ops_intel;
350 else if (vmm_is_svm())
351 ops = &vmm_ops_amd;
352 else
353 return (ENXIO);
354
355 vmm_resume_p = vmm_resume;
356
357 return (VMM_INIT(vmm_ipinum));
358 }
359
360 static int
361 vmm_handler(module_t mod, int what, void *arg)
362 {
363 int error;
364
365 switch (what) {
366 case MOD_LOAD:
367 vmmdev_init();
368 error = vmm_init();
369 if (error == 0)
370 vmm_initialized = 1;
371 break;
372 case MOD_UNLOAD:
373 error = vmmdev_cleanup();
374 if (error == 0) {
375 vmm_resume_p = NULL;
376 iommu_cleanup();
377 if (vmm_ipinum != IPI_AST)
378 lapic_ipi_free(vmm_ipinum);
379 error = VMM_CLEANUP();
380 /*
381 * Something bad happened - prevent new
382 * VMs from being created
383 */
384 if (error)
385 vmm_initialized = 0;
386 }
387 break;
388 default:
389 error = 0;
390 break;
391 }
392 return (error);
393 }
394
395 static moduledata_t vmm_kmod = {
396 "vmm",
397 vmm_handler,
398 NULL
399 };
400
401 /*
402 * vmm initialization has the following dependencies:
403 *
404 * - VT-x initialization requires smp_rendezvous() and therefore must happen
405 * after SMP is fully functional (after SI_SUB_SMP).
406 */
407 DECLARE_MODULE(vmm, vmm_kmod, SI_SUB_SMP + 1, SI_ORDER_ANY);
408 MODULE_VERSION(vmm, 1);
409
410 static void
411 vm_init(struct vm *vm, bool create)
412 {
413 int i;
414
415 vm->cookie = VMINIT(vm, vmspace_pmap(vm->vmspace));
416 vm->iommu = NULL;
417 vm->vioapic = vioapic_init(vm);
418 vm->vhpet = vhpet_init(vm);
419 vm->vatpic = vatpic_init(vm);
420 vm->vatpit = vatpit_init(vm);
421 vm->vpmtmr = vpmtmr_init(vm);
422 if (create)
423 vm->vrtc = vrtc_init(vm);
424
425 CPU_ZERO(&vm->active_cpus);
426 CPU_ZERO(&vm->debug_cpus);
427
428 vm->suspend = 0;
429 CPU_ZERO(&vm->suspended_cpus);
430
431 for (i = 0; i < vm->maxcpus; i++)
432 vcpu_init(vm, i, create);
433 }
434
435 /*
436 * The default CPU topology is a single thread per package.
437 */
438 u_int cores_per_package = 1;
439 u_int threads_per_core = 1;
440
441 int
442 vm_create(const char *name, struct vm **retvm)
443 {
444 struct vm *vm;
445 struct vmspace *vmspace;
446
447 /*
448 * If vmm.ko could not be successfully initialized then don't attempt
449 * to create the virtual machine.
450 */
451 if (!vmm_initialized)
452 return (ENXIO);
453
454 if (name == NULL || strnlen(name, VM_MAX_NAMELEN + 1) ==
455 VM_MAX_NAMELEN + 1)
456 return (EINVAL);
457
458 vmspace = VMSPACE_ALLOC(0, VM_MAXUSER_ADDRESS);
459 if (vmspace == NULL)
460 return (ENOMEM);
461
462 vm = malloc(sizeof(struct vm), M_VM, M_WAITOK | M_ZERO);
463 strcpy(vm->name, name);
464 vm->vmspace = vmspace;
465 mtx_init(&vm->rendezvous_mtx, "vm rendezvous lock", 0, MTX_DEF);
466
467 vm->sockets = 1;
468 vm->cores = cores_per_package; /* XXX backwards compatibility */
469 vm->threads = threads_per_core; /* XXX backwards compatibility */
470 vm->maxcpus = VM_MAXCPU; /* XXX temp to keep code working */
471
472 vm_init(vm, true);
473
474 *retvm = vm;
475 return (0);
476 }
477
478 void
479 vm_get_topology(struct vm *vm, uint16_t *sockets, uint16_t *cores,
480 uint16_t *threads, uint16_t *maxcpus)
481 {
482 *sockets = vm->sockets;
483 *cores = vm->cores;
484 *threads = vm->threads;
485 *maxcpus = vm->maxcpus;
486 }
487
488 uint16_t
489 vm_get_maxcpus(struct vm *vm)
490 {
491 return (vm->maxcpus);
492 }
493
494 int
495 vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores,
496 uint16_t threads, uint16_t maxcpus)
497 {
498 if (maxcpus != 0)
499 return (EINVAL); /* XXX remove when supported */
500 if ((sockets * cores * threads) > vm->maxcpus)
501 return (EINVAL);
502 /* XXX need to check sockets * cores * threads == vCPU, how? */
503 vm->sockets = sockets;
504 vm->cores = cores;
505 vm->threads = threads;
506 vm->maxcpus = VM_MAXCPU; /* XXX temp to keep code working */
507 	return (0);
508 }
509
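/*
 * Example of the constraint enforced above (a sketch, not called
 * anywhere in this file): a 2-socket, 2-core, 1-thread layout is
 * accepted as long as 2 * 2 * 1 <= vm->maxcpus, and the maxcpus
 * argument must currently be 0.
 */
#if 0
	error = vm_set_topology(vm, 2, 2, 1, 0);	/* 4 vcpus */
#endif
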
510 static void
511 vm_cleanup(struct vm *vm, bool destroy)
512 {
513 struct mem_map *mm;
514 int i;
515
516 ppt_unassign_all(vm);
517
518 if (vm->iommu != NULL)
519 iommu_destroy_domain(vm->iommu);
520
521 if (destroy)
522 vrtc_cleanup(vm->vrtc);
523 else
524 vrtc_reset(vm->vrtc);
525 vpmtmr_cleanup(vm->vpmtmr);
526 vatpit_cleanup(vm->vatpit);
527 vhpet_cleanup(vm->vhpet);
528 vatpic_cleanup(vm->vatpic);
529 vioapic_cleanup(vm->vioapic);
530
531 for (i = 0; i < vm->maxcpus; i++)
532 vcpu_cleanup(vm, i, destroy);
533
534 VMCLEANUP(vm->cookie);
535
536 /*
537 * System memory is removed from the guest address space only when
538 * the VM is destroyed. This is because the mapping remains the same
539 * across VM reset.
540 *
541 * Device memory can be relocated by the guest (e.g. using PCI BARs)
542 * so those mappings are removed on a VM reset.
543 */
544 for (i = 0; i < VM_MAX_MEMMAPS; i++) {
545 mm = &vm->mem_maps[i];
546 if (destroy || !sysmem_mapping(vm, mm))
547 vm_free_memmap(vm, i);
548 }
549
550 if (destroy) {
551 for (i = 0; i < VM_MAX_MEMSEGS; i++)
552 vm_free_memseg(vm, i);
553
554 VMSPACE_FREE(vm->vmspace);
555 vm->vmspace = NULL;
556 }
557 }
558
559 void
560 vm_destroy(struct vm *vm)
561 {
562 vm_cleanup(vm, true);
563 free(vm, M_VM);
564 }
565
566 int
567 vm_reinit(struct vm *vm)
568 {
569 int error;
570
571 /*
572 * A virtual machine can be reset only if all vcpus are suspended.
573 */
574 if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
575 vm_cleanup(vm, false);
576 vm_init(vm, false);
577 error = 0;
578 } else {
579 error = EBUSY;
580 }
581
582 return (error);
583 }
584
585 const char *
586 vm_name(struct vm *vm)
587 {
588 return (vm->name);
589 }
590
591 int
592 vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
593 {
594 vm_object_t obj;
595
596 if ((obj = vmm_mmio_alloc(vm->vmspace, gpa, len, hpa)) == NULL)
597 return (ENOMEM);
598 else
599 return (0);
600 }
601
602 int
603 vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len)
604 {
605
606 vmm_mmio_free(vm->vmspace, gpa, len);
607 return (0);
608 }
609
610 /*
611 * Return 'true' if 'gpa' is allocated in the guest address space.
612 *
613 * This function is called in the context of a running vcpu which acts as
614 * an implicit lock on 'vm->mem_maps[]'.
615 */
616 bool
617 vm_mem_allocated(struct vm *vm, int vcpuid, vm_paddr_t gpa)
618 {
619 struct mem_map *mm;
620 int i;
621
622 #ifdef INVARIANTS
623 int hostcpu, state;
624 state = vcpu_get_state(vm, vcpuid, &hostcpu);
625 KASSERT(state == VCPU_RUNNING && hostcpu == curcpu,
626 ("%s: invalid vcpu state %d/%d", __func__, state, hostcpu));
627 #endif
628
629 for (i = 0; i < VM_MAX_MEMMAPS; i++) {
630 mm = &vm->mem_maps[i];
631 if (mm->len != 0 && gpa >= mm->gpa && gpa < mm->gpa + mm->len)
632 return (true); /* 'gpa' is sysmem or devmem */
633 }
634
635 if (ppt_is_mmio(vm, gpa))
636 return (true); /* 'gpa' is pci passthru mmio */
637
638 return (false);
639 }
640
641 int
642 vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem)
643 {
644 struct mem_seg *seg;
645 vm_object_t obj;
646
647 if (ident < 0 || ident >= VM_MAX_MEMSEGS)
648 return (EINVAL);
649
650 if (len == 0 || (len & PAGE_MASK))
651 return (EINVAL);
652
653 seg = &vm->mem_segs[ident];
654 if (seg->object != NULL) {
655 if (seg->len == len && seg->sysmem == sysmem)
656 return (EEXIST);
657 else
658 return (EINVAL);
659 }
660
661 obj = vm_object_allocate(OBJT_DEFAULT, len >> PAGE_SHIFT);
662 if (obj == NULL)
663 return (ENOMEM);
664
665 seg->len = len;
666 seg->object = obj;
667 seg->sysmem = sysmem;
668 return (0);
669 }
670
671 int
672 vm_get_memseg(struct vm *vm, int ident, size_t *len, bool *sysmem,
673 vm_object_t *objptr)
674 {
675 struct mem_seg *seg;
676
677 if (ident < 0 || ident >= VM_MAX_MEMSEGS)
678 return (EINVAL);
679
680 seg = &vm->mem_segs[ident];
681 if (len)
682 *len = seg->len;
683 if (sysmem)
684 *sysmem = seg->sysmem;
685 if (objptr)
686 *objptr = seg->object;
687 return (0);
688 }
689
690 void
691 vm_free_memseg(struct vm *vm, int ident)
692 {
693 struct mem_seg *seg;
694
695 KASSERT(ident >= 0 && ident < VM_MAX_MEMSEGS,
696 ("%s: invalid memseg ident %d", __func__, ident));
697
698 seg = &vm->mem_segs[ident];
699 if (seg->object != NULL) {
700 vm_object_deallocate(seg->object);
701 bzero(seg, sizeof(struct mem_seg));
702 }
703 }
704
705 int
706 vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t first,
707 size_t len, int prot, int flags)
708 {
709 struct mem_seg *seg;
710 struct mem_map *m, *map;
711 vm_ooffset_t last;
712 int i, error;
713
714 if (prot == 0 || (prot & ~(VM_PROT_ALL)) != 0)
715 return (EINVAL);
716
717 if (flags & ~VM_MEMMAP_F_WIRED)
718 return (EINVAL);
719
720 if (segid < 0 || segid >= VM_MAX_MEMSEGS)
721 return (EINVAL);
722
723 seg = &vm->mem_segs[segid];
724 if (seg->object == NULL)
725 return (EINVAL);
726
727 last = first + len;
728 if (first < 0 || first >= last || last > seg->len)
729 return (EINVAL);
730
731 if ((gpa | first | last) & PAGE_MASK)
732 return (EINVAL);
733
734 map = NULL;
735 for (i = 0; i < VM_MAX_MEMMAPS; i++) {
736 m = &vm->mem_maps[i];
737 if (m->len == 0) {
738 map = m;
739 break;
740 }
741 }
742
743 if (map == NULL)
744 return (ENOSPC);
745
746 error = vm_map_find(&vm->vmspace->vm_map, seg->object, first, &gpa,
747 len, 0, VMFS_NO_SPACE, prot, prot, 0);
748 if (error != KERN_SUCCESS)
749 return (EFAULT);
750
751 vm_object_reference(seg->object);
752
753 if (flags & VM_MEMMAP_F_WIRED) {
754 error = vm_map_wire(&vm->vmspace->vm_map, gpa, gpa + len,
755 VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
756 if (error != KERN_SUCCESS) {
757 vm_map_remove(&vm->vmspace->vm_map, gpa, gpa + len);
758 return (error == KERN_RESOURCE_SHORTAGE ? ENOMEM :
759 EFAULT);
760 }
761 }
762
763 map->gpa = gpa;
764 map->len = len;
765 map->segoff = first;
766 map->segid = segid;
767 map->prot = prot;
768 map->flags = flags;
769 return (0);
770 }
771
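/*
 * Putting vm_alloc_memseg() and vm_mmap_memseg() together
 * (illustrative sketch; 'example_map_lowmem' is hypothetical):
 * guest RAM is created by allocating a sysmem segment and mapping
 * it 1:1 at guest-physical address zero.  Segment id 0 and the 1 MB
 * size are arbitrary values chosen for the example.
 */
#if 0
static int
example_map_lowmem(struct vm *vm)
{
	size_t len = 1024 * 1024;	/* page-aligned, as required */
	int error;

	error = vm_alloc_memseg(vm, 0, len, true);
	if (error)
		return (error);
	/* Map the entire segment at gpa 0 and wire it. */
	return (vm_mmap_memseg(vm, 0, 0, 0, len, VM_PROT_ALL,
	    VM_MEMMAP_F_WIRED));
}
#endif
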
772 int
773 vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid,
774 vm_ooffset_t *segoff, size_t *len, int *prot, int *flags)
775 {
776 struct mem_map *mm, *mmnext;
777 int i;
778
779 mmnext = NULL;
780 for (i = 0; i < VM_MAX_MEMMAPS; i++) {
781 mm = &vm->mem_maps[i];
782 if (mm->len == 0 || mm->gpa < *gpa)
783 continue;
784 if (mmnext == NULL || mm->gpa < mmnext->gpa)
785 mmnext = mm;
786 }
787
788 if (mmnext != NULL) {
789 *gpa = mmnext->gpa;
790 if (segid)
791 *segid = mmnext->segid;
792 if (segoff)
793 *segoff = mmnext->segoff;
794 if (len)
795 *len = mmnext->len;
796 if (prot)
797 *prot = mmnext->prot;
798 if (flags)
799 *flags = mmnext->flags;
800 return (0);
801 } else {
802 return (ENOENT);
803 }
804 }
805
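/*
 * vm_mmap_getnext() is a cursor-style iterator: it returns the
 * mapping with the lowest gpa at or above *gpa.  A caller can walk
 * the whole guest address map by restarting the cursor just past
 * each mapping (sketch only):
 */
#if 0
	vm_paddr_t gpa = 0;
	vm_ooffset_t segoff;
	size_t len;
	int segid, prot, flags;

	while (vm_mmap_getnext(vm, &gpa, &segid, &segoff, &len,
	    &prot, &flags) == 0) {
		/* ... inspect the mapping [gpa, gpa + len) ... */
		gpa += len;	/* advance past this mapping */
	}
#endif
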
806 static void
807 vm_free_memmap(struct vm *vm, int ident)
808 {
809 struct mem_map *mm;
810 int error;
811
812 mm = &vm->mem_maps[ident];
813 if (mm->len) {
814 error = vm_map_remove(&vm->vmspace->vm_map, mm->gpa,
815 mm->gpa + mm->len);
816 KASSERT(error == KERN_SUCCESS, ("%s: vm_map_remove error %d",
817 __func__, error));
818 bzero(mm, sizeof(struct mem_map));
819 }
820 }
821
822 static __inline bool
823 sysmem_mapping(struct vm *vm, struct mem_map *mm)
824 {
825
826 if (mm->len != 0 && vm->mem_segs[mm->segid].sysmem)
827 return (true);
828 else
829 return (false);
830 }
831
832 vm_paddr_t
833 vmm_sysmem_maxaddr(struct vm *vm)
834 {
835 struct mem_map *mm;
836 vm_paddr_t maxaddr;
837 int i;
838
839 maxaddr = 0;
840 for (i = 0; i < VM_MAX_MEMMAPS; i++) {
841 mm = &vm->mem_maps[i];
842 if (sysmem_mapping(vm, mm)) {
843 if (maxaddr < mm->gpa + mm->len)
844 maxaddr = mm->gpa + mm->len;
845 }
846 }
847 return (maxaddr);
848 }
849
850 static void
851 vm_iommu_modify(struct vm *vm, bool map)
852 {
853 int i, sz;
854 vm_paddr_t gpa, hpa;
855 struct mem_map *mm;
856 void *vp, *cookie, *host_domain;
857
858 sz = PAGE_SIZE;
859 host_domain = iommu_host_domain();
860
861 for (i = 0; i < VM_MAX_MEMMAPS; i++) {
862 mm = &vm->mem_maps[i];
863 if (!sysmem_mapping(vm, mm))
864 continue;
865
866 if (map) {
867 KASSERT((mm->flags & VM_MEMMAP_F_IOMMU) == 0,
868 ("iommu map found invalid memmap %#lx/%#lx/%#x",
869 mm->gpa, mm->len, mm->flags));
870 if ((mm->flags & VM_MEMMAP_F_WIRED) == 0)
871 continue;
872 mm->flags |= VM_MEMMAP_F_IOMMU;
873 } else {
874 if ((mm->flags & VM_MEMMAP_F_IOMMU) == 0)
875 continue;
876 mm->flags &= ~VM_MEMMAP_F_IOMMU;
877 KASSERT((mm->flags & VM_MEMMAP_F_WIRED) != 0,
878 ("iommu unmap found invalid memmap %#lx/%#lx/%#x",
879 mm->gpa, mm->len, mm->flags));
880 }
881
882 gpa = mm->gpa;
883 while (gpa < mm->gpa + mm->len) {
884 vp = vm_gpa_hold(vm, -1, gpa, PAGE_SIZE, VM_PROT_WRITE,
885 &cookie);
886 KASSERT(vp != NULL, ("vm(%s) could not map gpa %#lx",
887 vm_name(vm), gpa));
888
889 vm_gpa_release(cookie);
890
891 hpa = DMAP_TO_PHYS((uintptr_t)vp);
892 if (map) {
893 iommu_create_mapping(vm->iommu, gpa, hpa, sz);
894 iommu_remove_mapping(host_domain, hpa, sz);
895 } else {
896 iommu_remove_mapping(vm->iommu, gpa, sz);
897 iommu_create_mapping(host_domain, hpa, hpa, sz);
898 }
899
900 gpa += PAGE_SIZE;
901 }
902 }
903
904 /*
905 * Invalidate the cached translations associated with the domain
906 * from which pages were removed.
907 */
908 if (map)
909 iommu_invalidate_tlb(host_domain);
910 else
911 iommu_invalidate_tlb(vm->iommu);
912 }
913
914 #define vm_iommu_unmap(vm) vm_iommu_modify((vm), false)
915 #define vm_iommu_map(vm) vm_iommu_modify((vm), true)
916
917 int
918 vm_unassign_pptdev(struct vm *vm, int bus, int slot, int func)
919 {
920 int error;
921
922 error = ppt_unassign_device(vm, bus, slot, func);
923 if (error)
924 return (error);
925
926 if (ppt_assigned_devices(vm) == 0)
927 vm_iommu_unmap(vm);
928
929 return (0);
930 }
931
932 int
933 vm_assign_pptdev(struct vm *vm, int bus, int slot, int func)
934 {
935 int error;
936 vm_paddr_t maxaddr;
937
938 /* Set up the IOMMU to do the 'gpa' to 'hpa' translation */
939 if (ppt_assigned_devices(vm) == 0) {
940 KASSERT(vm->iommu == NULL,
941 ("vm_assign_pptdev: iommu must be NULL"));
942 maxaddr = vmm_sysmem_maxaddr(vm);
943 vm->iommu = iommu_create_domain(maxaddr);
944 if (vm->iommu == NULL)
945 return (ENXIO);
946 vm_iommu_map(vm);
947 }
948
949 error = ppt_assign_device(vm, bus, slot, func);
950 return (error);
951 }
952
953 void *
954 vm_gpa_hold(struct vm *vm, int vcpuid, vm_paddr_t gpa, size_t len, int reqprot,
955 void **cookie)
956 {
957 int i, count, pageoff;
958 struct mem_map *mm;
959 vm_page_t m;
960 #ifdef INVARIANTS
961 /*
962 * All vcpus are frozen by ioctls that modify the memory map
963 	 * (e.g. VM_MMAP_MEMSEG). Therefore 'vm->mem_maps[]' stability is
964 * guaranteed if at least one vcpu is in the VCPU_FROZEN state.
965 */
966 int state;
967 KASSERT(vcpuid >= -1 && vcpuid < vm->maxcpus, ("%s: invalid vcpuid %d",
968 __func__, vcpuid));
969 for (i = 0; i < vm->maxcpus; i++) {
970 if (vcpuid != -1 && vcpuid != i)
971 continue;
972 state = vcpu_get_state(vm, i, NULL);
973 KASSERT(state == VCPU_FROZEN, ("%s: invalid vcpu state %d",
974 __func__, state));
975 }
976 #endif
977 pageoff = gpa & PAGE_MASK;
978 if (len > PAGE_SIZE - pageoff)
979 panic("vm_gpa_hold: invalid gpa/len: 0x%016lx/%lu", gpa, len);
980
981 count = 0;
982 for (i = 0; i < VM_MAX_MEMMAPS; i++) {
983 mm = &vm->mem_maps[i];
984 if (gpa >= mm->gpa && gpa < mm->gpa + mm->len) {
985 count = vm_fault_quick_hold_pages(&vm->vmspace->vm_map,
986 trunc_page(gpa), PAGE_SIZE, reqprot, &m, 1);
987 break;
988 }
989 }
990
991 if (count == 1) {
992 *cookie = m;
993 return ((void *)(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)) + pageoff));
994 } else {
995 *cookie = NULL;
996 return (NULL);
997 }
998 }
999
1000 void
1001 vm_gpa_release(void *cookie)
1002 {
1003 vm_page_t m = cookie;
1004
1005 vm_page_lock(m);
1006 vm_page_unhold(m);
1007 vm_page_unlock(m);
1008 }
1009
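/*
 * Typical hold/release usage (an illustrative sketch;
 * 'example_copy_from_guest' is hypothetical): translate one guest
 * page through the direct map, copy from it, then drop the hold.
 * Per the checks above, 'len' must not cross a page boundary and
 * the vcpu must be frozen (or vcpuid passed as -1 with all vcpus
 * frozen).
 */
#if 0
static int
example_copy_from_guest(struct vm *vm, int vcpuid, vm_paddr_t gpa,
    void *dst, size_t len)
{
	void *cookie, *hva;

	hva = vm_gpa_hold(vm, vcpuid, gpa, len, VM_PROT_READ, &cookie);
	if (hva == NULL)
		return (EFAULT);
	bcopy(hva, dst, len);
	vm_gpa_release(cookie);
	return (0);
}
#endif
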
1010 int
1011 vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval)
1012 {
1013
1014 if (vcpu < 0 || vcpu >= vm->maxcpus)
1015 return (EINVAL);
1016
1017 if (reg >= VM_REG_LAST)
1018 return (EINVAL);
1019
1020 return (VMGETREG(vm->cookie, vcpu, reg, retval));
1021 }
1022
1023 int
1024 vm_set_register(struct vm *vm, int vcpuid, int reg, uint64_t val)
1025 {
1026 struct vcpu *vcpu;
1027 int error;
1028
1029 if (vcpuid < 0 || vcpuid >= vm->maxcpus)
1030 return (EINVAL);
1031
1032 if (reg >= VM_REG_LAST)
1033 return (EINVAL);
1034
1035 error = VMSETREG(vm->cookie, vcpuid, reg, val);
1036 if (error || reg != VM_REG_GUEST_RIP)
1037 return (error);
1038
1039 /* Set 'nextrip' to match the value of %rip */
1040 VCPU_CTR1(vm, vcpuid, "Setting nextrip to %#lx", val);
1041 vcpu = &vm->vcpu[vcpuid];
1042 vcpu->nextrip = val;
1043 return (0);
1044 }
1045
1046 static bool
1047 is_descriptor_table(int reg)
1048 {
1049
1050 switch (reg) {
1051 case VM_REG_GUEST_IDTR:
1052 case VM_REG_GUEST_GDTR:
1053 return (true);
1054 default:
1055 return (false);
1056 }
1057 }
1058
1059 static bool
1060 is_segment_register(int reg)
1061 {
1062
1063 switch (reg) {
1064 case VM_REG_GUEST_ES:
1065 case VM_REG_GUEST_CS:
1066 case VM_REG_GUEST_SS:
1067 case VM_REG_GUEST_DS:
1068 case VM_REG_GUEST_FS:
1069 case VM_REG_GUEST_GS:
1070 case VM_REG_GUEST_TR:
1071 case VM_REG_GUEST_LDTR:
1072 return (true);
1073 default:
1074 return (false);
1075 }
1076 }
1077
1078 int
1079 vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
1080 struct seg_desc *desc)
1081 {
1082
1083 if (vcpu < 0 || vcpu >= vm->maxcpus)
1084 return (EINVAL);
1085
1086 if (!is_segment_register(reg) && !is_descriptor_table(reg))
1087 return (EINVAL);
1088
1089 return (VMGETDESC(vm->cookie, vcpu, reg, desc));
1090 }
1091
1092 int
1093 vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
1094 struct seg_desc *desc)
1095 {
1096 if (vcpu < 0 || vcpu >= vm->maxcpus)
1097 return (EINVAL);
1098
1099 if (!is_segment_register(reg) && !is_descriptor_table(reg))
1100 return (EINVAL);
1101
1102 return (VMSETDESC(vm->cookie, vcpu, reg, desc));
1103 }
1104
1105 static void
1106 restore_guest_fpustate(struct vcpu *vcpu)
1107 {
1108
1109 /* flush host state to the pcb */
1110 fpuexit(curthread);
1111
1112 /* restore guest FPU state */
1113 fpu_stop_emulating();
1114 fpurestore(vcpu->guestfpu);
1115
1116 /* restore guest XCR0 if XSAVE is enabled in the host */
1117 if (rcr4() & CR4_XSAVE)
1118 load_xcr(0, vcpu->guest_xcr0);
1119
1120 /*
1121 * The FPU is now "dirty" with the guest's state so turn on emulation
1122 * to trap any access to the FPU by the host.
1123 */
1124 fpu_start_emulating();
1125 }
1126
1127 static void
1128 save_guest_fpustate(struct vcpu *vcpu)
1129 {
1130
1131 if ((rcr0() & CR0_TS) == 0)
1132 panic("fpu emulation not enabled in host!");
1133
1134 /* save guest XCR0 and restore host XCR0 */
1135 if (rcr4() & CR4_XSAVE) {
1136 vcpu->guest_xcr0 = rxcr(0);
1137 load_xcr(0, vmm_get_host_xcr0());
1138 }
1139
1140 /* save guest FPU state */
1141 fpu_stop_emulating();
1142 fpusave(vcpu->guestfpu);
1143 fpu_start_emulating();
1144 }
1145
1146 static VMM_STAT(VCPU_IDLE_TICKS, "number of ticks vcpu was idle");
1147
1148 static int
1149 vcpu_set_state_locked(struct vm *vm, int vcpuid, enum vcpu_state newstate,
1150 bool from_idle)
1151 {
1152 struct vcpu *vcpu;
1153 int error;
1154
1155 vcpu = &vm->vcpu[vcpuid];
1156 vcpu_assert_locked(vcpu);
1157
1158 /*
1159 * State transitions from the vmmdev_ioctl() must always begin from
1160 * the VCPU_IDLE state. This guarantees that there is only a single
1161 * ioctl() operating on a vcpu at any point.
1162 */
1163 if (from_idle) {
1164 while (vcpu->state != VCPU_IDLE) {
1165 vcpu->reqidle = 1;
1166 vcpu_notify_event_locked(vcpu, false);
1167 VCPU_CTR1(vm, vcpuid, "vcpu state change from %s to "
1168 "idle requested", vcpu_state2str(vcpu->state));
1169 msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz);
1170 }
1171 } else {
1172 KASSERT(vcpu->state != VCPU_IDLE, ("invalid transition from "
1173 "vcpu idle state"));
1174 }
1175
1176 if (vcpu->state == VCPU_RUNNING) {
1177 KASSERT(vcpu->hostcpu == curcpu, ("curcpu %d and hostcpu %d "
1178 "mismatch for running vcpu", curcpu, vcpu->hostcpu));
1179 } else {
1180 KASSERT(vcpu->hostcpu == NOCPU, ("Invalid hostcpu %d for a "
1181 "vcpu that is not running", vcpu->hostcpu));
1182 }
1183
1184 /*
1185 * The following state transitions are allowed:
1186 * IDLE -> FROZEN -> IDLE
1187 * FROZEN -> RUNNING -> FROZEN
1188 * FROZEN -> SLEEPING -> FROZEN
1189 */
1190 switch (vcpu->state) {
1191 case VCPU_IDLE:
1192 case VCPU_RUNNING:
1193 case VCPU_SLEEPING:
1194 error = (newstate != VCPU_FROZEN);
1195 break;
1196 case VCPU_FROZEN:
1197 error = (newstate == VCPU_FROZEN);
1198 break;
1199 default:
1200 error = 1;
1201 break;
1202 }
1203
1204 if (error)
1205 return (EBUSY);
1206
1207 VCPU_CTR2(vm, vcpuid, "vcpu state changed from %s to %s",
1208 vcpu_state2str(vcpu->state), vcpu_state2str(newstate));
1209
1210 vcpu->state = newstate;
1211 if (newstate == VCPU_RUNNING)
1212 vcpu->hostcpu = curcpu;
1213 else
1214 vcpu->hostcpu = NOCPU;
1215
1216 if (newstate == VCPU_IDLE)
1217 wakeup(&vcpu->state);
1218
1219 return (0);
1220 }
1221
1222 static void
1223 vcpu_require_state(struct vm *vm, int vcpuid, enum vcpu_state newstate)
1224 {
1225 int error;
1226
1227 if ((error = vcpu_set_state(vm, vcpuid, newstate, false)) != 0)
1228 		panic("Error %d setting state to %d", error, newstate);
1229 }
1230
1231 static void
1232 vcpu_require_state_locked(struct vm *vm, int vcpuid, enum vcpu_state newstate)
1233 {
1234 int error;
1235
1236 if ((error = vcpu_set_state_locked(vm, vcpuid, newstate, false)) != 0)
1237 panic("Error %d setting state to %d", error, newstate);
1238 }
1239
1240 #define RENDEZVOUS_CTR0(vm, vcpuid, fmt) \
1241 do { \
1242 if (vcpuid >= 0) \
1243 VCPU_CTR0(vm, vcpuid, fmt); \
1244 else \
1245 VM_CTR0(vm, fmt); \
1246 } while (0)
1247
1248 static int
1249 vm_handle_rendezvous(struct vm *vm, int vcpuid)
1250 {
1251 struct thread *td;
1252 int error;
1253
1254 KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < vm->maxcpus),
1255 ("vm_handle_rendezvous: invalid vcpuid %d", vcpuid));
1256
1257 error = 0;
1258 td = curthread;
1259 mtx_lock(&vm->rendezvous_mtx);
1260 while (vm->rendezvous_func != NULL) {
1261 /* 'rendezvous_req_cpus' must be a subset of 'active_cpus' */
1262 CPU_AND(&vm->rendezvous_req_cpus, &vm->active_cpus);
1263
1264 if (vcpuid != -1 &&
1265 CPU_ISSET(vcpuid, &vm->rendezvous_req_cpus) &&
1266 !CPU_ISSET(vcpuid, &vm->rendezvous_done_cpus)) {
1267 VCPU_CTR0(vm, vcpuid, "Calling rendezvous func");
1268 (*vm->rendezvous_func)(vm, vcpuid, vm->rendezvous_arg);
1269 CPU_SET(vcpuid, &vm->rendezvous_done_cpus);
1270 }
1271 if (CPU_CMP(&vm->rendezvous_req_cpus,
1272 &vm->rendezvous_done_cpus) == 0) {
1273 VCPU_CTR0(vm, vcpuid, "Rendezvous completed");
1274 vm->rendezvous_func = NULL;
1275 wakeup(&vm->rendezvous_func);
1276 break;
1277 }
1278 RENDEZVOUS_CTR0(vm, vcpuid, "Wait for rendezvous completion");
1279 mtx_sleep(&vm->rendezvous_func, &vm->rendezvous_mtx, 0,
1280 "vmrndv", hz);
1281 if ((td->td_flags & TDF_NEEDSUSPCHK) != 0) {
1282 mtx_unlock(&vm->rendezvous_mtx);
1283 error = thread_check_susp(td, true);
1284 if (error != 0)
1285 return (error);
1286 mtx_lock(&vm->rendezvous_mtx);
1287 }
1288 }
1289 mtx_unlock(&vm->rendezvous_mtx);
1290 return (0);
1291 }
1292
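/*
 * The rendezvous callback is invoked once on each vcpu named in
 * 'rendezvous_req_cpus', with 'rendezvous_arg' passed through.  A
 * minimal callback matching the call site above would look like
 * this (sketch; the entry point that installs 'rendezvous_func' is
 * not part of this excerpt, and a void return type is assumed):
 */
#if 0
static void
example_rendezvous_cb(struct vm *vm, int vcpuid, void *arg)
{
	/* Per-vcpu work, e.g. refreshing cached interrupt state. */
}
#endif
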
1293 /*
1294 * Emulate a guest 'hlt' by sleeping until the vcpu is ready to run.
1295 */
1296 static int
1297 vm_handle_hlt(struct vm *vm, int vcpuid, bool intr_disabled, bool *retu)
1298 {
1299 struct vcpu *vcpu;
1300 const char *wmesg;
1301 struct thread *td;
1302 int error, t, vcpu_halted, vm_halted;
1303
1304 KASSERT(!CPU_ISSET(vcpuid, &vm->halted_cpus), ("vcpu already halted"));
1305
1306 vcpu = &vm->vcpu[vcpuid];
1307 vcpu_halted = 0;
1308 vm_halted = 0;
1309 error = 0;
1310 td = curthread;
1311
1312 vcpu_lock(vcpu);
1313 while (1) {
1314 /*
1315 * Do a final check for pending NMI or interrupts before
1316 * really putting this thread to sleep. Also check for
1317 * software events that would cause this vcpu to wakeup.
1318 *
1319 * These interrupts/events could have happened after the
1320 * vcpu returned from VMRUN() and before it acquired the
1321 * vcpu lock above.
1322 */
1323 if (vm->rendezvous_func != NULL || vm->suspend || vcpu->reqidle)
1324 break;
1325 if (vm_nmi_pending(vm, vcpuid))
1326 break;
1327 if (!intr_disabled) {
1328 if (vm_extint_pending(vm, vcpuid) ||
1329 vlapic_pending_intr(vcpu->vlapic, NULL)) {
1330 break;
1331 }
1332 }
1333
1334 /* Don't go to sleep if the vcpu thread needs to yield */
1335 if (vcpu_should_yield(vm, vcpuid))
1336 break;
1337
1338 if (vcpu_debugged(vm, vcpuid))
1339 break;
1340
1341 /*
1342 * Some Linux guests implement "halt" by having all vcpus
1343 * execute HLT with interrupts disabled. 'halted_cpus' keeps
1344 * track of the vcpus that have entered this state. When all
1345 * vcpus enter the halted state the virtual machine is halted.
1346 */
1347 if (intr_disabled) {
1348 wmesg = "vmhalt";
1349 VCPU_CTR0(vm, vcpuid, "Halted");
1350 if (!vcpu_halted && halt_detection_enabled) {
1351 vcpu_halted = 1;
1352 CPU_SET_ATOMIC(vcpuid, &vm->halted_cpus);
1353 }
1354 if (CPU_CMP(&vm->halted_cpus, &vm->active_cpus) == 0) {
1355 vm_halted = 1;
1356 break;
1357 }
1358 } else {
1359 wmesg = "vmidle";
1360 }
1361
1362 t = ticks;
1363 vcpu_require_state_locked(vm, vcpuid, VCPU_SLEEPING);
1364 /*
1365 * XXX msleep_spin() cannot be interrupted by signals so
1366 * wake up periodically to check pending signals.
1367 */
1368 msleep_spin(vcpu, &vcpu->mtx, wmesg, hz);
1369 vcpu_require_state_locked(vm, vcpuid, VCPU_FROZEN);
1370 vmm_stat_incr(vm, vcpuid, VCPU_IDLE_TICKS, ticks - t);
1371 if ((td->td_flags & TDF_NEEDSUSPCHK) != 0) {
1372 vcpu_unlock(vcpu);
1373 error = thread_check_susp(td, false);
1374 if (error != 0)
1375 return (error);
1376 vcpu_lock(vcpu);
1377 }
1378 }
1379
1380 if (vcpu_halted)
1381 CPU_CLR_ATOMIC(vcpuid, &vm->halted_cpus);
1382
1383 vcpu_unlock(vcpu);
1384
1385 if (vm_halted)
1386 vm_suspend(vm, VM_SUSPEND_HALT);
1387
1388 return (0);
1389 }
1390
1391 static int
1392 vm_handle_paging(struct vm *vm, int vcpuid, bool *retu)
1393 {
1394 int rv, ftype;
1395 struct vm_map *map;
1396 struct vcpu *vcpu;
1397 struct vm_exit *vme;
1398
1399 vcpu = &vm->vcpu[vcpuid];
1400 vme = &vcpu->exitinfo;
1401
1402 KASSERT(vme->inst_length == 0, ("%s: invalid inst_length %d",
1403 __func__, vme->inst_length));
1404
1405 ftype = vme->u.paging.fault_type;
1406 KASSERT(ftype == VM_PROT_READ ||
1407 ftype == VM_PROT_WRITE || ftype == VM_PROT_EXECUTE,
1408 ("vm_handle_paging: invalid fault_type %d", ftype));
1409
1410 if (ftype == VM_PROT_READ || ftype == VM_PROT_WRITE) {
1411 rv = pmap_emulate_accessed_dirty(vmspace_pmap(vm->vmspace),
1412 vme->u.paging.gpa, ftype);
1413 if (rv == 0) {
1414 VCPU_CTR2(vm, vcpuid, "%s bit emulation for gpa %#lx",
1415 ftype == VM_PROT_READ ? "accessed" : "dirty",
1416 vme->u.paging.gpa);
1417 goto done;
1418 }
1419 }
1420
1421 map = &vm->vmspace->vm_map;
1422 rv = vm_fault(map, vme->u.paging.gpa, ftype, VM_FAULT_NORMAL, NULL);
1423
1424 VCPU_CTR3(vm, vcpuid, "vm_handle_paging rv = %d, gpa = %#lx, "
1425 "ftype = %d", rv, vme->u.paging.gpa, ftype);
1426
1427 if (rv != KERN_SUCCESS)
1428 return (EFAULT);
1429 done:
1430 return (0);
1431 }
1432
1433 static int
1434 vm_handle_inst_emul(struct vm *vm, int vcpuid, bool *retu)
1435 {
1436 struct vie *vie;
1437 struct vcpu *vcpu;
1438 struct vm_exit *vme;
1439 uint64_t gla, gpa, cs_base;
1440 struct vm_guest_paging *paging;
1441 mem_region_read_t mread;
1442 mem_region_write_t mwrite;
1443 enum vm_cpu_mode cpu_mode;
1444 int cs_d, error, fault;
1445
1446 vcpu = &vm->vcpu[vcpuid];
1447 vme = &vcpu->exitinfo;
1448
1449 KASSERT(vme->inst_length == 0, ("%s: invalid inst_length %d",
1450 __func__, vme->inst_length));
1451
1452 gla = vme->u.inst_emul.gla;
1453 gpa = vme->u.inst_emul.gpa;
1454 cs_base = vme->u.inst_emul.cs_base;
1455 cs_d = vme->u.inst_emul.cs_d;
1456 vie = &vme->u.inst_emul.vie;
1457 paging = &vme->u.inst_emul.paging;
1458 cpu_mode = paging->cpu_mode;
1459
1460 VCPU_CTR1(vm, vcpuid, "inst_emul fault accessing gpa %#lx", gpa);
1461
1462 /* Fetch, decode and emulate the faulting instruction */
1463 if (vie->num_valid == 0) {
1464 error = vmm_fetch_instruction(vm, vcpuid, paging, vme->rip +
1465 cs_base, VIE_INST_SIZE, vie, &fault);
1466 } else {
1467 /*
1468 * The instruction bytes have already been copied into 'vie'
1469 */
1470 error = fault = 0;
1471 }
1472 if (error || fault)
1473 return (error);
1474
1475 if (vmm_decode_instruction(vm, vcpuid, gla, cpu_mode, cs_d, vie) != 0) {
1476 VCPU_CTR1(vm, vcpuid, "Error decoding instruction at %#lx",
1477 vme->rip + cs_base);
1478 *retu = true; /* dump instruction bytes in userspace */
1479 return (0);
1480 }
1481
1482 /*
1483 * Update 'nextrip' based on the length of the emulated instruction.
1484 */
1485 vme->inst_length = vie->num_processed;
1486 vcpu->nextrip += vie->num_processed;
1487 VCPU_CTR1(vm, vcpuid, "nextrip updated to %#lx after instruction "
1488 "decoding", vcpu->nextrip);
1489
1490 /* return to userland unless this is an in-kernel emulated device */
1491 if (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE) {
1492 mread = lapic_mmio_read;
1493 mwrite = lapic_mmio_write;
1494 } else if (gpa >= VIOAPIC_BASE && gpa < VIOAPIC_BASE + VIOAPIC_SIZE) {
1495 mread = vioapic_mmio_read;
1496 mwrite = vioapic_mmio_write;
1497 } else if (gpa >= VHPET_BASE && gpa < VHPET_BASE + VHPET_SIZE) {
1498 mread = vhpet_mmio_read;
1499 mwrite = vhpet_mmio_write;
1500 } else {
1501 *retu = true;
1502 return (0);
1503 }
1504
1505 error = vmm_emulate_instruction(vm, vcpuid, gpa, vie, paging,
1506 mread, mwrite, retu);
1507
1508 return (error);
1509 }
1510
1511 static int
1512 vm_handle_suspend(struct vm *vm, int vcpuid, bool *retu)
1513 {
1514 int error, i;
1515 struct vcpu *vcpu;
1516 struct thread *td;
1517
1518 error = 0;
1519 vcpu = &vm->vcpu[vcpuid];
1520 td = curthread;
1521
1522 CPU_SET_ATOMIC(vcpuid, &vm->suspended_cpus);
1523
1524 /*
1525 * Wait until all 'active_cpus' have suspended themselves.
1526 *
1527 * Since a VM may be suspended at any time including when one or
1528 * more vcpus are doing a rendezvous we need to call the rendezvous
1529 * handler while we are waiting to prevent a deadlock.
1530 */
1531 vcpu_lock(vcpu);
1532 while (error == 0) {
1533 if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
1534 VCPU_CTR0(vm, vcpuid, "All vcpus suspended");
1535 break;
1536 }
1537
1538 if (vm->rendezvous_func == NULL) {
1539 VCPU_CTR0(vm, vcpuid, "Sleeping during suspend");
1540 vcpu_require_state_locked(vm, vcpuid, VCPU_SLEEPING);
1541 msleep_spin(vcpu, &vcpu->mtx, "vmsusp", hz);
1542 vcpu_require_state_locked(vm, vcpuid, VCPU_FROZEN);
1543 if ((td->td_flags & TDF_NEEDSUSPCHK) != 0) {
1544 vcpu_unlock(vcpu);
1545 error = thread_check_susp(td, false);
1546 vcpu_lock(vcpu);
1547 }
1548 } else {
1549 VCPU_CTR0(vm, vcpuid, "Rendezvous during suspend");
1550 vcpu_unlock(vcpu);
1551 error = vm_handle_rendezvous(vm, vcpuid);
1552 vcpu_lock(vcpu);
1553 }
1554 }
1555 vcpu_unlock(vcpu);
1556
1557 /*
1558 * Wakeup the other sleeping vcpus and return to userspace.
1559 */
1560 for (i = 0; i < vm->maxcpus; i++) {
1561 if (CPU_ISSET(i, &vm->suspended_cpus)) {
1562 vcpu_notify_event(vm, i, false);
1563 }
1564 }
1565
1566 *retu = true;
1567 return (error);
1568 }
1569
1570 static int
1571 vm_handle_reqidle(struct vm *vm, int vcpuid, bool *retu)
1572 {
1573 struct vcpu *vcpu = &vm->vcpu[vcpuid];
1574
1575 vcpu_lock(vcpu);
1576 KASSERT(vcpu->reqidle, ("invalid vcpu reqidle %d", vcpu->reqidle));
1577 vcpu->reqidle = 0;
1578 vcpu_unlock(vcpu);
1579 *retu = true;
1580 return (0);
1581 }
1582
1583 int
1584 vm_suspend(struct vm *vm, enum vm_suspend_how how)
1585 {
1586 int i;
1587
1588 if (how <= VM_SUSPEND_NONE || how >= VM_SUSPEND_LAST)
1589 return (EINVAL);
1590
1591 if (atomic_cmpset_int(&vm->suspend, 0, how) == 0) {
1592 VM_CTR2(vm, "virtual machine already suspended %d/%d",
1593 vm->suspend, how);
1594 return (EALREADY);
1595 }
1596
1597 VM_CTR1(vm, "virtual machine successfully suspended %d", how);
1598
1599 /*
1600 * Notify all active vcpus that they are now suspended.
1601 */
1602 for (i = 0; i < vm->maxcpus; i++) {
1603 if (CPU_ISSET(i, &vm->active_cpus))
1604 vcpu_notify_event(vm, i, false);
1605 }
1606
1607 return (0);
1608 }
1609
1610 void
1611 vm_exit_suspended(struct vm *vm, int vcpuid, uint64_t rip)
1612 {
1613 struct vm_exit *vmexit;
1614
1615 KASSERT(vm->suspend > VM_SUSPEND_NONE && vm->suspend < VM_SUSPEND_LAST,
1616 ("vm_exit_suspended: invalid suspend type %d", vm->suspend));
1617
1618 vmexit = vm_exitinfo(vm, vcpuid);
1619 vmexit->rip = rip;
1620 vmexit->inst_length = 0;
1621 vmexit->exitcode = VM_EXITCODE_SUSPENDED;
1622 vmexit->u.suspended.how = vm->suspend;
1623 }
1624
1625 void
1626 vm_exit_debug(struct vm *vm, int vcpuid, uint64_t rip)
1627 {
1628 struct vm_exit *vmexit;
1629
1630 vmexit = vm_exitinfo(vm, vcpuid);
1631 vmexit->rip = rip;
1632 vmexit->inst_length = 0;
1633 vmexit->exitcode = VM_EXITCODE_DEBUG;
1634 }
1635
1636 void
1637 vm_exit_rendezvous(struct vm *vm, int vcpuid, uint64_t rip)
1638 {
1639 struct vm_exit *vmexit;
1640
1641 KASSERT(vm->rendezvous_func != NULL, ("rendezvous not in progress"));
1642
1643 vmexit = vm_exitinfo(vm, vcpuid);
1644 vmexit->rip = rip;
1645 vmexit->inst_length = 0;
1646 vmexit->exitcode = VM_EXITCODE_RENDEZVOUS;
1647 vmm_stat_incr(vm, vcpuid, VMEXIT_RENDEZVOUS, 1);
1648 }
1649
1650 void
1651 vm_exit_reqidle(struct vm *vm, int vcpuid, uint64_t rip)
1652 {
1653 struct vm_exit *vmexit;
1654
1655 vmexit = vm_exitinfo(vm, vcpuid);
1656 vmexit->rip = rip;
1657 vmexit->inst_length = 0;
1658 vmexit->exitcode = VM_EXITCODE_REQIDLE;
1659 vmm_stat_incr(vm, vcpuid, VMEXIT_REQIDLE, 1);
1660 }
1661
1662 void
1663 vm_exit_astpending(struct vm *vm, int vcpuid, uint64_t rip)
1664 {
1665 struct vm_exit *vmexit;
1666
1667 vmexit = vm_exitinfo(vm, vcpuid);
1668 vmexit->rip = rip;
1669 vmexit->inst_length = 0;
1670 vmexit->exitcode = VM_EXITCODE_BOGUS;
1671 vmm_stat_incr(vm, vcpuid, VMEXIT_ASTPENDING, 1);
1672 }
1673
1674 int
1675 vm_run(struct vm *vm, struct vm_run *vmrun)
1676 {
1677 struct vm_eventinfo evinfo;
1678 int error, vcpuid;
1679 struct vcpu *vcpu;
1680 struct pcb *pcb;
1681 uint64_t tscval;
1682 struct vm_exit *vme;
1683 bool retu, intr_disabled;
1684 pmap_t pmap;
1685
1686 vcpuid = vmrun->cpuid;
1687
1688 if (vcpuid < 0 || vcpuid >= vm->maxcpus)
1689 return (EINVAL);
1690
1691 if (!CPU_ISSET(vcpuid, &vm->active_cpus))
1692 return (EINVAL);
1693
1694 if (CPU_ISSET(vcpuid, &vm->suspended_cpus))
1695 return (EINVAL);
1696
1697 pmap = vmspace_pmap(vm->vmspace);
1698 vcpu = &vm->vcpu[vcpuid];
1699 vme = &vcpu->exitinfo;
1700 evinfo.rptr = &vm->rendezvous_func;
1701 evinfo.sptr = &vm->suspend;
1702 evinfo.iptr = &vcpu->reqidle;
1703 restart:
1704 critical_enter();
1705
1706 KASSERT(!CPU_ISSET(curcpu, &pmap->pm_active),
1707 ("vm_run: absurd pm_active"));
1708
1709 tscval = rdtsc();
1710
1711 pcb = PCPU_GET(curpcb);
1712 set_pcb_flags(pcb, PCB_FULL_IRET);
1713
1714 restore_guest_fpustate(vcpu);
1715
1716 vcpu_require_state(vm, vcpuid, VCPU_RUNNING);
1717 error = VMRUN(vm->cookie, vcpuid, vcpu->nextrip, pmap, &evinfo);
1718 vcpu_require_state(vm, vcpuid, VCPU_FROZEN);
1719
1720 save_guest_fpustate(vcpu);
1721
1722 vmm_stat_incr(vm, vcpuid, VCPU_TOTAL_RUNTIME, rdtsc() - tscval);
1723
1724 critical_exit();
1725
1726 if (error == 0) {
1727 retu = false;
1728 vcpu->nextrip = vme->rip + vme->inst_length;
1729 switch (vme->exitcode) {
1730 case VM_EXITCODE_REQIDLE:
1731 error = vm_handle_reqidle(vm, vcpuid, &retu);
1732 break;
1733 case VM_EXITCODE_SUSPENDED:
1734 error = vm_handle_suspend(vm, vcpuid, &retu);
1735 break;
1736 case VM_EXITCODE_IOAPIC_EOI:
1737 vioapic_process_eoi(vm, vcpuid,
1738 vme->u.ioapic_eoi.vector);
1739 break;
1740 case VM_EXITCODE_RENDEZVOUS:
1741 error = vm_handle_rendezvous(vm, vcpuid);
1742 break;
1743 case VM_EXITCODE_HLT:
1744 intr_disabled = ((vme->u.hlt.rflags & PSL_I) == 0);
1745 error = vm_handle_hlt(vm, vcpuid, intr_disabled, &retu);
1746 break;
1747 case VM_EXITCODE_PAGING:
1748 error = vm_handle_paging(vm, vcpuid, &retu);
1749 break;
1750 case VM_EXITCODE_INST_EMUL:
1751 error = vm_handle_inst_emul(vm, vcpuid, &retu);
1752 break;
1753 case VM_EXITCODE_INOUT:
1754 case VM_EXITCODE_INOUT_STR:
1755 error = vm_handle_inout(vm, vcpuid, vme, &retu);
1756 break;
1757 case VM_EXITCODE_MONITOR:
1758 case VM_EXITCODE_MWAIT:
1759 case VM_EXITCODE_VMINSN:
1760 vm_inject_ud(vm, vcpuid);
1761 break;
1762 default:
1763 retu = true; /* handled in userland */
1764 break;
1765 }
1766 }
1767
1768 if (error == 0 && retu == false)
1769 goto restart;
1770
1771 VCPU_CTR2(vm, vcpuid, "retu %d/%d", error, vme->exitcode);
1772
1773 /* copy the exit information */
1774 bcopy(vme, &vmrun->vm_exit, sizeof(struct vm_exit));
1775 return (error);
1776 }
1777
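/*
 * vm_run() loops internally until it hits an error or an exit that
 * must be completed elsewhere ('retu'), so its caller re-enters it
 * in a loop of its own.  A hedged sketch of such a caller (the
 * in-tree consumer is the VM_RUN ioctl path in vmm_dev.c):
 */
#if 0
	struct vm_run vmrun;
	int error;

	vmrun.cpuid = vcpuid;
	do {
		error = vm_run(vm, &vmrun);
		/*
		 * VM_EXITCODE_BOGUS exits (e.g. a pending AST) need
		 * no handling; any other exitcode would be emulated
		 * here before re-entering the guest.
		 */
	} while (error == 0 &&
	    vmrun.vm_exit.exitcode == VM_EXITCODE_BOGUS);
#endif
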
1778 int
1779 vm_restart_instruction(void *arg, int vcpuid)
1780 {
1781 struct vm *vm;
1782 struct vcpu *vcpu;
1783 enum vcpu_state state;
1784 uint64_t rip;
1785 int error;
1786
1787 vm = arg;
1788 if (vcpuid < 0 || vcpuid >= vm->maxcpus)
1789 return (EINVAL);
1790
1791 vcpu = &vm->vcpu[vcpuid];
1792 state = vcpu_get_state(vm, vcpuid, NULL);
1793 if (state == VCPU_RUNNING) {
1794 /*
1795 * When a vcpu is "running" the next instruction is determined
1796 * by adding 'rip' and 'inst_length' in the vcpu's 'exitinfo'.
1797 * Thus setting 'inst_length' to zero will cause the current
1798 * instruction to be restarted.
1799 */
1800 vcpu->exitinfo.inst_length = 0;
1801 VCPU_CTR1(vm, vcpuid, "restarting instruction at %#lx by "
1802 "setting inst_length to zero", vcpu->exitinfo.rip);
1803 } else if (state == VCPU_FROZEN) {
1804 /*
1805 * When a vcpu is "frozen" it is outside the critical section
1806 * around VMRUN() and 'nextrip' points to the next instruction.
1807 * Thus instruction restart is achieved by setting 'nextrip'
1808 * to the vcpu's %rip.
1809 */
1810 error = vm_get_register(vm, vcpuid, VM_REG_GUEST_RIP, &rip);
1811 KASSERT(!error, ("%s: error %d getting rip", __func__, error));
1812 VCPU_CTR2(vm, vcpuid, "restarting instruction by updating "
1813 "nextrip from %#lx to %#lx", vcpu->nextrip, rip);
1814 vcpu->nextrip = rip;
1815 } else {
1816 panic("%s: invalid state %d", __func__, state);
1817 }
1818 return (0);
1819 }
1820
1821 int
1822 vm_exit_intinfo(struct vm *vm, int vcpuid, uint64_t info)
1823 {
1824 struct vcpu *vcpu;
1825 int type, vector;
1826
1827 if (vcpuid < 0 || vcpuid >= vm->maxcpus)
1828 return (EINVAL);
1829
1830 vcpu = &vm->vcpu[vcpuid];
1831
1832 if (info & VM_INTINFO_VALID) {
1833 type = info & VM_INTINFO_TYPE;
1834 vector = info & 0xff;
1835 if (type == VM_INTINFO_NMI && vector != IDT_NMI)
1836 return (EINVAL);
1837 if (type == VM_INTINFO_HWEXCEPTION && vector >= 32)
1838 return (EINVAL);
1839 if (info & VM_INTINFO_RSVD)
1840 return (EINVAL);
1841 } else {
1842 info = 0;
1843 }
1844 VCPU_CTR2(vm, vcpuid, "%s: info1(%#lx)", __func__, info);
1845 vcpu->exitintinfo = info;
1846 return (0);
1847 }
1848
1849 enum exc_class {
1850 EXC_BENIGN,
1851 EXC_CONTRIBUTORY,
1852 EXC_PAGEFAULT
1853 };
1854
1855 #define IDT_VE 20 /* Virtualization Exception (Intel specific) */
1856
1857 static enum exc_class
1858 exception_class(uint64_t info)
1859 {
1860 int type, vector;
1861
1862 KASSERT(info & VM_INTINFO_VALID, ("intinfo must be valid: %#lx", info));
1863 type = info & VM_INTINFO_TYPE;
1864 vector = info & 0xff;
1865
1866 /* Table 6-4, "Interrupt and Exception Classes", Intel SDM, Vol 3 */
1867 switch (type) {
1868 case VM_INTINFO_HWINTR:
1869 case VM_INTINFO_SWINTR:
1870 case VM_INTINFO_NMI:
1871 return (EXC_BENIGN);
1872 default:
1873 /*
1874 * Hardware exception.
1875 *
1876 * SVM and VT-x use identical type values to represent NMI,
1877 * hardware interrupt and software interrupt.
1878 *
1879 * SVM uses type '3' for all exceptions. VT-x uses type '3'
1880 	 * for all exceptions except #BP and #OF, which use a type
1881 * value of '5' or '6'. Therefore we don't check for explicit
1882 * values of 'type' to classify 'intinfo' into a hardware
1883 * exception.
1884 */
1885 break;
1886 }
1887
1888 switch (vector) {
1889 case IDT_PF:
1890 case IDT_VE:
1891 return (EXC_PAGEFAULT);
1892 case IDT_DE:
1893 case IDT_TS:
1894 case IDT_NP:
1895 case IDT_SS:
1896 case IDT_GP:
1897 return (EXC_CONTRIBUTORY);
1898 default:
1899 return (EXC_BENIGN);
1900 }
1901 }
1902
1903 static int
1904 nested_fault(struct vm *vm, int vcpuid, uint64_t info1, uint64_t info2,
1905 uint64_t *retinfo)
1906 {
1907 enum exc_class exc1, exc2;
1908 int type1, vector1;
1909
1910 KASSERT(info1 & VM_INTINFO_VALID, ("info1 %#lx is not valid", info1));
1911 KASSERT(info2 & VM_INTINFO_VALID, ("info2 %#lx is not valid", info2));
1912
1913 /*
1914 * If an exception occurs while attempting to call the double-fault
1915 * handler the processor enters shutdown mode (aka triple fault).
1916 */
1917 type1 = info1 & VM_INTINFO_TYPE;
1918 vector1 = info1 & 0xff;
1919 if (type1 == VM_INTINFO_HWEXCEPTION && vector1 == IDT_DF) {
1920 VCPU_CTR2(vm, vcpuid, "triple fault: info1(%#lx), info2(%#lx)",
1921 info1, info2);
1922 vm_suspend(vm, VM_SUSPEND_TRIPLEFAULT);
1923 *retinfo = 0;
1924 return (0);
1925 }
1926
1927 /*
1928 * Table 6-5 "Conditions for Generating a Double Fault", Intel SDM, Vol3
1929 */
1930 exc1 = exception_class(info1);
1931 exc2 = exception_class(info2);
1932 if ((exc1 == EXC_CONTRIBUTORY && exc2 == EXC_CONTRIBUTORY) ||
1933 (exc1 == EXC_PAGEFAULT && exc2 != EXC_BENIGN)) {
1934 /* Convert nested fault into a double fault. */
1935 *retinfo = IDT_DF;
1936 *retinfo |= VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION;
1937 *retinfo |= VM_INTINFO_DEL_ERRCODE;
1938 } else {
1939 /* Handle exceptions serially */
1940 *retinfo = info2;
1941 }
1942 return (1);
1943 }
1944
1945 static uint64_t
1946 vcpu_exception_intinfo(struct vcpu *vcpu)
1947 {
1948 uint64_t info = 0;
1949
1950 if (vcpu->exception_pending) {
1951 info = vcpu->exc_vector & 0xff;
1952 info |= VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION;
1953 if (vcpu->exc_errcode_valid) {
1954 info |= VM_INTINFO_DEL_ERRCODE;
1955 info |= (uint64_t)vcpu->exc_errcode << 32;
1956 }
1957 }
1958 return (info);
1959 }
1960
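/*
 * Worked example of the layout built above (sketch only): a pending
 * #GP with error code 0 is encoded with the vector in bits 7:0, the
 * type/validity flags in between, and the error code in bits 63:32.
 */
#if 0
	uint64_t info;

	info = IDT_GP & 0xff;		/* vector, bits 7:0 */
	info |= VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION;
	info |= VM_INTINFO_DEL_ERRCODE;	/* error code is delivered */
	info |= (uint64_t)0 << 32;	/* error code, bits 63:32 */
#endif
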
1961 int
1962 vm_entry_intinfo(struct vm *vm, int vcpuid, uint64_t *retinfo)
1963 {
1964 struct vcpu *vcpu;
1965 uint64_t info1, info2;
1966 int valid;
1967
1968 KASSERT(vcpuid >= 0 &&
1969 vcpuid < vm->maxcpus, ("invalid vcpu %d", vcpuid));
1970
1971 vcpu = &vm->vcpu[vcpuid];
1972
1973 info1 = vcpu->exitintinfo;
1974 vcpu->exitintinfo = 0;
1975
1976 info2 = 0;
1977 if (vcpu->exception_pending) {
1978 info2 = vcpu_exception_intinfo(vcpu);
1979 vcpu->exception_pending = 0;
1980 VCPU_CTR2(vm, vcpuid, "Exception %d delivered: %#lx",
1981 vcpu->exc_vector, info2);
1982 }
1983
1984 if ((info1 & VM_INTINFO_VALID) && (info2 & VM_INTINFO_VALID)) {
1985 valid = nested_fault(vm, vcpuid, info1, info2, retinfo);
1986 } else if (info1 & VM_INTINFO_VALID) {
1987 *retinfo = info1;
1988 valid = 1;
1989 } else if (info2 & VM_INTINFO_VALID) {
1990 *retinfo = info2;
1991 valid = 1;
1992 } else {
1993 valid = 0;
1994 }
1995
1996 if (valid) {
1997 VCPU_CTR4(vm, vcpuid, "%s: info1(%#lx), info2(%#lx), "
1998 "retinfo(%#lx)", __func__, info1, info2, *retinfo);
1999 }
2000
2001 return (valid);
2002 }
2003
2004 int
2005 vm_get_intinfo(struct vm *vm, int vcpuid, uint64_t *info1, uint64_t *info2)
2006 {
2007 struct vcpu *vcpu;
2008
2009 if (vcpuid < 0 || vcpuid >= vm->maxcpus)
2010 return (EINVAL);
2011
2012 vcpu = &vm->vcpu[vcpuid];
2013 *info1 = vcpu->exitintinfo;
2014 *info2 = vcpu_exception_intinfo(vcpu);
2015 return (0);
2016 }
2017
2018 int
2019 vm_inject_exception(struct vm *vm, int vcpuid, int vector, int errcode_valid,
2020 uint32_t errcode, int restart_instruction)
2021 {
2022 struct vcpu *vcpu;
2023 uint64_t regval;
2024 int error;
2025
2026 if (vcpuid < 0 || vcpuid >= vm->maxcpus)
2027 return (EINVAL);
2028
2029 if (vector < 0 || vector >= 32)
2030 return (EINVAL);
2031
2032 /*
2033 * A double fault exception should never be injected directly into
2034 * the guest. It is a derived exception that results from specific
2035 * combinations of nested faults.
2036 */
2037 if (vector == IDT_DF)
2038 return (EINVAL);
2039
2040 vcpu = &vm->vcpu[vcpuid];
2041
2042 if (vcpu->exception_pending) {
2043 VCPU_CTR2(vm, vcpuid, "Unable to inject exception %d due to "
2044 "pending exception %d", vector, vcpu->exc_vector);
2045 return (EBUSY);
2046 }
2047
2048 if (errcode_valid) {
2049 /*
2050 * Exceptions don't deliver an error code in real mode.
2051 */
2052 		error = vm_get_register(vm, vcpuid, VM_REG_GUEST_CR0, &regval);
2053 KASSERT(!error, ("%s: error %d getting CR0", __func__, error));
2054 if (!(regval & CR0_PE))
2055 errcode_valid = 0;
2056 }
2057
2058 /*
2059 * From section 26.6.1 "Interruptibility State" in Intel SDM:
2060 *
2061 * Event blocking by "STI" or "MOV SS" is cleared after guest executes
2062 * one instruction or incurs an exception.
2063 */
2064 error = vm_set_register(vm, vcpuid, VM_REG_GUEST_INTR_SHADOW, 0);
2065 KASSERT(error == 0, ("%s: error %d clearing interrupt shadow",
2066 __func__, error));
2067
2068 if (restart_instruction)
2069 vm_restart_instruction(vm, vcpuid);
2070
2071 vcpu->exception_pending = 1;
2072 vcpu->exc_vector = vector;
2073 vcpu->exc_errcode = errcode;
2074 vcpu->exc_errcode_valid = errcode_valid;
2075 VCPU_CTR1(vm, vcpuid, "Exception %d pending", vector);
2076 return (0);
2077 }
2078
2079 void
2080 vm_inject_fault(void *vmarg, int vcpuid, int vector, int errcode_valid,
2081 int errcode)
2082 {
2083 struct vm *vm;
2084 int error, restart_instruction;
2085
2086 vm = vmarg;
2087 restart_instruction = 1;
2088
2089 error = vm_inject_exception(vm, vcpuid, vector, errcode_valid,
2090 errcode, restart_instruction);
2091 KASSERT(error == 0, ("vm_inject_exception error %d", error));
2092 }
2093
2094 void
2095 vm_inject_pf(void *vmarg, int vcpuid, int error_code, uint64_t cr2)
2096 {
2097 struct vm *vm;
2098 int error;
2099
2100 vm = vmarg;
2101 VCPU_CTR2(vm, vcpuid, "Injecting page fault: error_code %#x, cr2 %#lx",
2102 error_code, cr2);
2103
2104 error = vm_set_register(vm, vcpuid, VM_REG_GUEST_CR2, cr2);
2105 KASSERT(error == 0, ("vm_set_register(cr2) error %d", error));
2106
2107 vm_inject_fault(vm, vcpuid, IDT_PF, 1, error_code);
2108 }
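
/*
 * Editor's sketch (not original source): a software page-table walk
 * that fails on a guest write could report the fault like this.
 * 'faulting_gla' is a placeholder for the guest linear address that
 * missed; PGEX_W marks a write access with the present bit clear.
 */
#if 0
	vm_inject_pf(vm, vcpuid, PGEX_W, faulting_gla);
#endif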
2109
2110 static VMM_STAT(VCPU_NMI_COUNT, "number of NMIs delivered to vcpu");
2111
2112 int
2113 vm_inject_nmi(struct vm *vm, int vcpuid)
2114 {
2115 struct vcpu *vcpu;
2116
2117 if (vcpuid < 0 || vcpuid >= vm->maxcpus)
2118 return (EINVAL);
2119
2120 vcpu = &vm->vcpu[vcpuid];
2121
2122 vcpu->nmi_pending = 1;
2123 vcpu_notify_event(vm, vcpuid, false);
2124 return (0);
2125 }
2126
2127 int
2128 vm_nmi_pending(struct vm *vm, int vcpuid)
2129 {
2130 struct vcpu *vcpu;
2131
2132 if (vcpuid < 0 || vcpuid >= vm->maxcpus)
2133 panic("vm_nmi_pending: invalid vcpuid %d", vcpuid);
2134
2135 vcpu = &vm->vcpu[vcpuid];
2136
2137 return (vcpu->nmi_pending);
2138 }
2139
2140 void
2141 vm_nmi_clear(struct vm *vm, int vcpuid)
2142 {
2143 struct vcpu *vcpu;
2144
2145 if (vcpuid < 0 || vcpuid >= vm->maxcpus)
2146 panic("vm_nmi_clear: invalid vcpuid %d", vcpuid);
2147
2148 vcpu = &vm->vcpu[vcpuid];
2149
2150 if (vcpu->nmi_pending == 0)
2151 panic("vm_nmi_clear: inconsistent nmi_pending state");
2152
2153 vcpu->nmi_pending = 0;
2154 vmm_stat_incr(vm, vcpuid, VCPU_NMI_COUNT, 1);
2155 }
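
/*
 * Editor's sketch (not original source): the pending/clear pair above
 * is consumed by a backend's event-injection path, and the clear must
 * follow a successful injection only -- which is why an inconsistent
 * clear panics above. nmi_window_open() and backend_inject_nmi() are
 * hypothetical backend hooks.
 */
#if 0
	if (vm_nmi_pending(vm, vcpuid) && nmi_window_open(vcpuid)) {
		backend_inject_nmi(vcpuid);
		vm_nmi_clear(vm, vcpuid);
	}
#endif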
2156
2157 static VMM_STAT(VCPU_EXTINT_COUNT, "number of ExtINTs delivered to vcpu");
2158
2159 int
2160 vm_inject_extint(struct vm *vm, int vcpuid)
2161 {
2162 struct vcpu *vcpu;
2163
2164 if (vcpuid < 0 || vcpuid >= vm->maxcpus)
2165 return (EINVAL);
2166
2167 vcpu = &vm->vcpu[vcpuid];
2168
2169 vcpu->extint_pending = 1;
2170 vcpu_notify_event(vm, vcpuid, false);
2171 return (0);
2172 }
2173
2174 int
2175 vm_extint_pending(struct vm *vm, int vcpuid)
2176 {
2177 struct vcpu *vcpu;
2178
2179 if (vcpuid < 0 || vcpuid >= vm->maxcpus)
2180 panic("vm_extint_pending: invalid vcpuid %d", vcpuid);
2181
2182 vcpu = &vm->vcpu[vcpuid];
2183
2184 return (vcpu->extint_pending);
2185 }
2186
2187 void
2188 vm_extint_clear(struct vm *vm, int vcpuid)
2189 {
2190 struct vcpu *vcpu;
2191
2192 if (vcpuid < 0 || vcpuid >= vm->maxcpus)
2193 panic("vm_extint_clear: invalid vcpuid %d", vcpuid);
2194
2195 vcpu = &vm->vcpu[vcpuid];
2196
2197 if (vcpu->extint_pending == 0)
2198 panic("vm_extint_clear: inconsistent extint_pending state");
2199
2200 vcpu->extint_pending = 0;
2201 vmm_stat_incr(vm, vcpuid, VCPU_EXTINT_COUNT, 1);
2202 }
2203
2204 int
2205 vm_get_capability(struct vm *vm, int vcpu, int type, int *retval)
2206 {
2207 if (vcpu < 0 || vcpu >= vm->maxcpus)
2208 return (EINVAL);
2209
2210 if (type < 0 || type >= VM_CAP_MAX)
2211 return (EINVAL);
2212
2213 return (VMGETCAP(vm->cookie, vcpu, type, retval));
2214 }
2215
2216 int
2217 vm_set_capability(struct vm *vm, int vcpu, int type, int val)
2218 {
2219 if (vcpu < 0 || vcpu >= vm->maxcpus)
2220 return (EINVAL);
2221
2222 if (type < 0 || type >= VM_CAP_MAX)
2223 return (EINVAL);
2224
2225 return (VMSETCAP(vm->cookie, vcpu, type, val));
2226 }
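
/*
 * Editor's sketch (not original source): enabling an optional exit,
 * read-modify-write style. VM_CAP_HALT_EXIT is one of the standard
 * vm_cap_type values checked against VM_CAP_MAX above.
 */
#if 0
	int cap;

	if (vm_get_capability(vm, vcpuid, VM_CAP_HALT_EXIT, &cap) == 0 &&
	    cap == 0)
		(void)vm_set_capability(vm, vcpuid, VM_CAP_HALT_EXIT, 1);
#endif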
2227
2228 struct vlapic *
2229 vm_lapic(struct vm *vm, int cpu)
2230 {
2231 return (vm->vcpu[cpu].vlapic);
2232 }
2233
2234 struct vioapic *
2235 vm_ioapic(struct vm *vm)
2236 {
2237
2238 return (vm->vioapic);
2239 }
2240
2241 struct vhpet *
2242 vm_hpet(struct vm *vm)
2243 {
2244
2245 return (vm->vhpet);
2246 }
2247
2248 bool
2249 vmm_is_pptdev(int bus, int slot, int func)
2250 {
2251 int b, f, i, n, s;
2252 char *val, *cp, *cp2;
2253 bool found;
2254
2255 /*
2256 * XXX
2257 * The length of an environment variable is limited to 128 bytes which
2258 * puts an upper limit on the number of passthru devices that may be
2259 * specified using a single environment variable.
2260 *
2261 * Work around this by scanning multiple environment variable
2262 * names instead of a single one - yuck!
2263 */
2264 const char *names[] = { "pptdevs", "pptdevs2", "pptdevs3", NULL };
2265
2266 /* set pptdevs="1/2/3 4/5/6 7/8/9 10/11/12" */
2267 found = false;
2268 for (i = 0; names[i] != NULL && !found; i++) {
2269 cp = val = kern_getenv(names[i]);
2270 while (cp != NULL && *cp != '\0') {
2271 if ((cp2 = strchr(cp, ' ')) != NULL)
2272 *cp2 = '\0';
2273
2274 n = sscanf(cp, "%d/%d/%d", &b, &s, &f);
2275 if (n == 3 && bus == b && slot == s && func == f) {
2276 found = true;
2277 break;
2278 }
2279
2280 if (cp2 != NULL)
2281 *cp2++ = ' ';
2282
2283 cp = cp2;
2284 }
2285 freeenv(val);
2286 }
2287 return (found);
2288 }
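
/*
 * Editor's note (illustrative, not original source): the variables
 * scanned above are loader tunables, e.g. in /boot/loader.conf:
 *
 *	pptdevs="2/0/0 4/1/0"
 *	pptdevs2="6/0/0"
 *
 * Each entry is bus/slot/function; entries that do not fit in one
 * variable spill over into the next pptdevsN name.
 */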
2289
2290 void *
2291 vm_iommu_domain(struct vm *vm)
2292 {
2293
2294 return (vm->iommu);
2295 }
2296
2297 int
2298 vcpu_set_state(struct vm *vm, int vcpuid, enum vcpu_state newstate,
2299 bool from_idle)
2300 {
2301 int error;
2302 struct vcpu *vcpu;
2303
2304 if (vcpuid < 0 || vcpuid >= vm->maxcpus)
2305 panic("vcpu_set_state: invalid vcpuid %d", vcpuid);
2306
2307 vcpu = &vm->vcpu[vcpuid];
2308
2309 vcpu_lock(vcpu);
2310 error = vcpu_set_state_locked(vm, vcpuid, newstate, from_idle);
2311 vcpu_unlock(vcpu);
2312
2313 return (error);
2314 }
2315
2316 enum vcpu_state
2317 vcpu_get_state(struct vm *vm, int vcpuid, int *hostcpu)
2318 {
2319 struct vcpu *vcpu;
2320 enum vcpu_state state;
2321
2322 if (vcpuid < 0 || vcpuid >= vm->maxcpus)
2323 panic("vcpu_get_state: invalid vcpuid %d", vcpuid);
2324
2325 vcpu = &vm->vcpu[vcpuid];
2326
2327 vcpu_lock(vcpu);
2328 state = vcpu->state;
2329 if (hostcpu != NULL)
2330 *hostcpu = vcpu->hostcpu;
2331 vcpu_unlock(vcpu);
2332
2333 return (state);
2334 }
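
/*
 * Editor's sketch (not original source): vcpu_set_state_locked()
 * funnels every transition through VCPU_FROZEN, so a caller that
 * needs exclusive access to an idle vcpu brackets its work like this:
 */
#if 0
	error = vcpu_set_state(vm, vcpuid, VCPU_FROZEN, true);
	if (error == 0) {
		/* ... operate on the stopped vcpu ... */
		vcpu_set_state(vm, vcpuid, VCPU_IDLE, false);
	}
#endif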
2335
2336 int
2337 vm_activate_cpu(struct vm *vm, int vcpuid)
2338 {
2339
2340 if (vcpuid < 0 || vcpuid >= vm->maxcpus)
2341 return (EINVAL);
2342
2343 if (CPU_ISSET(vcpuid, &vm->active_cpus))
2344 return (EBUSY);
2345
2346 VCPU_CTR0(vm, vcpuid, "activated");
2347 CPU_SET_ATOMIC(vcpuid, &vm->active_cpus);
2348 return (0);
2349 }
2350
2351 int
2352 vm_suspend_cpu(struct vm *vm, int vcpuid)
2353 {
2354 int i;
2355
2356 if (vcpuid < -1 || vcpuid >= vm->maxcpus)
2357 return (EINVAL);
2358
2359 if (vcpuid == -1) {
2360 vm->debug_cpus = vm->active_cpus;
2361 for (i = 0; i < vm->maxcpus; i++) {
2362 if (CPU_ISSET(i, &vm->active_cpus))
2363 vcpu_notify_event(vm, i, false);
2364 }
2365 } else {
2366 if (!CPU_ISSET(vcpuid, &vm->active_cpus))
2367 return (EINVAL);
2368
2369 CPU_SET_ATOMIC(vcpuid, &vm->debug_cpus);
2370 vcpu_notify_event(vm, vcpuid, false);
2371 }
2372 return (0);
2373 }
2374
2375 int
2376 vm_resume_cpu(struct vm *vm, int vcpuid)
2377 {
2378
2379 if (vcpuid < -1 || vcpuid >= vm->maxcpus)
2380 return (EINVAL);
2381
2382 if (vcpuid == -1) {
2383 CPU_ZERO(&vm->debug_cpus);
2384 } else {
2385 if (!CPU_ISSET(vcpuid, &vm->debug_cpus))
2386 return (EINVAL);
2387
2388 CPU_CLR_ATOMIC(vcpuid, &vm->debug_cpus);
2389 }
2390 return (0);
2391 }
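
/*
 * Editor's sketch (not original source): a debugger stub freezes and
 * thaws the whole guest by passing the wildcard vcpuid of -1.
 */
#if 0
	vm_suspend_cpu(vm, -1);		/* park every active vcpu */
	/* ... inspect or modify guest state ... */
	vm_resume_cpu(vm, -1);
#endif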
2392
2393 int
2394 vcpu_debugged(struct vm *vm, int vcpuid)
2395 {
2396
2397 return (CPU_ISSET(vcpuid, &vm->debug_cpus));
2398 }
2399
2400 cpuset_t
2401 vm_active_cpus(struct vm *vm)
2402 {
2403
2404 return (vm->active_cpus);
2405 }
2406
2407 cpuset_t
2408 vm_debug_cpus(struct vm *vm)
2409 {
2410
2411 return (vm->debug_cpus);
2412 }
2413
2414 cpuset_t
2415 vm_suspended_cpus(struct vm *vm)
2416 {
2417
2418 return (vm->suspended_cpus);
2419 }
2420
2421 void *
2422 vcpu_stats(struct vm *vm, int vcpuid)
2423 {
2424
2425 return (vm->vcpu[vcpuid].stats);
2426 }
2427
2428 int
2429 vm_get_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state *state)
2430 {
2431 if (vcpuid < 0 || vcpuid >= vm->maxcpus)
2432 return (EINVAL);
2433
2434 *state = vm->vcpu[vcpuid].x2apic_state;
2435
2436 return (0);
2437 }
2438
2439 int
2440 vm_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state state)
2441 {
2442 if (vcpuid < 0 || vcpuid >= vm->maxcpus)
2443 return (EINVAL);
2444
2445 if (state >= X2APIC_STATE_LAST)
2446 return (EINVAL);
2447
2448 vm->vcpu[vcpuid].x2apic_state = state;
2449
2450 vlapic_set_x2apic_state(vm, vcpuid, state);
2451
2452 return (0);
2453 }
2454
2455 /*
2456 * This function is called to ensure that a vcpu "sees" a pending event
2457 * as soon as possible:
2458 * - If the vcpu thread is sleeping then it is woken up.
2459 * - If the vcpu is running on a different host_cpu then an IPI will be directed
2460 * to the host_cpu to cause the vcpu to trap into the hypervisor.
2461 */
2462 static void
2463 vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr)
2464 {
2465 int hostcpu;
2466
2467 hostcpu = vcpu->hostcpu;
2468 if (vcpu->state == VCPU_RUNNING) {
2469 KASSERT(hostcpu != NOCPU, ("vcpu running on invalid hostcpu"));
2470 if (hostcpu != curcpu) {
2471 if (lapic_intr) {
2472 vlapic_post_intr(vcpu->vlapic, hostcpu,
2473 vmm_ipinum);
2474 } else {
2475 ipi_cpu(hostcpu, vmm_ipinum);
2476 }
2477 } else {
2478 /*
2479 * If the 'vcpu' is running on 'curcpu' then it must
2480 * be sending a notification to itself (e.g. SELF_IPI).
2481 * The pending event will be picked up when the vcpu
2482 * transitions back to guest context.
2483 */
2484 }
2485 } else {
2486 KASSERT(hostcpu == NOCPU, ("vcpu state %d not consistent "
2487 "with hostcpu %d", vcpu->state, hostcpu));
2488 if (vcpu->state == VCPU_SLEEPING)
2489 wakeup_one(vcpu);
2490 }
2491 }
2492
2493 void
2494 vcpu_notify_event(struct vm *vm, int vcpuid, bool lapic_intr)
2495 {
2496 struct vcpu *vcpu = &vm->vcpu[vcpuid];
2497
2498 vcpu_lock(vcpu);
2499 vcpu_notify_event_locked(vcpu, lapic_intr);
2500 vcpu_unlock(vcpu);
2501 }
2502
2503 struct vmspace *
2504 vm_get_vmspace(struct vm *vm)
2505 {
2506
2507 return (vm->vmspace);
2508 }
2509
2510 int
2511 vm_apicid2vcpuid(struct vm *vm, int apicid)
2512 {
2513 /*
2514 * XXX apic id is assumed to be numerically identical to vcpu id
2515 */
2516 return (apicid);
2517 }
2518
2519 int
2520 vm_smp_rendezvous(struct vm *vm, int vcpuid, cpuset_t dest,
2521 vm_rendezvous_func_t func, void *arg)
2522 {
2523 int error, i;
2524
2525 /*
2526 * Enforce that this function is called without any locks
2527 */
2528 WITNESS_WARN(WARN_PANIC, NULL, "vm_smp_rendezvous");
2529 KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < vm->maxcpus),
2530 ("vm_smp_rendezvous: invalid vcpuid %d", vcpuid));
2531
2532 restart:
2533 mtx_lock(&vm->rendezvous_mtx);
2534 if (vm->rendezvous_func != NULL) {
2535 /*
2536 * If a rendezvous is already in progress then we need to
2537 * call the rendezvous handler in case this 'vcpuid' is one
2538 * of the targets of the rendezvous.
2539 */
2540 RENDEZVOUS_CTR0(vm, vcpuid, "Rendezvous already in progress");
2541 mtx_unlock(&vm->rendezvous_mtx);
2542 error = vm_handle_rendezvous(vm, vcpuid);
2543 if (error != 0)
2544 return (error);
2545 goto restart;
2546 }
2547 KASSERT(vm->rendezvous_func == NULL, ("vm_smp_rendezvous: previous "
2548 "rendezvous is still in progress"));
2549
2550 RENDEZVOUS_CTR0(vm, vcpuid, "Initiating rendezvous");
2551 vm->rendezvous_req_cpus = dest;
2552 CPU_ZERO(&vm->rendezvous_done_cpus);
2553 vm->rendezvous_arg = arg;
2554 vm->rendezvous_func = func;
2555 mtx_unlock(&vm->rendezvous_mtx);
2556
2557 /*
2558 * Wake up any sleeping vcpus and trigger a VM-exit in any running
2559 * vcpus so they handle the rendezvous as soon as possible.
2560 */
2561 for (i = 0; i < vm->maxcpus; i++) {
2562 if (CPU_ISSET(i, &dest))
2563 vcpu_notify_event(vm, i, false);
2564 }
2565
2566 return (vm_handle_rendezvous(vm, vcpuid));
2567 }
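
/*
 * Editor's sketch (not original source): running a callback on every
 * active vcpu at a quiescent point. 'update_cb' is a hypothetical
 * function with the vm_rendezvous_func_t signature.
 */
#if 0
	static void
	update_cb(struct vm *vm, int vcpuid, void *arg)
	{
		/* invoked once on each vcpu named in 'dest' */
	}

	error = vm_smp_rendezvous(vm, vcpuid, vm_active_cpus(vm),
	    update_cb, NULL);
#endif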
2568
2569 struct vatpic *
2570 vm_atpic(struct vm *vm)
2571 {
2572 return (vm->vatpic);
2573 }
2574
2575 struct vatpit *
2576 vm_atpit(struct vm *vm)
2577 {
2578 return (vm->vatpit);
2579 }
2580
2581 struct vpmtmr *
2582 vm_pmtmr(struct vm *vm)
2583 {
2584
2585 return (vm->vpmtmr);
2586 }
2587
2588 struct vrtc *
2589 vm_rtc(struct vm *vm)
2590 {
2591
2592 return (vm->vrtc);
2593 }
2594
2595 enum vm_reg_name
2596 vm_segment_name(int seg)
2597 {
2598 static enum vm_reg_name seg_names[] = {
2599 VM_REG_GUEST_ES,
2600 VM_REG_GUEST_CS,
2601 VM_REG_GUEST_SS,
2602 VM_REG_GUEST_DS,
2603 VM_REG_GUEST_FS,
2604 VM_REG_GUEST_GS
2605 };
2606
2607 KASSERT(seg >= 0 && seg < nitems(seg_names),
2608 ("%s: invalid segment encoding %d", __func__, seg));
2609 return (seg_names[seg]);
2610 }
2611
2612 void
2613 vm_copy_teardown(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo,
2614 int num_copyinfo)
2615 {
2616 int idx;
2617
2618 for (idx = 0; idx < num_copyinfo; idx++) {
2619 if (copyinfo[idx].cookie != NULL)
2620 vm_gpa_release(copyinfo[idx].cookie);
2621 }
2622 bzero(copyinfo, num_copyinfo * sizeof(struct vm_copyinfo));
2623 }
2624
2625 int
2626 vm_copy_setup(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
2627 uint64_t gla, size_t len, int prot, struct vm_copyinfo *copyinfo,
2628 int num_copyinfo, int *fault)
2629 {
2630 int error, idx, nused;
2631 size_t n, off, remaining;
2632 void *hva, *cookie;
2633 uint64_t gpa;
2634
2635 bzero(copyinfo, sizeof(struct vm_copyinfo) * num_copyinfo);
2636
2637 nused = 0;
2638 remaining = len;
2639 while (remaining > 0) {
2640 KASSERT(nused < num_copyinfo, ("insufficient vm_copyinfo"));
2641 error = vm_gla2gpa(vm, vcpuid, paging, gla, prot, &gpa, fault);
2642 if (error || *fault)
2643 return (error);
2644 off = gpa & PAGE_MASK;
2645 n = min(remaining, PAGE_SIZE - off);
2646 copyinfo[nused].gpa = gpa;
2647 copyinfo[nused].len = n;
2648 remaining -= n;
2649 gla += n;
2650 nused++;
2651 }
2652
2653 for (idx = 0; idx < nused; idx++) {
2654 hva = vm_gpa_hold(vm, vcpuid, copyinfo[idx].gpa,
2655 copyinfo[idx].len, prot, &cookie);
2656 if (hva == NULL)
2657 break;
2658 copyinfo[idx].hva = hva;
2659 copyinfo[idx].cookie = cookie;
2660 }
2661
2662 if (idx != nused) {
2663 vm_copy_teardown(vm, vcpuid, copyinfo, num_copyinfo);
2664 return (EFAULT);
2665 } else {
2666 *fault = 0;
2667 return (0);
2668 }
2669 }
2670
2671 void
2672 vm_copyin(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo, void *kaddr,
2673 size_t len)
2674 {
2675 char *dst;
2676 int idx;
2677
2678 dst = kaddr;
2679 idx = 0;
2680 while (len > 0) {
2681 bcopy(copyinfo[idx].hva, dst, copyinfo[idx].len);
2682 len -= copyinfo[idx].len;
2683 dst += copyinfo[idx].len;
2684 idx++;
2685 }
2686 }
2687
2688 void
2689 vm_copyout(struct vm *vm, int vcpuid, const void *kaddr,
2690 struct vm_copyinfo *copyinfo, size_t len)
2691 {
2692 const char *src;
2693 int idx;
2694
2695 src = kaddr;
2696 idx = 0;
2697 while (len > 0) {
2698 bcopy(src, copyinfo[idx].hva, copyinfo[idx].len);
2699 len -= copyinfo[idx].len;
2700 src += copyinfo[idx].len;
2701 idx++;
2702 }
2703 }
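
/*
 * Editor's sketch (not original source): the intended calling
 * sequence, assuming 'paging' and 'gla' were taken from the current
 * vm exit. Two vm_copyinfo slots cover a buffer that may straddle a
 * page boundary.
 */
#if 0
	struct vm_copyinfo copyinfo[2];
	char buf[64];
	int error, fault;

	error = vm_copy_setup(vm, vcpuid, &paging, gla, sizeof(buf),
	    PROT_READ, copyinfo, nitems(copyinfo), &fault);
	if (error == 0 && !fault) {
		vm_copyin(vm, vcpuid, copyinfo, buf, sizeof(buf));
		vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));
	}
#endif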
2704
2705 /*
2706 * Return the amount of in-use and wired memory for the VM. Since
2707 * these are global stats, only return the values for vCPU 0.
2708 */
2709 VMM_STAT_DECLARE(VMM_MEM_RESIDENT);
2710 VMM_STAT_DECLARE(VMM_MEM_WIRED);
2711
2712 static void
2713 vm_get_rescnt(struct vm *vm, int vcpu, struct vmm_stat_type *stat)
2714 {
2715
2716 if (vcpu == 0) {
2717 vmm_stat_set(vm, vcpu, VMM_MEM_RESIDENT,
2718 PAGE_SIZE * vmspace_resident_count(vm->vmspace));
2719 }
2720 }
2721
2722 static void
2723 vm_get_wiredcnt(struct vm *vm, int vcpu, struct vmm_stat_type *stat)
2724 {
2725
2726 if (vcpu == 0) {
2727 vmm_stat_set(vm, vcpu, VMM_MEM_WIRED,
2728 PAGE_SIZE * pmap_wired_count(vmspace_pmap(vm->vmspace)));
2729 }
2730 }
2731
2732 VMM_STAT_FUNC(VMM_MEM_RESIDENT, "Resident memory", vm_get_rescnt);
2733 VMM_STAT_FUNC(VMM_MEM_WIRED, "Wired memory", vm_get_wiredcnt);