/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: releng/11.2/sys/amd64/include/vmm.h 331722 2018-03-29 02:50:57Z eadler $
 */

#ifndef _VMM_H_
#define	_VMM_H_

#include <x86/segments.h>

enum vm_suspend_how {
	VM_SUSPEND_NONE,
	VM_SUSPEND_RESET,
	VM_SUSPEND_POWEROFF,
	VM_SUSPEND_HALT,
	VM_SUSPEND_TRIPLEFAULT,
	VM_SUSPEND_LAST
};

/*
 * Identifiers for architecturally defined registers.
 */
enum vm_reg_name {
	VM_REG_GUEST_RAX,
	VM_REG_GUEST_RBX,
	VM_REG_GUEST_RCX,
	VM_REG_GUEST_RDX,
	VM_REG_GUEST_RSI,
	VM_REG_GUEST_RDI,
	VM_REG_GUEST_RBP,
	VM_REG_GUEST_R8,
	VM_REG_GUEST_R9,
	VM_REG_GUEST_R10,
	VM_REG_GUEST_R11,
	VM_REG_GUEST_R12,
	VM_REG_GUEST_R13,
	VM_REG_GUEST_R14,
	VM_REG_GUEST_R15,
	VM_REG_GUEST_CR0,
	VM_REG_GUEST_CR3,
	VM_REG_GUEST_CR4,
	VM_REG_GUEST_DR7,
	VM_REG_GUEST_RSP,
	VM_REG_GUEST_RIP,
	VM_REG_GUEST_RFLAGS,
	VM_REG_GUEST_ES,
	VM_REG_GUEST_CS,
	VM_REG_GUEST_SS,
	VM_REG_GUEST_DS,
	VM_REG_GUEST_FS,
	VM_REG_GUEST_GS,
	VM_REG_GUEST_LDTR,
	VM_REG_GUEST_TR,
	VM_REG_GUEST_IDTR,
	VM_REG_GUEST_GDTR,
	VM_REG_GUEST_EFER,
	VM_REG_GUEST_CR2,
	VM_REG_GUEST_PDPTE0,
	VM_REG_GUEST_PDPTE1,
	VM_REG_GUEST_PDPTE2,
	VM_REG_GUEST_PDPTE3,
	VM_REG_GUEST_INTR_SHADOW,
	VM_REG_GUEST_DR0,
	VM_REG_GUEST_DR1,
	VM_REG_GUEST_DR2,
	VM_REG_GUEST_DR3,
	VM_REG_GUEST_DR6,
	VM_REG_LAST
};

enum x2apic_state {
	X2APIC_DISABLED,
	X2APIC_ENABLED,
	X2APIC_STATE_LAST
};

#define	VM_INTINFO_VECTOR(info)	((info) & 0xff)
#define	VM_INTINFO_DEL_ERRCODE	0x800
#define	VM_INTINFO_RSVD		0x7ffff000
#define	VM_INTINFO_VALID	0x80000000
#define	VM_INTINFO_TYPE		0x700
#define	VM_INTINFO_HWINTR	(0 << 8)
#define	VM_INTINFO_NMI		(2 << 8)
#define	VM_INTINFO_HWEXCEPTION	(3 << 8)
#define	VM_INTINFO_SWINTR	(4 << 8)
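
/*
 * Example (a sketch derived from the macros above, not an additional API):
 * a pending #GP (vector 13) hardware exception that delivers an error code
 * could be encoded as
 *
 *	uint64_t intinfo = 13 | VM_INTINFO_HWEXCEPTION |
 *	    VM_INTINFO_DEL_ERRCODE | VM_INTINFO_VALID;
 *
 * VM_INTINFO_VECTOR(intinfo) then recovers 13. Bits 10:8 hold the event
 * type, bit 11 the error-code-valid flag and bit 31 the valid flag.
 */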

#ifdef _KERNEL

#define	VM_MAX_NAMELEN	32

struct vm;
struct vm_exception;
struct seg_desc;
struct vm_exit;
struct vm_run;
struct vhpet;
struct vioapic;
struct vlapic;
struct vmspace;
struct vm_object;
struct vm_guest_paging;
struct pmap;

struct vm_eventinfo {
	void	*rptr;		/* rendezvous cookie */
	int	*sptr;		/* suspend cookie */
	int	*iptr;		/* reqidle cookie */
};

typedef int	(*vmm_init_func_t)(int ipinum);
typedef int	(*vmm_cleanup_func_t)(void);
typedef void	(*vmm_resume_func_t)(void);
typedef void *	(*vmi_init_func_t)(struct vm *vm, struct pmap *pmap);
typedef int	(*vmi_run_func_t)(void *vmi, int vcpu, register_t rip,
    struct pmap *pmap, struct vm_eventinfo *info);
typedef void	(*vmi_cleanup_func_t)(void *vmi);
typedef int	(*vmi_get_register_t)(void *vmi, int vcpu, int num,
    uint64_t *retval);
typedef int	(*vmi_set_register_t)(void *vmi, int vcpu, int num,
    uint64_t val);
typedef int	(*vmi_get_desc_t)(void *vmi, int vcpu, int num,
    struct seg_desc *desc);
typedef int	(*vmi_set_desc_t)(void *vmi, int vcpu, int num,
    struct seg_desc *desc);
typedef int	(*vmi_get_cap_t)(void *vmi, int vcpu, int num, int *retval);
typedef int	(*vmi_set_cap_t)(void *vmi, int vcpu, int num, int val);
typedef struct vmspace * (*vmi_vmspace_alloc)(vm_offset_t min, vm_offset_t max);
typedef void	(*vmi_vmspace_free)(struct vmspace *vmspace);
typedef struct vlapic * (*vmi_vlapic_init)(void *vmi, int vcpu);
typedef void	(*vmi_vlapic_cleanup)(void *vmi, struct vlapic *vlapic);

struct vmm_ops {
	vmm_init_func_t		init;		/* module wide initialization */
	vmm_cleanup_func_t	cleanup;
	vmm_resume_func_t	resume;

	vmi_init_func_t		vminit;		/* vm-specific initialization */
	vmi_run_func_t		vmrun;
	vmi_cleanup_func_t	vmcleanup;
	vmi_get_register_t	vmgetreg;
	vmi_set_register_t	vmsetreg;
	vmi_get_desc_t		vmgetdesc;
	vmi_set_desc_t		vmsetdesc;
	vmi_get_cap_t		vmgetcap;
	vmi_set_cap_t		vmsetcap;
	vmi_vmspace_alloc	vmspace_alloc;
	vmi_vmspace_free	vmspace_free;
	vmi_vlapic_init		vlapic_init;
	vmi_vlapic_cleanup	vlapic_cleanup;
};

extern struct vmm_ops vmm_ops_intel;
extern struct vmm_ops vmm_ops_amd;
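
/*
 * A backend provides its implementation by filling in a 'struct vmm_ops';
 * the generic vmm layer dispatches through the selected ops table. A
 * minimal sketch (the 'ex_*' names are illustrative, not the actual
 * Intel/AMD backend functions):
 *
 *	struct vmm_ops vmm_ops_example = {
 *		.init      = ex_init,		// vmm_init_func_t
 *		.cleanup   = ex_cleanup,	// vmm_cleanup_func_t
 *		.vminit    = ex_vminit,		// vmi_init_func_t
 *		.vmrun     = ex_vmrun,		// vmi_run_func_t
 *		.vmcleanup = ex_vmcleanup,	// vmi_cleanup_func_t
 *		// ... remaining fields as declared above ...
 *	};
 */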

int vm_create(const char *name, struct vm **retvm);
void vm_destroy(struct vm *vm);
int vm_reinit(struct vm *vm);
const char *vm_name(struct vm *vm);

/*
 * APIs that modify the guest memory map require all vcpus to be frozen.
 */
int vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t off,
    size_t len, int prot, int flags);
int vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem);
void vm_free_memseg(struct vm *vm, int ident);
int vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa);
int vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len);
int vm_assign_pptdev(struct vm *vm, int bus, int slot, int func);
int vm_unassign_pptdev(struct vm *vm, int bus, int slot, int func);

/*
 * APIs that inspect the guest memory map require only a *single* vcpu to
 * be frozen. This acts like a read lock on the guest memory map since any
 * modification requires *all* vcpus to be frozen.
 */
int vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid,
    vm_ooffset_t *segoff, size_t *len, int *prot, int *flags);
int vm_get_memseg(struct vm *vm, int ident, size_t *len, bool *sysmem,
    struct vm_object **objptr);
void *vm_gpa_hold(struct vm *, int vcpuid, vm_paddr_t gpa, size_t len,
    int prot, void **cookie);
void vm_gpa_release(void *cookie);
bool vm_mem_allocated(struct vm *vm, int vcpuid, vm_paddr_t gpa);
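
/*
 * Typical access pattern (a sketch; assumes the calling vcpu is frozen as
 * required above, and that 'prot' takes VM_PROT_* values):
 *
 *	void *cookie;
 *	void *hva;
 *
 *	hva = vm_gpa_hold(vm, vcpuid, gpa, PAGE_SIZE, VM_PROT_READ, &cookie);
 *	if (hva != NULL) {
 *		// ... read guest memory through 'hva' ...
 *		vm_gpa_release(cookie);
 *	}
 */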

int vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval);
int vm_set_register(struct vm *vm, int vcpu, int reg, uint64_t val);
int vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
    struct seg_desc *ret_desc);
int vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
    struct seg_desc *desc);
int vm_run(struct vm *vm, struct vm_run *vmrun);
int vm_suspend(struct vm *vm, enum vm_suspend_how how);
int vm_inject_nmi(struct vm *vm, int vcpu);
int vm_nmi_pending(struct vm *vm, int vcpuid);
void vm_nmi_clear(struct vm *vm, int vcpuid);
int vm_inject_extint(struct vm *vm, int vcpu);
int vm_extint_pending(struct vm *vm, int vcpuid);
void vm_extint_clear(struct vm *vm, int vcpuid);
struct vlapic *vm_lapic(struct vm *vm, int cpu);
struct vioapic *vm_ioapic(struct vm *vm);
struct vhpet *vm_hpet(struct vm *vm);
int vm_get_capability(struct vm *vm, int vcpu, int type, int *val);
int vm_set_capability(struct vm *vm, int vcpu, int type, int val);
int vm_get_x2apic_state(struct vm *vm, int vcpu, enum x2apic_state *state);
int vm_set_x2apic_state(struct vm *vm, int vcpu, enum x2apic_state state);
int vm_apicid2vcpuid(struct vm *vm, int apicid);
int vm_activate_cpu(struct vm *vm, int vcpu);
struct vm_exit *vm_exitinfo(struct vm *vm, int vcpuid);
void vm_exit_suspended(struct vm *vm, int vcpuid, uint64_t rip);
void vm_exit_rendezvous(struct vm *vm, int vcpuid, uint64_t rip);
void vm_exit_astpending(struct vm *vm, int vcpuid, uint64_t rip);
void vm_exit_reqidle(struct vm *vm, int vcpuid, uint64_t rip);

#ifdef _SYS__CPUSET_H_
/*
 * Rendezvous all vcpus specified in 'dest' and execute 'func(arg)'.
 * The rendezvous 'func(arg)' is not allowed to do anything that will
 * cause the thread to be put to sleep.
 *
 * If the rendezvous is being initiated from a vcpu context then the
 * 'vcpuid' must refer to that vcpu, otherwise it should be set to -1.
 *
 * The caller cannot hold any locks when initiating the rendezvous.
 *
 * The implementation of this API may cause vcpus other than those specified
 * by 'dest' to be stalled. The caller should not rely on any vcpus making
 * forward progress when the rendezvous is in progress.
 */
typedef void (*vm_rendezvous_func_t)(struct vm *vm, int vcpuid, void *arg);
void vm_smp_rendezvous(struct vm *vm, int vcpuid, cpuset_t dest,
    vm_rendezvous_func_t func, void *arg);
cpuset_t vm_active_cpus(struct vm *vm);
cpuset_t vm_suspended_cpus(struct vm *vm);
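
/*
 * Usage sketch ('flush_cb' is a hypothetical callback, not part of this
 * API): rendezvous all active vcpus from a non-vcpu context, so 'vcpuid'
 * is -1. The callback must not sleep.
 *
 *	static void
 *	flush_cb(struct vm *vm, int vcpuid, void *arg)
 *	{
 *		// per-vcpu work
 *	}
 *
 *	vm_smp_rendezvous(vm, -1, vm_active_cpus(vm), flush_cb, NULL);
 */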
#endif	/* _SYS__CPUSET_H_ */

static __inline int
vcpu_rendezvous_pending(struct vm_eventinfo *info)
{

	return (*((uintptr_t *)(info->rptr)) != 0);
}

static __inline int
vcpu_suspended(struct vm_eventinfo *info)
{

	return (*info->sptr);
}

static __inline int
vcpu_reqidle(struct vm_eventinfo *info)
{

	return (*info->iptr);
}

/*
 * Return 1 if device indicated by bus/slot/func is supposed to be a
 * pci passthrough device.
 *
 * Return 0 otherwise.
 */
int vmm_is_pptdev(int bus, int slot, int func);

void *vm_iommu_domain(struct vm *vm);

enum vcpu_state {
	VCPU_IDLE,
	VCPU_FROZEN,
	VCPU_RUNNING,
	VCPU_SLEEPING,
};

int vcpu_set_state(struct vm *vm, int vcpu, enum vcpu_state state,
    bool from_idle);
enum vcpu_state vcpu_get_state(struct vm *vm, int vcpu, int *hostcpu);

static int __inline
vcpu_is_running(struct vm *vm, int vcpu, int *hostcpu)
{
	return (vcpu_get_state(vm, vcpu, hostcpu) == VCPU_RUNNING);
}

#ifdef _SYS_PROC_H_
static int __inline
vcpu_should_yield(struct vm *vm, int vcpu)
{

	if (curthread->td_flags & (TDF_ASTPENDING | TDF_NEEDRESCHED))
		return (1);
	else if (curthread->td_owepreempt)
		return (1);
	else
		return (0);
}
#endif

void *vcpu_stats(struct vm *vm, int vcpu);
void vcpu_notify_event(struct vm *vm, int vcpuid, bool lapic_intr);
struct vmspace *vm_get_vmspace(struct vm *vm);
struct vatpic *vm_atpic(struct vm *vm);
struct vatpit *vm_atpit(struct vm *vm);
struct vpmtmr *vm_pmtmr(struct vm *vm);
struct vrtc *vm_rtc(struct vm *vm);

/*
 * Inject exception 'vector' into the guest vcpu. This function returns 0 on
 * success and non-zero on failure.
 *
 * Wrapper functions like 'vm_inject_gp()' should be preferred to calling
 * this function directly because they enforce the trap-like or fault-like
 * behavior of an exception.
 *
 * This function should only be called in the context of the thread that is
 * executing this vcpu.
 */
int vm_inject_exception(struct vm *vm, int vcpuid, int vector, int err_valid,
    uint32_t errcode, int restart_instruction);
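
/*
 * For example, to raise #GP(0) from an emulation path, prefer
 *
 *	vm_inject_gp(vm, vcpuid);
 *
 * (declared later in this header) over an equivalent direct call such as
 * vm_inject_exception(vm, vcpuid, IDT_GP, 1, 0, 1).
 */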

/*
 * This function is called after a VM-exit that occurred during exception or
 * interrupt delivery through the IDT. The format of 'intinfo' is described
 * in Figure 15-1, "EXITINTINFO for All Intercepts", APM, Vol 2.
 *
 * If a VM-exit handler completes the event delivery successfully then it
 * should call vm_exit_intinfo() to extinguish the pending event. For example,
 * if the task switch emulation is triggered via a task gate then it should
 * call this function with 'intinfo=0' to indicate that the external event
 * is not pending anymore.
 *
 * Return value is 0 on success and non-zero on failure.
 */
int vm_exit_intinfo(struct vm *vm, int vcpuid, uint64_t intinfo);

/*
 * This function is called before every VM-entry to retrieve a pending
 * event that should be injected into the guest. This function combines
 * nested events into a double or triple fault.
 *
 * Returns 0 if there are no events that need to be injected into the guest
 * and non-zero otherwise.
 */
int vm_entry_intinfo(struct vm *vm, int vcpuid, uint64_t *info);
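
/*
 * Sketch of how these pair up around a vcpu run loop (illustrative; the
 * real loop lives in the vmm run path, not in this header):
 *
 *	uint64_t info;
 *
 *	if (vm_entry_intinfo(vm, vcpuid, &info)) {
 *		// inject 'info' (possibly a merged double fault) on entry
 *	}
 *	// ... VM-entry, guest execution, VM-exit ...
 *	if (exit_interrupted_event_delivery)
 *		vm_exit_intinfo(vm, vcpuid, exitintinfo);
 */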

int vm_get_intinfo(struct vm *vm, int vcpuid, uint64_t *info1, uint64_t *info2);

enum vm_reg_name vm_segment_name(int seg_encoding);

struct vm_copyinfo {
	uint64_t	gpa;
	size_t		len;
	void		*hva;
	void		*cookie;
};

/*
 * Set up 'copyinfo[]' to copy to/from the guest linear address space starting
 * at 'gla' and extending for 'len' bytes. The 'prot' should be set to
 * PROT_READ for a copyin or PROT_WRITE for a copyout.
 *
 * retval	is_fault	Interpretation
 *   0		   0		Success
 *   0		   1		An exception was injected into the guest
 * EFAULT	  N/A		Unrecoverable error
 *
 * The 'copyinfo[]' can be passed to 'vm_copyin()' or 'vm_copyout()' only if
 * the return value is 0. The 'copyinfo[]' resources should be freed by calling
 * 'vm_copy_teardown()' after the copy is done.
 */
int vm_copy_setup(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
    uint64_t gla, size_t len, int prot, struct vm_copyinfo *copyinfo,
    int num_copyinfo, int *is_fault);
void vm_copy_teardown(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo,
    int num_copyinfo);
void vm_copyin(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo,
    void *kaddr, size_t len);
void vm_copyout(struct vm *vm, int vcpuid, const void *kaddr,
    struct vm_copyinfo *copyinfo, size_t len);
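
/*
 * Copy-in sketch under the contract above ('buf', 'gla', 'len' and
 * 'paging' are caller-supplied; two copyinfo entries cover a
 * page-crossing range):
 *
 *	struct vm_copyinfo copyinfo[2];
 *	int error, fault;
 *
 *	error = vm_copy_setup(vm, vcpuid, &paging, gla, len, PROT_READ,
 *	    copyinfo, nitems(copyinfo), &fault);
 *	if (error == 0 && !fault) {
 *		vm_copyin(vm, vcpuid, copyinfo, buf, len);
 *		vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));
 *	}
 */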

int vcpu_trace_exceptions(struct vm *vm, int vcpuid);
#endif	/* _KERNEL */

#define	VM_MAXCPU	16	/* maximum virtual cpus */

/*
 * Identifiers for optional vmm capabilities
 */
enum vm_cap_type {
	VM_CAP_HALT_EXIT,
	VM_CAP_MTRAP_EXIT,
	VM_CAP_PAUSE_EXIT,
	VM_CAP_UNRESTRICTED_GUEST,
	VM_CAP_ENABLE_INVPCID,
	VM_CAP_MAX
};

enum vm_intr_trigger {
	EDGE_TRIGGER,
	LEVEL_TRIGGER
};

/*
 * The 'access' field has the format specified in Table 21-2 of the Intel
 * Architecture Manual vol 3b.
 *
 * XXX The contents of the 'access' field are architecturally defined except
 * bit 16 - Segment Unusable.
 */
struct seg_desc {
	uint64_t	base;
	uint32_t	limit;
	uint32_t	access;
};
#define	SEG_DESC_TYPE(access)		((access) & 0x001f)
#define	SEG_DESC_DPL(access)		(((access) >> 5) & 0x3)
#define	SEG_DESC_PRESENT(access)	(((access) & 0x0080) ? 1 : 0)
#define	SEG_DESC_DEF32(access)		(((access) & 0x4000) ? 1 : 0)
#define	SEG_DESC_GRANULARITY(access)	(((access) & 0x8000) ? 1 : 0)
#define	SEG_DESC_UNUSABLE(access)	(((access) & 0x10000) ? 1 : 0)
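
/*
 * For example, an 'access' value of 0xc09b (a flat 32-bit code segment)
 * decodes via the macros above as:
 *
 *	SEG_DESC_TYPE(0xc09b)		== 0x1b	(code, execute/read, accessed)
 *	SEG_DESC_DPL(0xc09b)		== 0
 *	SEG_DESC_PRESENT(0xc09b)	== 1
 *	SEG_DESC_DEF32(0xc09b)		== 1
 *	SEG_DESC_GRANULARITY(0xc09b)	== 1
 */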

enum vm_cpu_mode {
	CPU_MODE_REAL,
	CPU_MODE_PROTECTED,
	CPU_MODE_COMPATIBILITY,		/* IA-32E mode (CS.L = 0) */
	CPU_MODE_64BIT,			/* IA-32E mode (CS.L = 1) */
};

enum vm_paging_mode {
	PAGING_MODE_FLAT,
	PAGING_MODE_32,
	PAGING_MODE_PAE,
	PAGING_MODE_64,
};

struct vm_guest_paging {
	uint64_t	cr3;
	int		cpl;
	enum vm_cpu_mode cpu_mode;
	enum vm_paging_mode paging_mode;
};

/*
 * The data structures 'vie' and 'vie_op' are meant to be opaque to the
 * consumers of instruction decoding. The only reason why their contents
 * need to be exposed is because they are part of the 'vm_exit' structure.
 */
struct vie_op {
	uint8_t		op_byte;	/* actual opcode byte */
	uint8_t		op_type;	/* type of operation (e.g. MOV) */
	uint16_t	op_flags;
};

#define	VIE_INST_SIZE	15
struct vie {
	uint8_t		inst[VIE_INST_SIZE];	/* instruction bytes */
	uint8_t		num_valid;		/* size of the instruction */
	uint8_t		num_processed;

	uint8_t		addrsize:4, opsize:4;	/* address and operand sizes */
	uint8_t		rex_w:1,		/* REX prefix */
			rex_r:1,
			rex_x:1,
			rex_b:1,
			rex_present:1,
			repz_present:1,		/* REP/REPE/REPZ prefix */
			repnz_present:1,	/* REPNE/REPNZ prefix */
			opsize_override:1,	/* Operand size override */
			addrsize_override:1,	/* Address size override */
			segment_override:1;	/* Segment override */

	uint8_t		mod:2,			/* ModRM byte */
			reg:4,
			rm:4;

	uint8_t		ss:2,			/* SIB byte */
			index:4,
			base:4;

	uint8_t		disp_bytes;
	uint8_t		imm_bytes;

	uint8_t		scale;
	int		base_register;		/* VM_REG_GUEST_xyz */
	int		index_register;		/* VM_REG_GUEST_xyz */
	int		segment_register;	/* VM_REG_GUEST_xyz */

	int64_t		displacement;		/* optional addr displacement */
	int64_t		immediate;		/* optional immediate operand */

	uint8_t		decoded;	/* set to 1 if successfully decoded */

	struct vie_op	op;			/* opcode description */
};

enum vm_exitcode {
	VM_EXITCODE_INOUT,
	VM_EXITCODE_VMX,
	VM_EXITCODE_BOGUS,
	VM_EXITCODE_RDMSR,
	VM_EXITCODE_WRMSR,
	VM_EXITCODE_HLT,
	VM_EXITCODE_MTRAP,
	VM_EXITCODE_PAUSE,
	VM_EXITCODE_PAGING,
	VM_EXITCODE_INST_EMUL,
	VM_EXITCODE_SPINUP_AP,
	VM_EXITCODE_DEPRECATED1,	/* used to be SPINDOWN_CPU */
	VM_EXITCODE_RENDEZVOUS,
	VM_EXITCODE_IOAPIC_EOI,
	VM_EXITCODE_SUSPENDED,
	VM_EXITCODE_INOUT_STR,
	VM_EXITCODE_TASK_SWITCH,
	VM_EXITCODE_MONITOR,
	VM_EXITCODE_MWAIT,
	VM_EXITCODE_SVM,
	VM_EXITCODE_REQIDLE,
	VM_EXITCODE_MAX
};

struct vm_inout {
	uint16_t	bytes:3;	/* 1 or 2 or 4 */
	uint16_t	in:1;
	uint16_t	string:1;
	uint16_t	rep:1;
	uint16_t	port;
	uint32_t	eax;		/* valid for out */
};

struct vm_inout_str {
	struct vm_inout	inout;		/* must be the first element */
	struct vm_guest_paging paging;
	uint64_t	rflags;
	uint64_t	cr0;
	uint64_t	index;
	uint64_t	count;		/* rep=1 (%rcx), rep=0 (1) */
	int		addrsize;
	enum vm_reg_name seg_name;
	struct seg_desc seg_desc;
};

enum task_switch_reason {
	TSR_CALL,
	TSR_IRET,
	TSR_JMP,
	TSR_IDT_GATE,	/* task gate in IDT */
};

struct vm_task_switch {
	uint16_t	tsssel;		/* new TSS selector */
	int		ext;		/* task switch due to external event */
	uint32_t	errcode;
	int		errcode_valid;	/* push 'errcode' on the new stack */
	enum task_switch_reason reason;
	struct vm_guest_paging paging;
};

struct vm_exit {
	enum vm_exitcode	exitcode;
	int			inst_length;	/* 0 means unknown */
	uint64_t		rip;
	union {
		struct vm_inout	inout;
		struct vm_inout_str inout_str;
		struct {
			uint64_t	gpa;
			int		fault_type;
		} paging;
		struct {
			uint64_t	gpa;
			uint64_t	gla;
			uint64_t	cs_base;
			int		cs_d;		/* CS.D */
			struct vm_guest_paging paging;
			struct vie	vie;
		} inst_emul;
		/*
		 * VMX specific payload. Used when there is no "better"
		 * exitcode to represent the VM-exit.
		 */
		struct {
			int		status;		/* vmx inst status */
			/*
			 * 'exit_reason' and 'exit_qualification' are valid
			 * only if 'status' is zero.
			 */
			uint32_t	exit_reason;
			uint64_t	exit_qualification;
			/*
			 * 'inst_error' and 'inst_type' are valid
			 * only if 'status' is non-zero.
			 */
			int		inst_type;
			int		inst_error;
		} vmx;
		/*
		 * SVM specific payload.
		 */
		struct {
			uint64_t	exitcode;
			uint64_t	exitinfo1;
			uint64_t	exitinfo2;
		} svm;
		struct {
			uint32_t	code;		/* ecx value */
			uint64_t	wval;
		} msr;
		struct {
			int		vcpu;
			uint64_t	rip;
		} spinup_ap;
		struct {
			uint64_t	rflags;
		} hlt;
		struct {
			int		vector;
		} ioapic_eoi;
		struct {
			enum vm_suspend_how how;
		} suspended;
		struct vm_task_switch task_switch;
	} u;
};
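
/*
 * Consumers typically switch on 'exitcode' and read the matching member of
 * the union; a sketch (the handler names are hypothetical):
 *
 *	switch (vmexit->exitcode) {
 *	case VM_EXITCODE_INOUT:
 *		handle_inout(&vmexit->u.inout);
 *		break;
 *	case VM_EXITCODE_SUSPENDED:
 *		handle_suspend(vmexit->u.suspended.how);
 *		break;
 *	default:
 *		// unhandled exit
 *		break;
 *	}
 */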

/* APIs to inject faults into the guest */
void vm_inject_fault(void *vm, int vcpuid, int vector, int errcode_valid,
    int errcode);

static __inline void
vm_inject_ud(void *vm, int vcpuid)
{
	vm_inject_fault(vm, vcpuid, IDT_UD, 0, 0);
}

static __inline void
vm_inject_gp(void *vm, int vcpuid)
{
	vm_inject_fault(vm, vcpuid, IDT_GP, 1, 0);
}

static __inline void
vm_inject_ac(void *vm, int vcpuid, int errcode)
{
	vm_inject_fault(vm, vcpuid, IDT_AC, 1, errcode);
}

static __inline void
vm_inject_ss(void *vm, int vcpuid, int errcode)
{
	vm_inject_fault(vm, vcpuid, IDT_SS, 1, errcode);
}

void vm_inject_pf(void *vm, int vcpuid, int error_code, uint64_t cr2);

int vm_restart_instruction(void *vm, int vcpuid);

#endif	/* _VMM_H_ */