FreeBSD/Linux Kernel Cross Reference
sys/amd64/vmm/amd/vmcb.c
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013 Anish Gupta (akgupt3@gmail.com)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_bhyve_snapshot.h"

#include <sys/param.h>
#include <sys/systm.h>

#include <machine/segments.h>
#include <machine/specialreg.h>
#include <machine/vmm.h>
#include <machine/vmm_snapshot.h>

#include "vmm_ktr.h"

#include "vmcb.h"
#include "svm.h"
#include "svm_softc.h"
/*
 * The VMCB (Virtual Machine Control Block) is a 4KB-aligned page in
 * memory that describes the virtual machine.
 *
 * The VMCB contains:
 * - instructions or events in the guest to intercept
 * - control bits that modify the execution environment of the guest
 * - guest processor state (e.g. general purpose registers)
 */
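
/*
 * Layout sketch (a non-authoritative summary; vmcb.h and the "VMCB Layout"
 * appendix of APMv2 are the real references): the page is divided into a
 * control area followed by a state-save area that begins at offset 0x400.
 *
 *	struct vmcb {
 *		struct vmcb_ctrl  ctrl;		offset 0x000: intercepts,
 *						TLB/ASID control, event
 *						injection, exit information
 *		struct vmcb_state state;	offset 0x400: guest segment,
 *						control, debug and general
 *						purpose registers
 *	};
 */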

/*
 * Return a pointer to the VMCB segment area for the given segment
 * register type, or NULL if 'type' is not a segment register.
 */
static struct vmcb_segment *
vmcb_segptr(struct vmcb *vmcb, int type)
{
	struct vmcb_state *state;
	struct vmcb_segment *seg;

	state = &vmcb->state;

	switch (type) {
	case VM_REG_GUEST_CS:
		seg = &state->cs;
		break;

	case VM_REG_GUEST_DS:
		seg = &state->ds;
		break;

	case VM_REG_GUEST_ES:
		seg = &state->es;
		break;

	case VM_REG_GUEST_FS:
		seg = &state->fs;
		break;

	case VM_REG_GUEST_GS:
		seg = &state->gs;
		break;

	case VM_REG_GUEST_SS:
		seg = &state->ss;
		break;

	case VM_REG_GUEST_GDTR:
		seg = &state->gdt;
		break;

	case VM_REG_GUEST_IDTR:
		seg = &state->idt;
		break;

	case VM_REG_GUEST_LDTR:
		seg = &state->ldt;
		break;

	case VM_REG_GUEST_TR:
		seg = &state->tr;
		break;

	default:
		seg = NULL;
		break;
	}

	return (seg);
}
117
118 static int
119 vmcb_access(struct svm_vcpu *vcpu, int write, int ident, uint64_t *val)
120 {
121 struct vmcb *vmcb;
122 int off, bytes;
123 char *ptr;
124
125 vmcb = svm_get_vmcb(vcpu);
126 off = VMCB_ACCESS_OFFSET(ident);
127 bytes = VMCB_ACCESS_BYTES(ident);
128
129 if ((off + bytes) >= sizeof (struct vmcb))
130 return (EINVAL);
131
132 ptr = (char *)vmcb;
133
134 if (!write)
135 *val = 0;
136
137 switch (bytes) {
138 case 8:
139 case 4:
140 case 2:
141 case 1:
142 if (write)
143 memcpy(ptr + off, val, bytes);
144 else
145 memcpy(val, ptr + off, bytes);
146 break;
	default:
		SVM_CTR1(vcpu, "Invalid size %d for VMCB access", bytes);
		return (EINVAL);
	}

	/* Invalidate all VMCB state cached by h/w. */
	if (write)
		svm_set_dirty(vcpu, 0xffffffff);

	return (0);
}
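
/*
 * Usage sketch (an illustration, not a caller from the tree): raw VMCB
 * fields can be read and written through vmcb_read()/vmcb_write() below
 * by encoding the field's offset and width with the
 * VMCB_ACCESS(offset, bytes) macro from vmcb.h, whose
 * VMCB_ACCESS_OK/OFFSET/BYTES counterparts are decoded above:
 *
 *	uint64_t val;
 *	int error;
 *
 *	error = vmcb_read(vcpu, VMCB_ACCESS(off, 4), &val);
 *	if (error == 0)
 *		error = vmcb_write(vcpu, VMCB_ACCESS(off, 4), val);
 */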

/*
 * Read a segment selector, control register or general-purpose register
 * from the VMCB.
 */
int
vmcb_read(struct svm_vcpu *vcpu, int ident, uint64_t *retval)
{
	struct vmcb *vmcb;
	struct vmcb_state *state;
	struct vmcb_segment *seg;
	int err;

	vmcb = svm_get_vmcb(vcpu);
	state = &vmcb->state;
	err = 0;

	if (VMCB_ACCESS_OK(ident))
		return (vmcb_access(vcpu, 0, ident, retval));

	switch (ident) {
	case VM_REG_GUEST_CR0:
		*retval = state->cr0;
		break;

	case VM_REG_GUEST_CR2:
		*retval = state->cr2;
		break;

	case VM_REG_GUEST_CR3:
		*retval = state->cr3;
		break;

	case VM_REG_GUEST_CR4:
		*retval = state->cr4;
		break;

	case VM_REG_GUEST_DR6:
		*retval = state->dr6;
		break;

	case VM_REG_GUEST_DR7:
		*retval = state->dr7;
		break;

	case VM_REG_GUEST_EFER:
		*retval = state->efer;
		break;

	case VM_REG_GUEST_RAX:
		*retval = state->rax;
		break;

	case VM_REG_GUEST_RFLAGS:
		*retval = state->rflags;
		break;

	case VM_REG_GUEST_RIP:
		*retval = state->rip;
		break;

	case VM_REG_GUEST_RSP:
		*retval = state->rsp;
		break;

	case VM_REG_GUEST_CS:
	case VM_REG_GUEST_DS:
	case VM_REG_GUEST_ES:
	case VM_REG_GUEST_FS:
	case VM_REG_GUEST_GS:
	case VM_REG_GUEST_SS:
	case VM_REG_GUEST_LDTR:
	case VM_REG_GUEST_TR:
		seg = vmcb_segptr(vmcb, ident);
		KASSERT(seg != NULL, ("%s: unable to get segment %d from VMCB",
		    __func__, ident));
		*retval = seg->selector;
		break;

	case VM_REG_GUEST_GDTR:
	case VM_REG_GUEST_IDTR:
		/* GDTR and IDTR don't have segment selectors */
		err = EINVAL;
		break;
	default:
		err = EINVAL;
		break;
	}

	return (err);
}

/*
 * Write a segment selector, control register or general-purpose register
 * to the VMCB.
 */
int
vmcb_write(struct svm_vcpu *vcpu, int ident, uint64_t val)
{
	struct vmcb *vmcb;
	struct vmcb_state *state;
	struct vmcb_segment *seg;
	int err, dirtyseg;

	vmcb = svm_get_vmcb(vcpu);
	state = &vmcb->state;
	dirtyseg = 0;
	err = 0;

	if (VMCB_ACCESS_OK(ident))
		return (vmcb_access(vcpu, 1, ident, &val));

	switch (ident) {
	case VM_REG_GUEST_CR0:
		state->cr0 = val;
		svm_set_dirty(vcpu, VMCB_CACHE_CR);
		break;

	case VM_REG_GUEST_CR2:
		state->cr2 = val;
		svm_set_dirty(vcpu, VMCB_CACHE_CR2);
		break;

	case VM_REG_GUEST_CR3:
		state->cr3 = val;
		svm_set_dirty(vcpu, VMCB_CACHE_CR);
		break;

	case VM_REG_GUEST_CR4:
		state->cr4 = val;
		svm_set_dirty(vcpu, VMCB_CACHE_CR);
		break;

	case VM_REG_GUEST_DR6:
		state->dr6 = val;
		svm_set_dirty(vcpu, VMCB_CACHE_DR);
		break;

	case VM_REG_GUEST_DR7:
		state->dr7 = val;
		svm_set_dirty(vcpu, VMCB_CACHE_DR);
		break;

	case VM_REG_GUEST_EFER:
		/* EFER_SVM must always be set when the guest is executing */
		state->efer = val | EFER_SVM;
		svm_set_dirty(vcpu, VMCB_CACHE_CR);
		break;

	case VM_REG_GUEST_RAX:
		state->rax = val;
		break;

	case VM_REG_GUEST_RFLAGS:
		state->rflags = val;
		break;

	case VM_REG_GUEST_RIP:
		state->rip = val;
		break;

	case VM_REG_GUEST_RSP:
		state->rsp = val;
		break;

	case VM_REG_GUEST_CS:
	case VM_REG_GUEST_DS:
	case VM_REG_GUEST_ES:
	case VM_REG_GUEST_SS:
		dirtyseg = 1;		/* FALLTHROUGH */
	case VM_REG_GUEST_FS:
	case VM_REG_GUEST_GS:
	case VM_REG_GUEST_LDTR:
	case VM_REG_GUEST_TR:
		seg = vmcb_segptr(vmcb, ident);
		KASSERT(seg != NULL, ("%s: unable to get segment %d from VMCB",
		    __func__, ident));
		seg->selector = val;
		if (dirtyseg)
			svm_set_dirty(vcpu, VMCB_CACHE_SEG);
		break;

	case VM_REG_GUEST_GDTR:
	case VM_REG_GUEST_IDTR:
		/* GDTR and IDTR don't have segment selectors */
		err = EINVAL;
		break;
	default:
		err = EINVAL;
		break;
	}

	return (err);
}
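
/*
 * Usage sketch (hypothetical caller; 'rip' and 'inst_len' are illustrative
 * locals, not part of this file's API): advancing the guest %rip after
 * emulating an instruction.
 *
 *	error = vmcb_read(vcpu, VM_REG_GUEST_RIP, &rip);
 *	if (error == 0)
 *		error = vmcb_write(vcpu, VM_REG_GUEST_RIP, rip + inst_len);
 */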

int
vmcb_seg(struct vmcb *vmcb, int ident, struct vmcb_segment *seg2)
{
	struct vmcb_segment *seg;

	seg = vmcb_segptr(vmcb, ident);
	if (seg != NULL) {
		bcopy(seg, seg2, sizeof(struct vmcb_segment));
		return (0);
	} else {
		return (EINVAL);
	}
}

int
vmcb_setdesc(struct svm_vcpu *vcpu, int reg, struct seg_desc *desc)
{
	struct vmcb *vmcb;
	struct vmcb_segment *seg;
	uint16_t attrib;

	vmcb = svm_get_vmcb(vcpu);

	seg = vmcb_segptr(vmcb, reg);
	KASSERT(seg != NULL, ("%s: invalid segment descriptor %d",
	    __func__, reg));

	seg->base = desc->base;
	seg->limit = desc->limit;
	if (reg != VM_REG_GUEST_GDTR && reg != VM_REG_GUEST_IDTR) {
		/*
		 * Map seg_desc access to VMCB attribute format.
		 *
		 * SVM uses the 'P' bit in the segment attributes to indicate a
		 * NULL segment so clear it if the segment is marked unusable.
		 */
		attrib = ((desc->access & 0xF000) >> 4) | (desc->access & 0xFF);
		if (SEG_DESC_UNUSABLE(desc->access)) {
			attrib &= ~0x80;
		}
		seg->attrib = attrib;
	}
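	/*
	 * Worked example (illustrative values, not from the original
	 * source): a present 64-bit code segment with seg_desc access
	 * 0xa09b (G=1, L=1, P=1, type 0xb) becomes
	 * ((0xa09b & 0xf000) >> 4) | (0xa09b & 0xff) = 0xa00 | 0x9b = 0xa9b,
	 * i.e. access bits 15:12 are packed down adjacent to bits 7:0 to
	 * form the 12-bit SVM attribute field.
	 */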

	SVM_CTR4(vcpu, "Setting desc %d: base (%#lx), limit (%#x), "
	    "attrib (%#x)", reg, seg->base, seg->limit, seg->attrib);

	switch (reg) {
	case VM_REG_GUEST_CS:
	case VM_REG_GUEST_DS:
	case VM_REG_GUEST_ES:
	case VM_REG_GUEST_SS:
		svm_set_dirty(vcpu, VMCB_CACHE_SEG);
		break;
	case VM_REG_GUEST_GDTR:
	case VM_REG_GUEST_IDTR:
		svm_set_dirty(vcpu, VMCB_CACHE_DT);
		break;
	default:
		break;
	}

	return (0);
}

int
vmcb_getdesc(struct svm_vcpu *vcpu, int reg, struct seg_desc *desc)
{
	struct vmcb *vmcb;
	struct vmcb_segment *seg;

	vmcb = svm_get_vmcb(vcpu);
	seg = vmcb_segptr(vmcb, reg);
	KASSERT(seg != NULL, ("%s: invalid segment descriptor %d",
	    __func__, reg));

	desc->base = seg->base;
	desc->limit = seg->limit;
	desc->access = 0;

	if (reg != VM_REG_GUEST_GDTR && reg != VM_REG_GUEST_IDTR) {
		/* Map VMCB segment attributes back to seg_desc access format */
		desc->access = ((seg->attrib & 0xF00) << 4) |
		    (seg->attrib & 0xFF);

		/*
		 * VT-x uses bit 16 to indicate a segment that has been loaded
		 * with a NULL selector (aka unusable). The 'desc->access'
		 * field is interpreted in the VT-x format by the
		 * processor-independent code.
		 *
		 * SVM uses the 'P' bit to convey the same information so
		 * convert it into the VT-x format. For more details refer to
		 * section "Segment State in the VMCB" in APMv2.
		 */
		if (reg != VM_REG_GUEST_CS && reg != VM_REG_GUEST_TR) {
			if ((desc->access & 0x80) == 0)
				desc->access |= 0x10000;  /* Unusable segment */
		}
	}
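	/*
	 * Worked example (illustrative): a NULL data segment saved by SVM
	 * carries attrib 0x000 (P clear), so the conversion above yields
	 * desc->access = 0x10000, which the VT-x oriented instruction
	 * emulation code treats as an unusable segment.
	 */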

	return (0);
}

#ifdef BHYVE_SNAPSHOT
int
vmcb_getany(struct svm_vcpu *vcpu, int ident, uint64_t *val)
{
	int error = 0;

	if (ident >= VM_REG_LAST) {
		error = EINVAL;
		goto err;
	}

	error = vmcb_read(vcpu, ident, val);

err:
	return (error);
}

int
vmcb_setany(struct svm_vcpu *vcpu, int ident, uint64_t val)
{
	int error = 0;

	if (ident >= VM_REG_LAST) {
		error = EINVAL;
		goto err;
	}

	error = vmcb_write(vcpu, ident, val);

err:
	return (error);
}

int
vmcb_snapshot_desc(struct svm_vcpu *vcpu, int reg,
    struct vm_snapshot_meta *meta)
{
	int ret;
	struct seg_desc desc;

	if (meta->op == VM_SNAPSHOT_SAVE) {
		ret = vmcb_getdesc(vcpu, reg, &desc);
		if (ret != 0)
			goto done;

		SNAPSHOT_VAR_OR_LEAVE(desc.base, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(desc.limit, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(desc.access, meta, ret, done);
	} else if (meta->op == VM_SNAPSHOT_RESTORE) {
		SNAPSHOT_VAR_OR_LEAVE(desc.base, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(desc.limit, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(desc.access, meta, ret, done);

		ret = vmcb_setdesc(vcpu, reg, &desc);
		if (ret != 0)
			goto done;
	} else {
		ret = EINVAL;
		goto done;
	}

done:
	return (ret);
}
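
/*
 * Usage sketch (hypothetical caller): vmcb_snapshot_desc() is meant to be
 * driven with the same vm_snapshot_meta on both the save and the restore
 * path, once per segment register, e.g.:
 *
 *	error = vmcb_snapshot_desc(vcpu, VM_REG_GUEST_CS, meta);
 *	if (error == 0)
 *		error = vmcb_snapshot_desc(vcpu, VM_REG_GUEST_SS, meta);
 */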

int
vmcb_snapshot_any(struct svm_vcpu *vcpu, int ident,
    struct vm_snapshot_meta *meta)
{
	int ret;
	uint64_t val;

	if (meta->op == VM_SNAPSHOT_SAVE) {
		ret = vmcb_getany(vcpu, ident, &val);
		if (ret != 0)
			goto done;

		SNAPSHOT_VAR_OR_LEAVE(val, meta, ret, done);
	} else if (meta->op == VM_SNAPSHOT_RESTORE) {
		SNAPSHOT_VAR_OR_LEAVE(val, meta, ret, done);

		ret = vmcb_setany(vcpu, ident, val);
		if (ret != 0)
			goto done;
	} else {
		ret = EINVAL;
		goto done;
	}

done:
	return (ret);
}
#endif