/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2014, Neel Natu (neel@freebsd.org)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_bhyve_snapshot.h"

#include <sys/param.h>
#include <sys/errno.h>
#include <sys/systm.h>

#include <machine/cpufunc.h>
#include <machine/specialreg.h>
#include <machine/vmm.h>

#include "svm.h"
#include "vmcb.h"
#include "svm_softc.h"
#include "svm_msr.h"

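/*
 * Fallback definition for <machine/specialreg.h> headers that do not
 * provide the "Interrupt Pending Message" MSR number.
 */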
#ifndef MSR_AMDK8_IPM
#define	MSR_AMDK8_IPM	0xc0010055
#endif

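/*
 * Host MSRs that are saved once at initialization and restored by hand
 * after each stint in the guest; the enum names index into host_msrs[].
 */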
enum {
	IDX_MSR_LSTAR,
	IDX_MSR_CSTAR,
	IDX_MSR_STAR,
	IDX_MSR_SF_MASK,
	HOST_MSR_NUM		/* must be the last enumeration */
};

static uint64_t host_msrs[HOST_MSR_NUM];

void
svm_msr_init(void)
{
	/*
	 * It is safe to cache the values of the following MSRs because they
	 * don't change based on curcpu, curproc or curthread.
	 */
	host_msrs[IDX_MSR_LSTAR] = rdmsr(MSR_LSTAR);
	host_msrs[IDX_MSR_CSTAR] = rdmsr(MSR_CSTAR);
	host_msrs[IDX_MSR_STAR] = rdmsr(MSR_STAR);
	host_msrs[IDX_MSR_SF_MASK] = rdmsr(MSR_SF_MASK);
}
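
/*
 * Note: svm_msr_init() is expected to run once during module load (the
 * call site is assumed to be svm_modinit() in svm.c), before any vcpu
 * can enter the guest.
 */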

void
svm_msr_guest_init(struct svm_softc *sc, struct svm_vcpu *vcpu)
{
	/*
	 * All the MSRs accessible to the guest are either saved/restored by
	 * hardware on every #VMEXIT/VMRUN (e.g., G_PAT) or are saved/restored
	 * by VMSAVE/VMLOAD (e.g., MSR_GSBASE).
	 *
	 * There are no guest MSRs that are saved/restored "by hand" so nothing
	 * more to do here.
	 */
	return;
}
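
/*
 * The empty hook above is assumed to exist mainly for symmetry with the
 * VT-x code (vmx_msr.c), which does track per-vcpu MSR state in software.
 */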

void
svm_msr_guest_enter(struct svm_vcpu *vcpu)
{
	/*
	 * Save host MSRs (if any) and restore guest MSRs (if any).
	 */
}

void
svm_msr_guest_exit(struct svm_vcpu *vcpu)
{
	/*
	 * Save guest MSRs (if any) and restore host MSRs.
	 */
	wrmsr(MSR_LSTAR, host_msrs[IDX_MSR_LSTAR]);
	wrmsr(MSR_CSTAR, host_msrs[IDX_MSR_CSTAR]);
	wrmsr(MSR_STAR, host_msrs[IDX_MSR_STAR]);
	wrmsr(MSR_SF_MASK, host_msrs[IDX_MSR_SF_MASK]);

	/* MSR_KGSBASE will be restored on the way back to userspace */
}
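
/*
 * A simplified sketch of how the enter/exit hooks are assumed to bracket
 * the VMRUN loop in svm.c (not the literal call sites):
 *
 *	svm_msr_guest_enter(vcpu);
 *	do {
 *		...
 *		vmload; vmrun; vmsave;	(guest syscall MSRs now live)
 *		...
 *	} while (exit handled in kernel);
 *	svm_msr_guest_exit(vcpu);	(host syscall MSRs valid again)
 */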

int
svm_rdmsr(struct svm_vcpu *vcpu, u_int num, uint64_t *result, bool *retu)
{
	int error = 0;

	switch (num) {
	case MSR_MCG_CAP:
	case MSR_MCG_STATUS:
		/* Report no machine-check capability to the guest. */
		*result = 0;
		break;
	case MSR_MTRRcap:
	case MSR_MTRRdefType:
	case MSR_MTRR4kBase ... MSR_MTRR4kBase + 7:
	case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
	case MSR_MTRR64kBase:
	case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1:
		/* Defer to the common MTRR emulation; a bad MSR gets #GP. */
		if (vm_rdmtrr(&vcpu->mtrr, num, result) != 0) {
			vm_inject_gp(vcpu->vcpu);
		}
		break;
	case MSR_SYSCFG:
	case MSR_AMDK8_IPM:
	case MSR_EXTFEATURES:
		*result = 0;
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}
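
/*
 * For both svm_rdmsr() and svm_wrmsr(), an EINVAL return tells the caller
 * (the #VMEXIT handler in svm.c) that the MSR is not emulated here; the
 * caller then decides how to complete the access, e.g. by bouncing the
 * exit to userspace or injecting #GP into the guest.
 */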

int
svm_wrmsr(struct svm_vcpu *vcpu, u_int num, uint64_t val, bool *retu)
{
	int error = 0;

	switch (num) {
	case MSR_MCG_CAP:
	case MSR_MCG_STATUS:
		break;		/* ignore writes */
	case MSR_MTRRcap:
	case MSR_MTRRdefType:
	case MSR_MTRR4kBase ... MSR_MTRR4kBase + 7:
	case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
	case MSR_MTRR64kBase:
	case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1:
		/* Defer to the common MTRR emulation; a bad MSR gets #GP. */
		if (vm_wrmtrr(&vcpu->mtrr, num, val) != 0) {
			vm_inject_gp(vcpu->vcpu);
		}
		break;
	case MSR_SYSCFG:
		break;		/* ignore writes */
	case MSR_AMDK8_IPM:
		/*
		 * Ignore writes to the "Interrupt Pending Message" MSR.
		 */
		break;
	case MSR_K8_UCODE_UPDATE:
		/*
		 * Ignore writes to microcode update register.
		 */
		break;
#ifdef BHYVE_SNAPSHOT
	case MSR_TSC:
		/* Emulate the write by adjusting the guest's TSC offset. */
		svm_set_tsc_offset(vcpu, val - rdtsc());
		break;
#endif
	case MSR_EXTFEATURES:
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}