1 /*
2 * Permission is hereby granted, free of charge, to any person obtaining a copy
3 * of this software and associated documentation files (the "Software"), to
4 * deal in the Software without restriction, including without limitation the
5 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
6 * sell copies of the Software, and to permit persons to whom the Software is
7 * furnished to do so, subject to the following conditions:
8 *
9 * The above copyright notice and this permission notice shall be included in
10 * all copies or substantial portions of the Software.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
15 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
16 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
17 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
18 * DEALINGS IN THE SOFTWARE.
19 *
20 * Copyright (c) 2007, Keir Fraser
21 */
22
23 #ifndef __XEN_PUBLIC_HVM_PARAMS_H__
24 #define __XEN_PUBLIC_HVM_PARAMS_H__
25
26 #include "hvm_op.h"
27
28 /* These parameters are deprecated and their meaning is undefined. */
29 #if defined(__XEN__) || defined(__XEN_TOOLS__)
30
31 #define HVM_PARAM_PAE_ENABLED 4
32 #define HVM_PARAM_DM_DOMAIN 13
33 #define HVM_PARAM_MEMORY_EVENT_CR0 20
34 #define HVM_PARAM_MEMORY_EVENT_CR3 21
35 #define HVM_PARAM_MEMORY_EVENT_CR4 22
36 #define HVM_PARAM_MEMORY_EVENT_INT3 23
37 #define HVM_PARAM_NESTEDHVM 24
38 #define HVM_PARAM_MEMORY_EVENT_SINGLE_STEP 25
39 #define HVM_PARAM_BUFIOREQ_EVTCHN 26
40 #define HVM_PARAM_MEMORY_EVENT_MSR 30
41
42 #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
43
44 /*
45 * Parameter space for HVMOP_{set,get}_param.
46 */
47
48 #define HVM_PARAM_CALLBACK_IRQ 0
49 #define HVM_PARAM_CALLBACK_IRQ_TYPE_MASK xen_mk_ullong(0xFF00000000000000)
50 /*
51 * How should CPU0 event-channel notifications be delivered?
52 *
53 * If val == 0 then CPU0 event-channel notifications are not delivered.
54 * If val != 0, val[63:56] encodes the type, as follows:
55 */
56
57 #define HVM_PARAM_CALLBACK_TYPE_GSI 0
58 /*
59 * val[55:0] is a delivery GSI. GSI 0 cannot be used, as it aliases val == 0,
60 * and disables all notifications.
61 */
62
63 #define HVM_PARAM_CALLBACK_TYPE_PCI_INTX 1
64 /*
65 * val[55:0] is a delivery PCI INTx line:
66 * Domain = val[47:32], Bus = val[31:16] DevFn = val[15:8], IntX = val[1:0]
67 */
68
69 #if defined(__i386__) || defined(__x86_64__)
70 #define HVM_PARAM_CALLBACK_TYPE_VECTOR 2
71 /*
72 * val[7:0] is a vector number. Check for XENFEAT_hvm_callback_vector to know
73 * if this delivery method is available.
74 */
75 #elif defined(__arm__) || defined(__aarch64__)
76 #define HVM_PARAM_CALLBACK_TYPE_PPI 2
77 /*
78 * val[55:16] needs to be zero.
79 * val[15:8] is interrupt flag of the PPI used by event-channel:
80 * bit 8: the PPI is edge(1) or level(0) triggered
81 * bit 9: the PPI is active low(1) or high(0)
82 * val[7:0] is a PPI number used by event-channel.
83 * This is only used by ARM/ARM64 and masking/eoi the interrupt associated to
84 * the notification is handled by the interrupt controller.
85 */
86 #define HVM_PARAM_CALLBACK_TYPE_PPI_FLAG_MASK 0xFF00
87 #define HVM_PARAM_CALLBACK_TYPE_PPI_FLAG_LOW_LEVEL 2
88 #endif
89
90 /*
91 * These are not used by Xen. They are here for convenience of HVM-guest
92 * xenbus implementations.
93 */
94 #define HVM_PARAM_STORE_PFN 1
95 #define HVM_PARAM_STORE_EVTCHN 2
96
97 #define HVM_PARAM_IOREQ_PFN 5
98
99 #define HVM_PARAM_BUFIOREQ_PFN 6
100
101 #if defined(__i386__) || defined(__x86_64__)
102
103 /*
104 * Viridian enlightenments
105 *
106 * (See http://download.microsoft.com/download/A/B/4/AB43A34E-BDD0-4FA6-BDEF-79EEF16E880B/Hypervisor%20Top%20Level%20Functional%20Specification%20v4.0.docx)
107 *
108 * To expose viridian enlightenments to the guest set this parameter
109 * to the desired feature mask. The base feature set must be present
110 * in any valid feature mask.
111 */
112 #define HVM_PARAM_VIRIDIAN 9
113
114 /* Base+Freq viridian feature sets:
115 *
116 * - Hypercall MSRs (HV_X64_MSR_GUEST_OS_ID and HV_X64_MSR_HYPERCALL)
117 * - APIC access MSRs (HV_X64_MSR_EOI, HV_X64_MSR_ICR and HV_X64_MSR_TPR)
118 * - Virtual Processor index MSR (HV_X64_MSR_VP_INDEX)
119 * - Timer frequency MSRs (HV_X64_MSR_TSC_FREQUENCY and
120 * HV_X64_MSR_APIC_FREQUENCY)
121 */
122 #define _HVMPV_base_freq 0
123 #define HVMPV_base_freq (1 << _HVMPV_base_freq)
124
125 /* Feature set modifications */
126
127 /* Disable timer frequency MSRs (HV_X64_MSR_TSC_FREQUENCY and
128 * HV_X64_MSR_APIC_FREQUENCY).
129 * This modification restores the viridian feature set to the
130 * original 'base' set exposed in releases prior to Xen 4.4.
131 */
132 #define _HVMPV_no_freq 1
133 #define HVMPV_no_freq (1 << _HVMPV_no_freq)
134
135 /* Enable Partition Time Reference Counter (HV_X64_MSR_TIME_REF_COUNT) */
136 #define _HVMPV_time_ref_count 2
137 #define HVMPV_time_ref_count (1 << _HVMPV_time_ref_count)
138
139 /* Enable Reference TSC Page (HV_X64_MSR_REFERENCE_TSC) */
140 #define _HVMPV_reference_tsc 3
141 #define HVMPV_reference_tsc (1 << _HVMPV_reference_tsc)
142
143 /* Use Hypercall for remote TLB flush */
144 #define _HVMPV_hcall_remote_tlb_flush 4
145 #define HVMPV_hcall_remote_tlb_flush (1 << _HVMPV_hcall_remote_tlb_flush)
146
147 /* Use APIC assist */
148 #define _HVMPV_apic_assist 5
149 #define HVMPV_apic_assist (1 << _HVMPV_apic_assist)
150
151 /* Enable crash MSRs */
152 #define _HVMPV_crash_ctl 6
153 #define HVMPV_crash_ctl (1 << _HVMPV_crash_ctl)
154
155 /* Enable SYNIC MSRs */
156 #define _HVMPV_synic 7
157 #define HVMPV_synic (1 << _HVMPV_synic)
158
159 /* Enable STIMER MSRs */
160 #define _HVMPV_stimer 8
161 #define HVMPV_stimer (1 << _HVMPV_stimer)
162
163 /* Use Synthetic Cluster IPI Hypercall */
164 #define _HVMPV_hcall_ipi 9
165 #define HVMPV_hcall_ipi (1 << _HVMPV_hcall_ipi)
166
167 /* Enable ExProcessorMasks */
168 #define _HVMPV_ex_processor_masks 10
169 #define HVMPV_ex_processor_masks (1 << _HVMPV_ex_processor_masks)
170
171 /* Allow more than 64 VPs */
172 #define _HVMPV_no_vp_limit 11
173 #define HVMPV_no_vp_limit (1 << _HVMPV_no_vp_limit)
174
175 /* Enable vCPU hotplug */
176 #define _HVMPV_cpu_hotplug 12
177 #define HVMPV_cpu_hotplug (1 << _HVMPV_cpu_hotplug)
178
179 #define HVMPV_feature_mask \
180 (HVMPV_base_freq | \
181 HVMPV_no_freq | \
182 HVMPV_time_ref_count | \
183 HVMPV_reference_tsc | \
184 HVMPV_hcall_remote_tlb_flush | \
185 HVMPV_apic_assist | \
186 HVMPV_crash_ctl | \
187 HVMPV_synic | \
188 HVMPV_stimer | \
189 HVMPV_hcall_ipi | \
190 HVMPV_ex_processor_masks | \
191 HVMPV_no_vp_limit | \
192 HVMPV_cpu_hotplug)
193
194 #endif
195
196 /*
197 * Set mode for virtual timers (currently x86 only):
198 * delay_for_missed_ticks (default):
199 * Do not advance a vcpu's time beyond the correct delivery time for
200 * interrupts that have been missed due to preemption. Deliver missed
201 * interrupts when the vcpu is rescheduled and advance the vcpu's virtual
202 * time stepwise for each one.
203 * no_delay_for_missed_ticks:
204 * As above, missed interrupts are delivered, but guest time always tracks
205 * wallclock (i.e., real) time while doing so.
206 * no_missed_ticks_pending:
207 * No missed interrupts are held pending. Instead, to ensure ticks are
208 * delivered at some non-zero rate, if we detect missed ticks then the
209 * internal tick alarm is not disabled if the VCPU is preempted during the
210 * next tick period.
211 * one_missed_tick_pending:
212 * Missed interrupts are collapsed together and delivered as one 'late tick'.
213 * Guest time always tracks wallclock (i.e., real) time.
214 */
215 #define HVM_PARAM_TIMER_MODE 10
216 #define HVMPTM_delay_for_missed_ticks 0
217 #define HVMPTM_no_delay_for_missed_ticks 1
218 #define HVMPTM_no_missed_ticks_pending 2
219 #define HVMPTM_one_missed_tick_pending 3
220
221 /* Boolean: Enable virtual HPET (high-precision event timer)? (x86-only) */
222 #define HVM_PARAM_HPET_ENABLED 11
223
224 /* Identity-map page directory used by Intel EPT when CR0.PG=0. */
225 #define HVM_PARAM_IDENT_PT 12
226
227 /* ACPI S state: currently support S0 and S3 on x86. */
228 #define HVM_PARAM_ACPI_S_STATE 14
229
230 /* TSS used on Intel when CR0.PE=0. */
231 #define HVM_PARAM_VM86_TSS 15
232
233 /* Boolean: Enable aligning all periodic vpts to reduce interrupts */
234 #define HVM_PARAM_VPT_ALIGN 16
235
236 /* Console debug shared memory ring and event channel */
237 #define HVM_PARAM_CONSOLE_PFN 17
238 #define HVM_PARAM_CONSOLE_EVTCHN 18
239
240 /*
241 * Select location of ACPI PM1a and TMR control blocks. Currently two locations
242 * are supported, specified by version 0 or 1 in this parameter:
243 * - 0: default, use the old addresses
244 * PM1A_EVT == 0x1f40; PM1A_CNT == 0x1f44; PM_TMR == 0x1f48
245 * - 1: use the new default qemu addresses
246 * PM1A_EVT == 0xb000; PM1A_CNT == 0xb004; PM_TMR == 0xb008
247 * You can find these address definitions in <hvm/ioreq.h>
248 */
249 #define HVM_PARAM_ACPI_IOPORTS_LOCATION 19
250
251 /* Params for the mem event rings */
252 #define HVM_PARAM_PAGING_RING_PFN 27
253 #define HVM_PARAM_MONITOR_RING_PFN 28
254 #define HVM_PARAM_SHARING_RING_PFN 29
255
256 /* SHUTDOWN_* action in case of a triple fault */
257 #define HVM_PARAM_TRIPLE_FAULT_REASON 31
258
259 #define HVM_PARAM_IOREQ_SERVER_PFN 32
260 #define HVM_PARAM_NR_IOREQ_SERVER_PAGES 33
261
262 /* Location of the VM Generation ID in guest physical address space. */
263 #define HVM_PARAM_VM_GENERATION_ID_ADDR 34
264
265 /*
266 * Set mode for altp2m:
267 * disabled: don't activate altp2m (default)
268 * mixed: allow access to all altp2m ops for both in-guest and external tools
269 * external: allow access to external privileged tools only
270 * limited: guest only has limited access (ie. control VMFUNC and #VE)
271 *
272 * Note that 'mixed' mode has not been evaluated for safety from a
273 * security perspective. Before using this mode in a
274 * security-critical environment, each subop should be evaluated for
275 * safety, with unsafe subops blacklisted in XSM.
276 */
277 #define HVM_PARAM_ALTP2M 35
278 #define XEN_ALTP2M_disabled 0
279 #define XEN_ALTP2M_mixed 1
280 #define XEN_ALTP2M_external 2
281 #define XEN_ALTP2M_limited 3
282
283 /*
284 * Size of the x87 FPU FIP/FDP registers that the hypervisor needs to
285 * save/restore. This is a workaround for a hardware limitation that
286 * does not allow the full FIP/FDP and FCS/FDS to be restored.
287 *
288 * Valid values are:
289 *
290 * 8: save/restore 64-bit FIP/FDP and clear FCS/FDS (default if CPU
291 * has FPCSDS feature).
292 *
293 * 4: save/restore 32-bit FIP/FDP, FCS/FDS, and clear upper 32-bits of
294 * FIP/FDP.
295 *
296 * 0: allow hypervisor to choose based on the value of FIP/FDP
297 * (default if CPU does not have FPCSDS).
298 *
299 * If FPCSDS (bit 13 in CPUID leaf 0x7, subleaf 0x0) is set, the CPU
300 * never saves FCS/FDS and this parameter should be left at the
301 * default of 8.
302 */
303 #define HVM_PARAM_X87_FIP_WIDTH 36
304
305 /*
306 * TSS (and its size) used on Intel when CR0.PE=0. The address occupies
307 * the low 32 bits, while the size is in the high 32 ones.
308 */
309 #define HVM_PARAM_VM86_TSS_SIZED 37
310
311 /* Enable MCA capabilities. */
312 #define HVM_PARAM_MCA_CAP 38
313 #define XEN_HVM_MCA_CAP_LMCE (xen_mk_ullong(1) << 0)
314 #define XEN_HVM_MCA_CAP_MASK XEN_HVM_MCA_CAP_LMCE
315
316 #define HVM_NR_PARAMS 39
317
318 #endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */