1 /*
2 * Permission is hereby granted, free of charge, to any person obtaining a copy
3 * of this software and associated documentation files (the "Software"), to
4 * deal in the Software without restriction, including without limitation the
5 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
6 * sell copies of the Software, and to permit persons to whom the Software is
7 * furnished to do so, subject to the following conditions:
8 *
9 * The above copyright notice and this permission notice shall be included in
10 * all copies or substantial portions of the Software.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
15 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
16 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
17 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
18 * DEALINGS IN THE SOFTWARE.
19 */
20
21 #ifndef __XEN_PUBLIC_HVM_HVM_OP_H__
22 #define __XEN_PUBLIC_HVM_HVM_OP_H__
23
24 #include "../xen.h"
25 #include "../trace.h"
26
27 /* Get/set subcommands: extra argument == pointer to xen_hvm_param struct. */
28 #define HVMOP_set_param 0
29 #define HVMOP_get_param 1
/*
 * Argument for HVMOP_set_param / HVMOP_get_param: read or write one
 * HVM parameter (selected by @index) of domain @domid.
 */
struct xen_hvm_param {
    domid_t domid;    /* IN: domain whose parameter is accessed */
    uint32_t index;   /* IN: HVM_PARAM_* index to get/set */
    uint64_t value;   /* IN/OUT: new value (set) or current value (get) */
};
typedef struct xen_hvm_param xen_hvm_param_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_param_t);
37
38 /* Set the logical level of one of a domain's PCI INTx wires. */
39 #define HVMOP_set_pci_intx_level 2
/* Argument for HVMOP_set_pci_intx_level: drive one emulated INTx wire. */
struct xen_hvm_set_pci_intx_level {
    /* Domain to be updated. */
    domid_t domid;
    /* PCI INTx identification in PCI topology (domain:bus:device:intx). */
    uint8_t domain, bus, device, intx;
    /* Assertion level (0 = unasserted, 1 = asserted). */
    uint8_t level;
};
typedef struct xen_hvm_set_pci_intx_level xen_hvm_set_pci_intx_level_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_intx_level_t);
50
51 /* Set the logical level of one of a domain's ISA IRQ wires. */
52 #define HVMOP_set_isa_irq_level 3
/* Argument for HVMOP_set_isa_irq_level: drive one emulated ISA IRQ wire. */
struct xen_hvm_set_isa_irq_level {
    /* Domain to be updated. */
    domid_t domid;
    /* ISA device identification, by ISA IRQ (0-15). */
    uint8_t isa_irq;
    /* Assertion level (0 = unasserted, 1 = asserted). */
    uint8_t level;
};
typedef struct xen_hvm_set_isa_irq_level xen_hvm_set_isa_irq_level_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_isa_irq_level_t);
63
64 #define HVMOP_set_pci_link_route 4
/*
 * Argument for HVMOP_set_pci_link_route: map an emulated PCI interrupt
 * link to an ISA IRQ, or disable the link.
 */
struct xen_hvm_set_pci_link_route {
    /* Domain to be updated. */
    domid_t domid;
    /* PCI link identifier (0-3). */
    uint8_t link;
    /* ISA IRQ (1-15), or 0 (disable link). */
    uint8_t isa_irq;
};
typedef struct xen_hvm_set_pci_link_route xen_hvm_set_pci_link_route_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_link_route_t);
75
76 /* Flushes all VCPU TLBs: @arg must be NULL. */
77 #define HVMOP_flush_tlbs 5
78
/* Memory types an HVM guest page may be assigned (see HVMOP_set_mem_type). */
typedef enum {
    HVMMEM_ram_rw,     /* Normal read/write guest RAM */
    HVMMEM_ram_ro,     /* Read-only; writes are discarded */
    HVMMEM_mmio_dm,    /* Reads and writes go to the device model */
} hvmmem_type_t;
84
85 /* Following tools-only interfaces may change in future. */
86 #if defined(__XEN__) || defined(__XEN_TOOLS__)
87
88 /* Track dirty VRAM. */
89 #define HVMOP_track_dirty_vram 6
/*
 * Argument for HVMOP_track_dirty_vram: enable dirty-page tracking over a
 * contiguous pfn range and retrieve the dirty bitmap for that range.
 */
struct xen_hvm_track_dirty_vram {
    /* Domain to be tracked. */
    domid_t domid;
    /* First pfn to track. */
    uint64_aligned_t first_pfn;
    /* Number of pages to track. */
    uint64_aligned_t nr;
    /* OUT variable. */
    /* Dirty bitmap buffer (one bit per page in [first_pfn, first_pfn+nr)). */
    XEN_GUEST_HANDLE_64(uint8) dirty_bitmap;
};
typedef struct xen_hvm_track_dirty_vram xen_hvm_track_dirty_vram_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_track_dirty_vram_t);
103
104 /* Notify that some pages got modified by the Device Model. */
105 #define HVMOP_modified_memory 7
/*
 * Argument for HVMOP_modified_memory: tell Xen that the device model has
 * written to the given contiguous pfn range.
 */
struct xen_hvm_modified_memory {
    /* Domain to be updated. */
    domid_t domid;
    /* First pfn. */
    uint64_aligned_t first_pfn;
    /* Number of pages. */
    uint64_aligned_t nr;
};
typedef struct xen_hvm_modified_memory xen_hvm_modified_memory_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_modified_memory_t);
116
117 #define HVMOP_set_mem_type 8
118 /* Notify that a region of memory is to be treated in a specific way. */
/* Notify that a region of memory is to be treated in a specific way. */
struct xen_hvm_set_mem_type {
    /* Domain to be updated. */
    domid_t domid;
    /* Memory type (an hvmmem_type_t value). */
    uint16_t hvmmem_type;
    /* Number of pages. */
    uint32_t nr;
    /* First pfn. */
    uint64_aligned_t first_pfn;
};
typedef struct xen_hvm_set_mem_type xen_hvm_set_mem_type_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_mem_type_t);
131
132 #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
133
134 /* Hint from PV drivers for pagetable destruction. */
135 #define HVMOP_pagetable_dying 9
/*
 * Argument for HVMOP_pagetable_dying: PV-driver hint that the toplevel
 * pagetable at @gpa is about to be torn down.
 */
struct xen_hvm_pagetable_dying {
    /* Domain with a pagetable about to be destroyed. */
    domid_t domid;
    uint16_t pad[3]; /* align next field on 8-byte boundary */
    /* guest physical address of the toplevel pagetable dying */
    uint64_t gpa;
};
typedef struct xen_hvm_pagetable_dying xen_hvm_pagetable_dying_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_pagetable_dying_t);
145
146 /* Get the current Xen time, in nanoseconds since system boot. */
147 #define HVMOP_get_time 10
/* Argument for HVMOP_get_time: current Xen system time. */
struct xen_hvm_get_time {
    uint64_t now;      /* OUT: nanoseconds since system boot */
};
typedef struct xen_hvm_get_time xen_hvm_get_time_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_time_t);
153
154 #define HVMOP_xentrace 11
/* Argument for HVMOP_xentrace: emit a guest-supplied trace record. */
struct xen_hvm_xentrace {
    /* Trace event id and number of valid bytes in @extra. */
    uint16_t event, extra_bytes;
    /* Opaque per-event payload; capacity bounded by TRACE_EXTRA_MAX words. */
    uint8_t extra[TRACE_EXTRA_MAX * sizeof(uint32_t)];
};
typedef struct xen_hvm_xentrace xen_hvm_xentrace_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_xentrace_t);
161
162 /* Following tools-only interfaces may change in future. */
163 #if defined(__XEN__) || defined(__XEN_TOOLS__)
164
165 #define HVMOP_set_mem_access 12
/*
 * Per-page access permissions for HVMOP_set_mem_access /
 * HVMOP_get_mem_access.  The suffix letters name the allowed operations:
 * n = none, r = read, w = write, x = execute.
 */
typedef enum {
    HVMMEM_access_n,
    HVMMEM_access_r,
    HVMMEM_access_w,
    HVMMEM_access_rw,
    HVMMEM_access_x,
    HVMMEM_access_rx,
    HVMMEM_access_wx,
    HVMMEM_access_rwx,
    HVMMEM_access_rx2rw,    /* Page starts off as r-x, but automatically
                             * change to r-w on a write */
    HVMMEM_access_n2rwx,    /* Log access: starts off as n, automatically
                             * goes to rwx, generating an event without
                             * pausing the vcpu */
    HVMMEM_access_default   /* Take the domain default */
} hvmmem_access_t;
182 /* Notify that a region of memory is to have specific access types */
/* Notify that a region of memory is to have specific access types */
struct xen_hvm_set_mem_access {
    /* Domain to be updated. */
    domid_t domid;
    /* Memory type (an hvmmem_access_t value). */
    uint16_t hvmmem_access; /* hvm_access_t */
    /* Number of pages, ignored on setting default access */
    uint32_t nr;
    /* First pfn, or ~0ull to set the default access for new pages */
    uint64_aligned_t first_pfn;
};
typedef struct xen_hvm_set_mem_access xen_hvm_set_mem_access_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_mem_access_t);
195
196 #define HVMOP_get_mem_access 13
197 /* Get the specific access type for that region of memory */
/* Get the specific access type for that region of memory */
struct xen_hvm_get_mem_access {
    /* Domain to be queried. */
    domid_t domid;
    /* Memory type (an hvmmem_access_t value): OUT */
    uint16_t hvmmem_access; /* hvm_access_t */
    /* pfn, or ~0ull for default access for new pages. IN */
    uint64_aligned_t pfn;
};
typedef struct xen_hvm_get_mem_access xen_hvm_get_mem_access_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_mem_access_t);
208
209 #define HVMOP_inject_trap 14
210 /* Inject a trap into a VCPU, which will get taken up on the next
211 * scheduling of it. Note that the caller should know enough of the
212 * state of the CPU before injecting, to know what the effect of
213 * injecting the trap will be.
214 */
struct xen_hvm_inject_trap {
    /* Domain to be queried. */
    domid_t domid;
    /* VCPU */
    uint32_t vcpuid;
    /* Vector number */
    uint32_t vector;
    /* Trap type (HVMOP_TRAP_*) */
    uint32_t type;
/* NB. This enumeration precisely matches hvm.h:X86_EVENTTYPE_* */
# define HVMOP_TRAP_ext_int    0 /* external interrupt */
# define HVMOP_TRAP_nmi        2 /* nmi */
# define HVMOP_TRAP_hw_exc     3 /* hardware exception */
# define HVMOP_TRAP_sw_int     4 /* software interrupt (CD nn) */
# define HVMOP_TRAP_pri_sw_exc 5 /* ICEBP (F1) */
# define HVMOP_TRAP_sw_exc     6 /* INT3 (CC), INTO (CE) */
    /* Error code, or ~0u to skip */
    uint32_t error_code;
    /* Instruction length */
    uint32_t insn_len;
    /* CR2 for page faults */
    uint64_aligned_t cr2;
};
typedef struct xen_hvm_inject_trap xen_hvm_inject_trap_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_inject_trap_t);
240
241 #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
242
243 #define HVMOP_get_mem_type 15
244 /* Return hvmmem_type_t for the specified pfn. */
/* Return hvmmem_type_t for the specified pfn. */
struct xen_hvm_get_mem_type {
    /* Domain to be queried. */
    domid_t domid;
    /* OUT variable (an hvmmem_type_t value). */
    uint16_t mem_type;
    uint16_t pad[2]; /* align next field on 8-byte boundary */
    /* IN variable. */
    uint64_t pfn;
};
typedef struct xen_hvm_get_mem_type xen_hvm_get_mem_type_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_mem_type_t);
256
257 /* Following tools-only interfaces may change in future. */
258 #if defined(__XEN__) || defined(__XEN_TOOLS__)
259
260 /* MSI injection for emulated devices */
261 #define HVMOP_inject_msi 16
/* Argument for HVMOP_inject_msi: deliver an MSI on behalf of an
 * emulated device. */
struct xen_hvm_inject_msi {
    /* Domain to be injected */
    domid_t domid;
    /* Data -- lower 32 bits */
    uint32_t data;
    /* Address (0xfeexxxxx) */
    uint64_t addr;
};
typedef struct xen_hvm_inject_msi xen_hvm_inject_msi_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_inject_msi_t);
272
273 #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
274
275 #endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */