1 /*
2 * Copyright (c) 2016, Citrix Systems Inc
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a copy
5 * of this software and associated documentation files (the "Software"), to
6 * deal in the Software without restriction, including without limitation the
7 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
8 * sell copies of the Software, and to permit persons to whom the Software is
9 * furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24 #ifndef __XEN_PUBLIC_HVM_DM_OP_H__
25 #define __XEN_PUBLIC_HVM_DM_OP_H__
26
27 #include "../xen.h"
28 #include "../event_channel.h"
29
30 #ifndef uint64_aligned_t
31 #define uint64_aligned_t uint64_t
32 #endif
33
34 /*
35 * IOREQ Servers
36 *
 * The interface between an I/O emulator and Xen is called an IOREQ Server.
38 * A domain supports a single 'legacy' IOREQ Server which is instantiated if
39 * parameter...
40 *
41 * HVM_PARAM_IOREQ_PFN is read (to get the gfn containing the synchronous
42 * ioreq structures), or...
43 * HVM_PARAM_BUFIOREQ_PFN is read (to get the gfn containing the buffered
44 * ioreq ring), or...
45 * HVM_PARAM_BUFIOREQ_EVTCHN is read (to get the event channel that Xen uses
46 * to request buffered I/O emulation).
47 *
48 * The following hypercalls facilitate the creation of IOREQ Servers for
49 * 'secondary' emulators which are invoked to implement port I/O, memory, or
50 * PCI config space ranges which they explicitly register.
51 */
52
/*
 * Unique identifier of an IOREQ Server within its target domain, handed
 * back by XEN_DMOP_create_ioreq_server and used by the other ioreq-server
 * operations below.
 */
typedef uint16_t ioservid_t;
54
55 /*
56 * XEN_DMOP_create_ioreq_server: Instantiate a new IOREQ Server for a
57 * secondary emulator.
58 *
 * The <id> handed back is unique for the target domain. The value of
60 * <handle_bufioreq> should be one of HVM_IOREQSRV_BUFIOREQ_* defined in
61 * hvm_op.h. If the value is HVM_IOREQSRV_BUFIOREQ_OFF then the buffered
62 * ioreq ring will not be allocated and hence all emulation requests to
63 * this server will be synchronous.
64 */
65 #define XEN_DMOP_create_ioreq_server 1
66
struct xen_dm_op_create_ioreq_server {
    /* IN - should server handle buffered ioreqs (HVM_IOREQSRV_BUFIOREQ_*) */
    uint8_t handle_bufioreq;
    uint8_t pad[3];     /* Reserved; keeps <id> aligned. */
    /* OUT - server id */
    ioservid_t id;
};
typedef struct xen_dm_op_create_ioreq_server xen_dm_op_create_ioreq_server_t;
75
76 /*
77 * XEN_DMOP_get_ioreq_server_info: Get all the information necessary to
78 * access IOREQ Server <id>.
79 *
80 * If the IOREQ Server is handling buffered emulation requests, the
81 * emulator needs to bind to event channel <bufioreq_port> to listen for
82 * them. (The event channels used for synchronous emulation requests are
83 * specified in the per-CPU ioreq structures).
84 * In addition, if the XENMEM_acquire_resource memory op cannot be used,
85 * the emulator will need to map the synchronous ioreq structures and
86 * buffered ioreq ring (if it exists) from guest memory. If <flags> does
87 * not contain XEN_DMOP_no_gfns then these pages will be made available and
88 * the frame numbers passed back in gfns <ioreq_gfn> and <bufioreq_gfn>
89 * respectively. (If the IOREQ Server is not handling buffered emulation
90 * only <ioreq_gfn> will be valid).
91 *
92 * NOTE: To access the synchronous ioreq structures and buffered ioreq
93 * ring, it is preferable to use the XENMEM_acquire_resource memory
94 * op specifying resource type XENMEM_resource_ioreq_server.
95 */
96 #define XEN_DMOP_get_ioreq_server_info 2
97
struct xen_dm_op_get_ioreq_server_info {
    /* IN - server id */
    ioservid_t id;
    /* IN - flags (see XEN_DMOP_no_gfns) */
    uint16_t flags;

/* If set, the <ioreq_gfn>/<bufioreq_gfn> pages are not made available. */
#define _XEN_DMOP_no_gfns 0
#define XEN_DMOP_no_gfns (1u << _XEN_DMOP_no_gfns)

    /* OUT - buffered ioreq port */
    evtchn_port_t bufioreq_port;
    /* OUT - sync ioreq gfn (see block comment above) */
    uint64_aligned_t ioreq_gfn;
    /* OUT - buffered ioreq gfn (see block comment above) */
    uint64_aligned_t bufioreq_gfn;
};
typedef struct xen_dm_op_get_ioreq_server_info xen_dm_op_get_ioreq_server_info_t;
115
116 /*
117 * XEN_DMOP_map_io_range_to_ioreq_server: Register an I/O range for
118 * emulation by the client of
119 * IOREQ Server <id>.
120 * XEN_DMOP_unmap_io_range_from_ioreq_server: Deregister an I/O range
121 * previously registered for
122 * emulation by the client of
123 * IOREQ Server <id>.
124 *
125 * There are three types of I/O that can be emulated: port I/O, memory
 * accesses and PCI config space accesses. The <type> field denotes which
 * type of range the <start> and <end> (inclusive) fields are specifying.
 * PCI config space ranges are specified by segment/bus/device/function
 * values which should be encoded using the XEN_DMOP_PCI_SBDF helper macro
 * below.
131 *
132 * NOTE: unless an emulation request falls entirely within a range mapped
133 * by a secondary emulator, it will not be passed to that emulator.
134 */
135 #define XEN_DMOP_map_io_range_to_ioreq_server 3
136 #define XEN_DMOP_unmap_io_range_from_ioreq_server 4
137
struct xen_dm_op_ioreq_server_range {
    /* IN - server id */
    ioservid_t id;
    uint16_t pad;       /* Reserved; keeps <type> aligned. */
    /* IN - type of range (XEN_DMOP_IO_RANGE_*) */
    uint32_t type;
# define XEN_DMOP_IO_RANGE_PORT   0 /* I/O port range */
# define XEN_DMOP_IO_RANGE_MEMORY 1 /* MMIO range */
# define XEN_DMOP_IO_RANGE_PCI    2 /* PCI segment/bus/dev/func range */
    /* IN - inclusive start and end of range */
    uint64_aligned_t start, end;
};
typedef struct xen_dm_op_ioreq_server_range xen_dm_op_ioreq_server_range_t;
151
/*
 * Encode a PCI segment/bus/device/function tuple into the 32-bit SBDF
 * value used by XEN_DMOP_IO_RANGE_PCI ranges:
 *   bits 31-16: segment, 15-8: bus, 7-3: device, 2-0: function.
 * Each argument is masked to its field width. The casts to uint32_t avoid
 * undefined behaviour from left-shifting into the sign bit of a (signed)
 * int when segment >= 0x8000.
 */
#define XEN_DMOP_PCI_SBDF(s,b,d,f) \
	((((uint32_t)(s) & 0xffff) << 16) |  \
	 (((uint32_t)(b) & 0xff) << 8) |     \
	 (((uint32_t)(d) & 0x1f) << 3) |     \
	  ((uint32_t)(f) & 0x07))
157
158 /*
159 * XEN_DMOP_set_ioreq_server_state: Enable or disable the IOREQ Server <id>
160 *
161 * The IOREQ Server will not be passed any emulation requests until it is
162 * in the enabled state.
163 * Note that the contents of the ioreq_gfn and bufioreq_gfn (see
164 * XEN_DMOP_get_ioreq_server_info) are not meaningful until the IOREQ Server
165 * is in the enabled state.
166 */
167 #define XEN_DMOP_set_ioreq_server_state 5
168
struct xen_dm_op_set_ioreq_server_state {
    /* IN - server id */
    ioservid_t id;
    /* IN - enabled? */
    uint8_t enabled;
    uint8_t pad;        /* Reserved. */
};
typedef struct xen_dm_op_set_ioreq_server_state xen_dm_op_set_ioreq_server_state_t;
177
178 /*
179 * XEN_DMOP_destroy_ioreq_server: Destroy the IOREQ Server <id>.
180 *
181 * Any registered I/O ranges will be automatically deregistered.
182 */
183 #define XEN_DMOP_destroy_ioreq_server 6
184
struct xen_dm_op_destroy_ioreq_server {
    /* IN - server id */
    ioservid_t id;
    uint16_t pad;       /* Reserved. */
};
typedef struct xen_dm_op_destroy_ioreq_server xen_dm_op_destroy_ioreq_server_t;
191
192 /*
193 * XEN_DMOP_track_dirty_vram: Track modifications to the specified pfn
194 * range.
195 *
196 * NOTE: The bitmap passed back to the caller is passed in a
197 * secondary buffer.
198 */
199 #define XEN_DMOP_track_dirty_vram 7
200
struct xen_dm_op_track_dirty_vram {
    /* IN - number of pages to be tracked */
    uint32_t nr;
    uint32_t pad;       /* Reserved; keeps <first_pfn> aligned. */
    /* IN - first pfn to track */
    uint64_aligned_t first_pfn;
};
typedef struct xen_dm_op_track_dirty_vram xen_dm_op_track_dirty_vram_t;
209
210 /*
211 * XEN_DMOP_set_pci_intx_level: Set the logical level of one of a domain's
212 * PCI INTx pins.
213 */
214 #define XEN_DMOP_set_pci_intx_level 8
215
struct xen_dm_op_set_pci_intx_level {
    /* IN - PCI INTx identification (domain:bus:device:intx) */
    uint16_t domain;
    uint8_t bus, device, intx;
    /* IN - Level: 0 -> deasserted, 1 -> asserted */
    uint8_t level;
};
typedef struct xen_dm_op_set_pci_intx_level xen_dm_op_set_pci_intx_level_t;
224
225 /*
 * XEN_DMOP_set_isa_irq_level: Set the logical level of one of a domain's
227 * ISA IRQ lines.
228 */
229 #define XEN_DMOP_set_isa_irq_level 9
230
struct xen_dm_op_set_isa_irq_level {
    /* IN - ISA IRQ (0-15) */
    uint8_t isa_irq;
    /* IN - Level: 0 -> deasserted, 1 -> asserted */
    uint8_t level;
};
typedef struct xen_dm_op_set_isa_irq_level xen_dm_op_set_isa_irq_level_t;
238
239 /*
240 * XEN_DMOP_set_pci_link_route: Map a PCI INTx line to an IRQ line.
241 */
242 #define XEN_DMOP_set_pci_link_route 10
243
struct xen_dm_op_set_pci_link_route {
    /* IN - PCI INTx line (0-3) */
    uint8_t link;
    /* IN - ISA IRQ (1-15), or 0 -> disable link */
    uint8_t isa_irq;
};
typedef struct xen_dm_op_set_pci_link_route xen_dm_op_set_pci_link_route_t;
251
252 /*
253 * XEN_DMOP_modified_memory: Notify that a set of pages were modified by
254 * an emulator.
255 *
256 * DMOP buf 1 contains an array of xen_dm_op_modified_memory_extent with
257 * @nr_extents entries.
258 *
259 * On error, @nr_extents will contain the index+1 of the extent that
260 * had the error. It is not defined if or which pages may have been
261 * marked as dirty, in this event.
262 */
263 #define XEN_DMOP_modified_memory 11
264
struct xen_dm_op_modified_memory {
    /*
     * IN - number of extents to be processed
     * OUT - returns n + 1 for the failing extent (see block comment above)
     */
    uint32_t nr_extents;
    /* IN/OUT - must be set to 0 by the caller */
    uint32_t opaque;
};
typedef struct xen_dm_op_modified_memory xen_dm_op_modified_memory_t;
275
/* One entry of the extent array carried in DMOP buf 1 (see above). */
struct xen_dm_op_modified_memory_extent {
    /* IN - number of contiguous pages modified */
    uint32_t nr;
    uint32_t pad;       /* Reserved; keeps <first_pfn> aligned. */
    /* IN - first pfn modified */
    uint64_aligned_t first_pfn;
};
283
284 /*
285 * XEN_DMOP_set_mem_type: Notify that a region of memory is to be treated
286 * in a specific way. (See definition of
287 * hvmmem_type_t).
288 *
289 * NOTE: In the event of a continuation (return code -ERESTART), the
290 * @first_pfn is set to the value of the pfn of the remaining
291 * region and @nr reduced to the size of the remaining region.
292 */
293 #define XEN_DMOP_set_mem_type 12
294
struct xen_dm_op_set_mem_type {
    /* IN - number of contiguous pages */
    uint32_t nr;
    /* IN - new hvmmem_type_t of region */
    uint16_t mem_type;
    uint16_t pad;       /* Reserved. */
    /* IN - first pfn in region */
    uint64_aligned_t first_pfn;
};
typedef struct xen_dm_op_set_mem_type xen_dm_op_set_mem_type_t;
305
306 /*
307 * XEN_DMOP_inject_event: Inject an event into a VCPU, which will
308 * get taken up when it is next scheduled.
309 *
310 * Note that the caller should know enough of the state of the CPU before
311 * injecting, to know what the effect of injecting the event will be.
312 */
313 #define XEN_DMOP_inject_event 13
314
struct xen_dm_op_inject_event {
    /* IN - index of vCPU */
    uint32_t vcpuid;
    /* IN - interrupt vector */
    uint8_t vector;
    /* IN - event type (XEN_DMOP_EVENT_*) */
    uint8_t type;
/* NB. This enumeration precisely matches hvm.h:X86_EVENTTYPE_* */
# define XEN_DMOP_EVENT_ext_int    0 /* external interrupt */
# define XEN_DMOP_EVENT_nmi        2 /* nmi */
# define XEN_DMOP_EVENT_hw_exc     3 /* hardware exception */
# define XEN_DMOP_EVENT_sw_int     4 /* software interrupt (CD nn) */
# define XEN_DMOP_EVENT_pri_sw_exc 5 /* ICEBP (F1) */
# define XEN_DMOP_EVENT_sw_exc     6 /* INT3 (CC), INTO (CE) */
    /* IN - instruction length */
    uint8_t insn_len;
    uint8_t pad0;       /* Reserved. */
    /* IN - error code (or ~0 to skip) */
    uint32_t error_code;
    uint32_t pad1;      /* Reserved; keeps <cr2> aligned. */
    /* IN - type-specific extra data (%cr2 for #PF, pending_dbg for #DB) */
    uint64_aligned_t cr2;
};
typedef struct xen_dm_op_inject_event xen_dm_op_inject_event_t;
339
340 /*
341 * XEN_DMOP_inject_msi: Inject an MSI for an emulated device.
342 */
343 #define XEN_DMOP_inject_msi 14
344
struct xen_dm_op_inject_msi {
    /* IN - MSI data (lower 32 bits) */
    uint32_t data;
    uint32_t pad;       /* Reserved; keeps <addr> aligned. */
    /* IN - MSI address (0xfeexxxxx) */
    uint64_aligned_t addr;
};
typedef struct xen_dm_op_inject_msi xen_dm_op_inject_msi_t;
353
354 /*
355 * XEN_DMOP_map_mem_type_to_ioreq_server : map or unmap the IOREQ Server <id>
356 * to specific memory type <type>
357 * for specific accesses <flags>
358 *
359 * For now, flags only accept the value of XEN_DMOP_IOREQ_MEM_ACCESS_WRITE,
360 * which means only write operations are to be forwarded to an ioreq server.
361 * Support for the emulation of read operations can be added when an ioreq
362 * server has such requirement in future.
363 */
364 #define XEN_DMOP_map_mem_type_to_ioreq_server 15
365
struct xen_dm_op_map_mem_type_to_ioreq_server {
    ioservid_t id;      /* IN - ioreq server id */
    uint16_t type;      /* IN - memory type */
    uint32_t flags;     /* IN - types of accesses to be forwarded to the
                           ioreq server. flags with 0 means to unmap the
                           ioreq server */

#define XEN_DMOP_IOREQ_MEM_ACCESS_READ (1u << 0)
#define XEN_DMOP_IOREQ_MEM_ACCESS_WRITE (1u << 1)

    uint64_t opaque;    /* IN/OUT - only used for hypercall continuation,
                           has to be set to zero by the caller */
};
typedef struct xen_dm_op_map_mem_type_to_ioreq_server xen_dm_op_map_mem_type_to_ioreq_server_t;
380
381 /*
382 * XEN_DMOP_remote_shutdown : Declare a shutdown for another domain
383 * Identical to SCHEDOP_remote_shutdown
384 */
385 #define XEN_DMOP_remote_shutdown 16
386
struct xen_dm_op_remote_shutdown {
    uint32_t reason;    /* SHUTDOWN_* => enum sched_shutdown_reason */
                        /* (Other reason values are not blocked) */
};
typedef struct xen_dm_op_remote_shutdown xen_dm_op_remote_shutdown_t;
392
393 /*
394 * XEN_DMOP_relocate_memory : Relocate GFNs for the specified guest.
395 * Identical to XENMEM_add_to_physmap with
396 * space == XENMAPSPACE_gmfn_range.
397 */
398 #define XEN_DMOP_relocate_memory 17
399
struct xen_dm_op_relocate_memory {
    /* All fields are IN/OUT, with their OUT state undefined. */
    /* Number of GFNs to process. */
    uint32_t size;
    uint32_t pad;       /* Reserved; keeps <src_gfn> aligned. */
    /* Starting GFN to relocate. */
    uint64_aligned_t src_gfn;
    /* Starting GFN where GFNs should be relocated. */
    uint64_aligned_t dst_gfn;
};
typedef struct xen_dm_op_relocate_memory xen_dm_op_relocate_memory_t;
411
412 /*
413 * XEN_DMOP_pin_memory_cacheattr : Pin caching type of RAM space.
414 * Identical to XEN_DOMCTL_pin_mem_cacheattr.
415 */
416 #define XEN_DMOP_pin_memory_cacheattr 18
417
struct xen_dm_op_pin_memory_cacheattr {
    uint64_aligned_t start;   /* Start gfn. */
    uint64_aligned_t end;     /* End gfn. */
/* Caching types: these happen to be the same as x86 MTRR/PAT type codes. */
#define XEN_DMOP_MEM_CACHEATTR_UC  0
#define XEN_DMOP_MEM_CACHEATTR_WC  1
#define XEN_DMOP_MEM_CACHEATTR_WT  4
#define XEN_DMOP_MEM_CACHEATTR_WP  5
#define XEN_DMOP_MEM_CACHEATTR_WB  6
#define XEN_DMOP_MEM_CACHEATTR_UCM 7
/* Remove a previously-pinned caching type for the range. */
#define XEN_DMOP_DELETE_MEM_CACHEATTR (~(uint32_t)0)
    uint32_t type;            /* XEN_DMOP_MEM_CACHEATTR_* */
    uint32_t pad;             /* Reserved. */
};
typedef struct xen_dm_op_pin_memory_cacheattr xen_dm_op_pin_memory_cacheattr_t;
433
434 /*
 * XEN_DMOP_set_irq_level: Set the logical level of one of a domain's
436 * IRQ lines (currently Arm only).
437 * Only SPIs are supported.
438 */
439 #define XEN_DMOP_set_irq_level 19
440
struct xen_dm_op_set_irq_level {
    /* IN - IRQ number (SPI; Arm only, see block comment above) */
    uint32_t irq;
    /* IN - Level: 0 -> deasserted, 1 -> asserted */
    uint8_t level;
    uint8_t pad[3];     /* Reserved. */
};
typedef struct xen_dm_op_set_irq_level xen_dm_op_set_irq_level_t;
448
449 /*
450 * XEN_DMOP_nr_vcpus: Query the number of vCPUs a domain has.
451 *
452 * This is the number of vcpu objects allocated in Xen for the domain, and is
453 * fixed from creation time. This bound is applicable to e.g. the vcpuid
454 * parameter of XEN_DMOP_inject_event, or number of struct ioreq objects
455 * mapped via XENMEM_acquire_resource.
456 */
457 #define XEN_DMOP_nr_vcpus 20
458
struct xen_dm_op_nr_vcpus {
    uint32_t vcpus;     /* OUT - number of vCPU objects allocated for the domain */
};
typedef struct xen_dm_op_nr_vcpus xen_dm_op_nr_vcpus_t;
463
/*
 * Header common to all device model operations (carried in @bufs[0] of
 * HYPERVISOR_dm_op, see below): <op> is one of the XEN_DMOP_* values
 * above and selects which member of <u> is in use.
 */
struct xen_dm_op {
    uint32_t op;
    uint32_t pad;       /* Padding; keeps <u> 64-bit aligned. */
    union {
        xen_dm_op_create_ioreq_server_t create_ioreq_server;
        xen_dm_op_get_ioreq_server_info_t get_ioreq_server_info;
        xen_dm_op_ioreq_server_range_t map_io_range_to_ioreq_server;
        xen_dm_op_ioreq_server_range_t unmap_io_range_from_ioreq_server;
        xen_dm_op_set_ioreq_server_state_t set_ioreq_server_state;
        xen_dm_op_destroy_ioreq_server_t destroy_ioreq_server;
        xen_dm_op_track_dirty_vram_t track_dirty_vram;
        xen_dm_op_set_pci_intx_level_t set_pci_intx_level;
        xen_dm_op_set_isa_irq_level_t set_isa_irq_level;
        xen_dm_op_set_irq_level_t set_irq_level;
        xen_dm_op_set_pci_link_route_t set_pci_link_route;
        xen_dm_op_modified_memory_t modified_memory;
        xen_dm_op_set_mem_type_t set_mem_type;
        xen_dm_op_inject_event_t inject_event;
        xen_dm_op_inject_msi_t inject_msi;
        xen_dm_op_map_mem_type_to_ioreq_server_t map_mem_type_to_ioreq_server;
        xen_dm_op_remote_shutdown_t remote_shutdown;
        xen_dm_op_relocate_memory_t relocate_memory;
        xen_dm_op_pin_memory_cacheattr_t pin_memory_cacheattr;
        xen_dm_op_nr_vcpus_t nr_vcpus;
    } u;
};
490
/*
 * Descriptor for one guest-memory buffer passed to HYPERVISOR_dm_op:
 * <h> is the guest handle of the buffer and <size> its size.
 */
struct xen_dm_op_buf {
    XEN_GUEST_HANDLE(void) h;
    xen_ulong_t size;
};
typedef struct xen_dm_op_buf xen_dm_op_buf_t;
DEFINE_XEN_GUEST_HANDLE(xen_dm_op_buf_t);
497
498 /* ` enum neg_errnoval
499 * ` HYPERVISOR_dm_op(domid_t domid,
500 * ` unsigned int nr_bufs,
501 * ` xen_dm_op_buf_t bufs[])
502 * `
503 *
504 * @domid is the domain the hypercall operates on.
505 * @nr_bufs is the number of buffers in the @bufs array.
506 * @bufs points to an array of buffers where @bufs[0] contains a struct
507 * xen_dm_op, describing the specific device model operation and its
508 * parameters.
509 * @bufs[1..] may be referenced in the parameters for the purposes of
510 * passing extra information to or from the domain.
511 */
512
513 #endif /* __XEN_PUBLIC_HVM_DM_OP_H__ */
514
515 /*
516 * Local variables:
517 * mode: C
518 * c-file-style: "BSD"
519 * c-basic-offset: 4
520 * tab-width: 4
521 * indent-tabs-mode: nil
522 * End:
523 */
Cache object: c275d582f476249ec89ccc7d8262b86c
|