/******************************************************************************
 * memory.h
 *
 * Memory reservation and information.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
 */

#ifndef __XEN_PUBLIC_MEMORY_H__
#define __XEN_PUBLIC_MEMORY_H__

#include "xen.h"
#include "physdev.h"

/*
 * Increase or decrease the specified domain's memory reservation. Returns the
 * number of extents successfully allocated or freed.
 * arg == addr of struct xen_memory_reservation.
 */
#define XENMEM_increase_reservation 0
#define XENMEM_decrease_reservation 1
#define XENMEM_populate_physmap     6

#if __XEN_INTERFACE_VERSION__ >= 0x00030209
/*
 * Maximum # bits addressable by the user of the allocated region (e.g., I/O
 * devices often have a 32-bit limitation even in 64-bit systems). If zero
 * then the user has no addressing restriction. This field is not used by
 * XENMEM_decrease_reservation.
 */
#define XENMEMF_address_bits(x)     (x)
#define XENMEMF_get_address_bits(x) ((x) & 0xffu)
/* NUMA node to allocate from. */
#define XENMEMF_node(x)     (((x) + 1) << 8)
#define XENMEMF_get_node(x) ((((x) >> 8) - 1) & 0xffu)
/* Flag to populate physmap with populate-on-demand entries */
#define XENMEMF_populate_on_demand (1<<16)
/* Flag to request allocation only from the node specified */
#define XENMEMF_exact_node_request (1<<17)
#define XENMEMF_exact_node(n) (XENMEMF_node(n) | XENMEMF_exact_node_request)
/* Flag to indicate the node specified is virtual node */
#define XENMEMF_vnode  (1<<18)
#endif
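
/*
 * Illustrative example (not part of the ABI): a mem_flags value requesting
 * memory below 4GiB, strictly from NUMA node 2, would be composed as
 *
 *     XENMEMF_address_bits(32) | XENMEMF_exact_node(2)
 *
 * which expands to 32 | ((2 + 1) << 8) | (1 << 17).
 */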

struct xen_memory_reservation {

    /*
     * XENMEM_increase_reservation:
     *   OUT: MFN (*not* GMFN) bases of extents that were allocated
     * XENMEM_decrease_reservation:
     *   IN:  GMFN bases of extents to free
     * XENMEM_populate_physmap:
     *   IN:  GPFN bases of extents to populate with memory
     *   OUT: GMFN bases of extents that were allocated
     *   (NB. This command also updates the mach_to_phys translation table)
     * XENMEM_claim_pages:
     *   IN: must be zero
     */
    XEN_GUEST_HANDLE(xen_pfn_t) extent_start;

    /* Number of extents, and size/alignment of each (2^extent_order pages). */
    xen_ulong_t    nr_extents;
    unsigned int   extent_order;

#if __XEN_INTERFACE_VERSION__ >= 0x00030209
    /* XENMEMF flags. */
    unsigned int   mem_flags;
#else
    unsigned int   address_bits;
#endif

    /*
     * Domain whose reservation is being changed.
     * Unprivileged domains can specify only DOMID_SELF.
     */
    domid_t        domid;
};
typedef struct xen_memory_reservation xen_memory_reservation_t;
DEFINE_XEN_GUEST_HANDLE(xen_memory_reservation_t);
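
/*
 * Illustrative sketch (not part of the ABI): populating one 4kB page at
 * guest pseudo-physical frame 'gpfn', assuming a HYPERVISOR_memory_op()
 * hypercall wrapper of the kind typically provided by guest OSes:
 *
 *     xen_pfn_t frame = gpfn;
 *     struct xen_memory_reservation res = {
 *         .nr_extents   = 1,
 *         .extent_order = 0,           // 2^0 = 1 page per extent
 *         .mem_flags    = 0,           // no addressing/NUMA restriction
 *         .domid        = DOMID_SELF,
 *     };
 *     set_xen_guest_handle(res.extent_start, &frame);
 *     rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &res);
 *     // rc == 1 on success: one extent was populated.
 */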

/*
 * An atomic exchange of memory pages. If return code is zero then
 * @out.extent_start provides GMFNs of the newly-allocated memory.
 * Returns zero on complete success, otherwise a negative error code.
 * On complete success, @nr_exchanged == @in.nr_extents.
 * On partial success, @nr_exchanged indicates how much work was done.
 *
 * Note that only PV guests can use this operation.
 */
#define XENMEM_exchange             11
struct xen_memory_exchange {
    /*
     * [IN] Details of memory extents to be exchanged (GMFN bases).
     * Note that @in.address_bits is ignored and unused.
     */
    struct xen_memory_reservation in;

    /*
     * [IN/OUT] Details of new memory extents.
     * We require that:
     *  1. @in.domid == @out.domid
     *  2. @in.nr_extents  << @in.extent_order ==
     *     @out.nr_extents << @out.extent_order
     *  3. @in.extent_start and @out.extent_start lists must not overlap
     *  4. @out.extent_start lists GPFN bases to be populated
     *  5. @out.extent_start is overwritten with allocated GMFN bases
     */
    struct xen_memory_reservation out;

    /*
     * [OUT] Number of input extents that were successfully exchanged:
     *  1. The first @nr_exchanged input extents were successfully
     *     deallocated.
     *  2. The corresponding first entries in the output extent list correctly
     *     indicate the GMFNs that were successfully exchanged.
     *  3. All other input and output extents are untouched.
     *  4. If not all input extents are exchanged then the return code of this
     *     command will be non-zero.
     *  5. THIS FIELD MUST BE INITIALISED TO ZERO BY THE CALLER!
     */
    xen_ulong_t nr_exchanged;
};
typedef struct xen_memory_exchange xen_memory_exchange_t;
DEFINE_XEN_GUEST_HANDLE(xen_memory_exchange_t);
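
/*
 * Illustrative sketch (not part of the ABI): a PV guest exchanging 16
 * scattered 4kB pages for one physically contiguous 16-page extent below
 * 4GiB (e.g. for a DMA buffer). Assumes a HYPERVISOR_memory_op() wrapper;
 * 'in_frames' holds the GMFNs being given up and 'out_frame' the GPFN base
 * at which the replacement extent should appear:
 *
 *     struct xen_memory_exchange xchg = {
 *         .in = {
 *             .nr_extents   = 16,
 *             .extent_order = 0,
 *             .domid        = DOMID_SELF,
 *         },
 *         .out = {
 *             .nr_extents   = 1,
 *             .extent_order = 4,                       // 2^4 = 16 pages
 *             .mem_flags    = XENMEMF_address_bits(32),
 *             .domid        = DOMID_SELF,
 *         },
 *         .nr_exchanged = 0,                           // mandatory
 *     };
 *     set_xen_guest_handle(xchg.in.extent_start, in_frames);
 *     set_xen_guest_handle(xchg.out.extent_start, &out_frame);
 *     rc = HYPERVISOR_memory_op(XENMEM_exchange, &xchg);
 *     // rc == 0: out_frame now holds the new extent's GMFN base.
 */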

/*
 * Returns the maximum machine frame number of mapped RAM in this system.
 * This command always succeeds (it never returns an error code).
 * arg == NULL.
 */
#define XENMEM_maximum_ram_page     2

struct xen_memory_domain {
    /* [IN] Domain whose information is being queried. */
    domid_t domid;
};

/*
 * Returns the current or maximum memory reservation, in pages, of the
 * specified domain (may be DOMID_SELF). Returns -ve errcode on failure.
 * arg == addr of struct xen_memory_domain.
 */
#define XENMEM_current_reservation  3
#define XENMEM_maximum_reservation  4
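
/*
 * Illustrative sketch (not part of the ABI): querying the calling domain's
 * current reservation, assuming a HYPERVISOR_memory_op() wrapper:
 *
 *     struct xen_memory_domain dom = { .domid = DOMID_SELF };
 *     long pages = HYPERVISOR_memory_op(XENMEM_current_reservation, &dom);
 *     // pages < 0 on error, otherwise the reservation in pages.
 */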

/*
 * Returns the maximum GFN in use by the specified domain (may be DOMID_SELF).
 * Returns -ve errcode on failure.
 * arg == addr of struct xen_memory_domain.
 */
#define XENMEM_maximum_gpfn         14

/*
 * Returns a list of MFN bases of 2MB extents comprising the machine_to_phys
 * mapping table. Architectures which do not have a m2p table do not implement
 * this command.
 * arg == addr of xen_machphys_mfn_list_t.
 */
#define XENMEM_machphys_mfn_list    5
struct xen_machphys_mfn_list {
    /*
     * Size of the 'extent_start' array. Fewer entries will be filled if the
     * machphys table is smaller than max_extents * 2MB.
     */
    unsigned int max_extents;

    /*
     * Pointer to buffer to fill with list of extent starts. If there are
     * any large discontiguities in the machine address space, 2MB gaps in
     * the machphys table will be represented by an MFN base of zero.
     */
    XEN_GUEST_HANDLE(xen_pfn_t) extent_start;

    /*
     * Number of extents written to the above array. This will be smaller
     * than 'max_extents' if the machphys table is smaller than
     * max_extents * 2MB.
     */
    unsigned int nr_extents;
};
typedef struct xen_machphys_mfn_list xen_machphys_mfn_list_t;
DEFINE_XEN_GUEST_HANDLE(xen_machphys_mfn_list_t);
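
/*
 * Illustrative sketch (not part of the ABI): fetching up to 32 extent bases
 * of the m2p table, assuming a HYPERVISOR_memory_op() wrapper:
 *
 *     xen_pfn_t extents[32];
 *     struct xen_machphys_mfn_list list = { .max_extents = 32 };
 *     set_xen_guest_handle(list.extent_start, extents);
 *     rc = HYPERVISOR_memory_op(XENMEM_machphys_mfn_list, &list);
 *     // On success, extents[0 .. list.nr_extents-1] are valid.
 */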

/*
 * For a compat caller, this is identical to XENMEM_machphys_mfn_list.
 *
 * For a non-compat caller, this behaves like XENMEM_machphys_mfn_list but
 * returns the MFNs making up the compatibility m2p table.
 */
#define XENMEM_machphys_compat_mfn_list 25

/*
 * Returns the location in virtual address space of the machine_to_phys
 * mapping table. Architectures which do not have a m2p table, or which do not
 * map it by default into guest address space, do not implement this command.
 * arg == addr of xen_machphys_mapping_t.
 */
#define XENMEM_machphys_mapping     12
struct xen_machphys_mapping {
    xen_ulong_t v_start, v_end; /* Start and end virtual addresses.   */
    xen_ulong_t max_mfn;        /* Maximum MFN that can be looked up. */
};
typedef struct xen_machphys_mapping xen_machphys_mapping_t;
DEFINE_XEN_GUEST_HANDLE(xen_machphys_mapping_t);
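
/*
 * Illustrative sketch (not part of the ABI): on architectures that map the
 * m2p table into guest address space, an MFN-to-PFN lookup reduces to an
 * array access within the reported range:
 *
 *     struct xen_machphys_mapping map;
 *     rc = HYPERVISOR_memory_op(XENMEM_machphys_mapping, &map);
 *     if ( rc == 0 && mfn <= map.max_mfn )
 *         pfn = ((xen_ulong_t *)map.v_start)[mfn];
 */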

/* Source mapping space. */
/* ` enum phys_map_space { */
#define XENMAPSPACE_shared_info  0 /* shared info page */
#define XENMAPSPACE_grant_table  1 /* grant table page */
#define XENMAPSPACE_gmfn         2 /* GMFN */
#define XENMAPSPACE_gmfn_range   3 /* GMFN range, XENMEM_add_to_physmap only. */
#define XENMAPSPACE_gmfn_foreign 4 /* GMFN from another dom,
                                    * XENMEM_add_to_physmap_batch only. */
#define XENMAPSPACE_dev_mmio     5 /* device mmio region
                                      ARM only; the region is mapped in
                                      Stage-2 using the Normal Memory
                                      Inner/Outer Write-Back Cacheable
                                      memory attribute. */
/* ` } */

/*
 * Sets the GPFN at which a particular page appears in the specified guest's
 * physical address space (translated guests only).
 * arg == addr of xen_add_to_physmap_t.
 */
#define XENMEM_add_to_physmap      7
struct xen_add_to_physmap {
    /* Which domain to change the mapping for. */
    domid_t domid;

    /* Number of pages to go through for gmfn_range */
    uint16_t size;

    unsigned int space; /* => enum phys_map_space */

#define XENMAPIDX_grant_table_status 0x80000000

    /* Index into space being mapped. */
    xen_ulong_t idx;

    /* GPFN in domid where the source mapping page should appear. */
    xen_pfn_t gpfn;
};
typedef struct xen_add_to_physmap xen_add_to_physmap_t;
DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_t);
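
/*
 * Illustrative sketch (not part of the ABI): a translated guest mapping its
 * shared info page at pseudo-physical frame 'gpfn', assuming a
 * HYPERVISOR_memory_op() wrapper:
 *
 *     struct xen_add_to_physmap xatp = {
 *         .domid = DOMID_SELF,
 *         .space = XENMAPSPACE_shared_info,
 *         .idx   = 0,             // the shared info space has one page
 *         .gpfn  = gpfn,
 *     };
 *     rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
 */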

/* A batched version of add_to_physmap. */
#define XENMEM_add_to_physmap_batch 23
struct xen_add_to_physmap_batch {
    /* IN */
    /* Which domain to change the mapping for. */
    domid_t domid;
    uint16_t space; /* => enum phys_map_space */

    /* Number of pages to go through */
    uint16_t size;

#if __XEN_INTERFACE_VERSION__ < 0x00040700
    domid_t foreign_domid; /* IFF gmfn_foreign. Should be 0 for other spaces. */
#else
    union xen_add_to_physmap_batch_extra {
        domid_t foreign_domid; /* gmfn_foreign */
        uint16_t res0;         /* All the other spaces. Should be 0 */
    } u;
#endif

    /* Indexes into space being mapped. */
    XEN_GUEST_HANDLE(xen_ulong_t) idxs;

    /* GPFN in domid where the source mapping page should appear. */
    XEN_GUEST_HANDLE(xen_pfn_t) gpfns;

    /* OUT */

    /* Per index error code. */
    XEN_GUEST_HANDLE(int) errs;
};
typedef struct xen_add_to_physmap_batch xen_add_to_physmap_batch_t;
DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_batch_t);

#if __XEN_INTERFACE_VERSION__ < 0x00040400
#define XENMEM_add_to_physmap_range XENMEM_add_to_physmap_batch
#define xen_add_to_physmap_range xen_add_to_physmap_batch
typedef struct xen_add_to_physmap_batch xen_add_to_physmap_range_t;
DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_range_t);
#endif

/*
 * Unmaps the page appearing at a particular GPFN from the specified guest's
 * physical address space (translated guests only).
 * arg == addr of xen_remove_from_physmap_t.
 */
#define XENMEM_remove_from_physmap 15
struct xen_remove_from_physmap {
    /* Which domain to change the mapping for. */
    domid_t domid;

    /* GPFN of the current mapping of the page. */
    xen_pfn_t gpfn;
};
typedef struct xen_remove_from_physmap xen_remove_from_physmap_t;
DEFINE_XEN_GUEST_HANDLE(xen_remove_from_physmap_t);

/*** REMOVED ***/
/*#define XENMEM_translate_gpfn_list 8*/

/*
 * Returns the pseudo-physical memory map as it was when the domain
 * was started (specified by XENMEM_set_memory_map).
 * arg == addr of xen_memory_map_t.
 */
#define XENMEM_memory_map           9
struct xen_memory_map {
    /*
     * On call the number of entries which can be stored in buffer. On
     * return the number of entries which have been stored in
     * buffer.
     */
    unsigned int nr_entries;

    /*
     * Entries in the buffer are in the same format as returned by the
     * BIOS INT 0x15 EAX=0xE820 call.
     */
    XEN_GUEST_HANDLE(void) buffer;
};
typedef struct xen_memory_map xen_memory_map_t;
DEFINE_XEN_GUEST_HANDLE(xen_memory_map_t);
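
/*
 * Illustrative sketch (not part of the ABI): fetching the pseudo-physical
 * map into a caller-supplied E820-format buffer. 'struct e820entry' and
 * 'E820MAX' are illustrative names here; the real definitions live in the
 * architecture headers:
 *
 *     struct e820entry e820[E820MAX];
 *     struct xen_memory_map memmap = { .nr_entries = E820MAX };
 *     set_xen_guest_handle(memmap.buffer, e820);
 *     rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
 *     // On success, memmap.nr_entries holds the entry count.
 */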

/*
 * Returns the real physical memory map. Passes the same structure as
 * XENMEM_memory_map.
 * Specifying buffer as NULL will return the number of entries required
 * to store the complete memory map.
 * arg == addr of xen_memory_map_t.
 */
#define XENMEM_machine_memory_map   10

/*
 * Set the pseudo-physical memory map of a domain, as returned by
 * XENMEM_memory_map.
 * arg == addr of xen_foreign_memory_map_t.
 */
#define XENMEM_set_memory_map       13
struct xen_foreign_memory_map {
    domid_t domid;
    struct xen_memory_map map;
};
typedef struct xen_foreign_memory_map xen_foreign_memory_map_t;
DEFINE_XEN_GUEST_HANDLE(xen_foreign_memory_map_t);

#define XENMEM_set_pod_target       16
#define XENMEM_get_pod_target       17
struct xen_pod_target {
    /* IN */
    uint64_t target_pages;
    /* OUT */
    uint64_t tot_pages;
    uint64_t pod_cache_pages;
    uint64_t pod_entries;
    /* IN */
    domid_t domid;
};
typedef struct xen_pod_target xen_pod_target_t;
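
/*
 * Illustrative sketch (not part of the ABI; privileged/tools-side):
 * setting a populate-on-demand target for domain 'domid', assuming a
 * HYPERVISOR_memory_op() wrapper:
 *
 *     struct xen_pod_target pod = {
 *         .target_pages = target,
 *         .domid        = domid,
 *     };
 *     rc = HYPERVISOR_memory_op(XENMEM_set_pod_target, &pod);
 *     // On success the OUT fields report the domain's PoD state.
 */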

#if defined(__XEN__) || defined(__XEN_TOOLS__)

#ifndef uint64_aligned_t
#define uint64_aligned_t uint64_t
#endif

/*
 * Get the number of MFNs saved through memory sharing.
 * These calls never fail.
 */
#define XENMEM_get_sharing_freed_pages    18
#define XENMEM_get_sharing_shared_pages   19

#define XENMEM_paging_op                    20
#define XENMEM_paging_op_nominate           0
#define XENMEM_paging_op_evict              1
#define XENMEM_paging_op_prep               2

struct xen_mem_paging_op {
    uint8_t     op;         /* XENMEM_paging_op_* */
    domid_t     domain;

    /* IN: (XENMEM_paging_op_prep) buffer to immediately fill page from */
    XEN_GUEST_HANDLE_64(const_uint8) buffer;
    /* IN: gfn of page being operated on */
    uint64_aligned_t gfn;
};
typedef struct xen_mem_paging_op xen_mem_paging_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_mem_paging_op_t);

#define XENMEM_access_op                    21
#define XENMEM_access_op_set_access         0
#define XENMEM_access_op_get_access         1
/*
 * XENMEM_access_op_enable_emulate and XENMEM_access_op_disable_emulate are
 * currently unused, but since they were in use at one time, please do not
 * reuse their values.
 *
 * #define XENMEM_access_op_enable_emulate     2
 * #define XENMEM_access_op_disable_emulate    3
 */
#define XENMEM_access_op_set_access_multi   4

typedef enum {
    XENMEM_access_n,
    XENMEM_access_r,
    XENMEM_access_w,
    XENMEM_access_rw,
    XENMEM_access_x,
    XENMEM_access_rx,
    XENMEM_access_wx,
    XENMEM_access_rwx,
    /*
     * Page starts off as r-x, but automatically
     * changes to r-w on a write
     */
    XENMEM_access_rx2rw,
    /*
     * Log access: starts off as n, automatically
     * goes to rwx, generating an event without
     * pausing the vcpu
     */
    XENMEM_access_n2rwx,
    /* Take the domain default */
    XENMEM_access_default
} xenmem_access_t;

struct xen_mem_access_op {
    /* XENMEM_access_op_* */
    uint8_t op;
    /* xenmem_access_t */
    uint8_t access;
    domid_t domid;
    /*
     * Number of pages for set op (or size of pfn_list for
     * XENMEM_access_op_set_access_multi)
     * Ignored on setting default access and other ops
     */
    uint32_t nr;
    /*
     * First pfn for set op
     * pfn for get op
     * ~0ull is used to set and get the default access for pages
     */
    uint64_aligned_t pfn;
    /*
     * List of pfns to set access for
     * Used only with XENMEM_access_op_set_access_multi
     */
    XEN_GUEST_HANDLE(const_uint64) pfn_list;
    /*
     * Corresponding list of access settings for pfn_list
     * Used only with XENMEM_access_op_set_access_multi
     */
    XEN_GUEST_HANDLE(const_uint8) access_list;
};
typedef struct xen_mem_access_op xen_mem_access_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_mem_access_op_t);
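
/*
 * Illustrative sketch (not part of the ABI; privileged callers only):
 * write-protecting 'nr' pages starting at 'first_pfn' so that writes
 * generate memory-access events, assuming a HYPERVISOR_memory_op()
 * wrapper:
 *
 *     struct xen_mem_access_op mao = {
 *         .op     = XENMEM_access_op_set_access,
 *         .access = XENMEM_access_rx,   // read/execute only; no write
 *         .domid  = domid,
 *         .nr     = nr,
 *         .pfn    = first_pfn,
 *     };
 *     rc = HYPERVISOR_memory_op(XENMEM_access_op, &mao);
 */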

#define XENMEM_sharing_op                   22
#define XENMEM_sharing_op_nominate_gfn      0
#define XENMEM_sharing_op_nominate_gref     1
#define XENMEM_sharing_op_share             2
#define XENMEM_sharing_op_debug_gfn         3
#define XENMEM_sharing_op_debug_mfn         4
#define XENMEM_sharing_op_debug_gref        5
#define XENMEM_sharing_op_add_physmap       6
#define XENMEM_sharing_op_audit             7
#define XENMEM_sharing_op_range_share       8
#define XENMEM_sharing_op_fork              9
#define XENMEM_sharing_op_fork_reset        10

#define XENMEM_SHARING_OP_S_HANDLE_INVALID  (-10)
#define XENMEM_SHARING_OP_C_HANDLE_INVALID  (-9)

/* The following allows sharing of grant refs. This is useful
 * for sharing utilities sitting as "filters" in IO backends
 * (e.g. memshr + blktap(2)). The IO backend is only exposed
 * to grant references, and this allows sharing of the grefs. */
#define XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG   (xen_mk_ullong(1) << 62)

#define XENMEM_SHARING_OP_FIELD_MAKE_GREF(field, val)  \
    (field) = (XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG | (val))
#define XENMEM_SHARING_OP_FIELD_IS_GREF(field)         \
    ((field) & XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG)
#define XENMEM_SHARING_OP_FIELD_GET_GREF(field)        \
    ((field) & (~XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG))

struct xen_mem_sharing_op {
    uint8_t op;     /* XENMEM_sharing_op_* */
    domid_t domain;

    union {
        struct mem_sharing_op_nominate {  /* OP_NOMINATE_xxx           */
            union {
                uint64_aligned_t gfn;     /* IN: gfn to nominate       */
                uint32_t grant_ref;       /* IN: grant ref to nominate */
            } u;
            uint64_aligned_t handle;      /* OUT: the handle           */
        } nominate;
        struct mem_sharing_op_share {       /* OP_SHARE/ADD_PHYSMAP */
            uint64_aligned_t source_gfn;    /* IN: the gfn of the source page */
            uint64_aligned_t source_handle; /* IN: handle to the source page */
            uint64_aligned_t client_gfn;    /* IN: the client gfn */
            uint64_aligned_t client_handle; /* IN: handle to the client page */
            domid_t client_domain;          /* IN: the client domain id */
        } share;
        struct mem_sharing_op_range {   /* OP_RANGE_SHARE */
            uint64_aligned_t first_gfn; /* IN: the first gfn */
            uint64_aligned_t last_gfn;  /* IN: the last gfn */
            uint64_aligned_t opaque;    /* Must be set to 0 */
            domid_t client_domain;      /* IN: the client domain id */
            uint16_t _pad[3];           /* Must be set to 0 */
        } range;
        struct mem_sharing_op_debug { /* OP_DEBUG_xxx */
            union {
                uint64_aligned_t gfn; /* IN: gfn to debug  */
                uint64_aligned_t mfn; /* IN: mfn to debug  */
                uint32_t gref;        /* IN: gref to debug */
            } u;
        } debug;
        struct mem_sharing_op_fork { /* OP_FORK */
            domid_t parent_domain;   /* IN: parent's domain id */
/* Only makes sense for short-lived forks */
#define XENMEM_FORK_WITH_IOMMU_ALLOWED (1u << 0)
/* Only makes sense for short-lived forks */
#define XENMEM_FORK_BLOCK_INTERRUPTS   (1u << 1)
            uint16_t flags;          /* IN: optional settings */
            uint32_t pad;            /* Must be set to 0 */
        } fork;
    } u;
};
typedef struct xen_mem_sharing_op xen_mem_sharing_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_mem_sharing_op_t);
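
/*
 * Illustrative sketch (not part of the ABI; tools-side): sharing one page
 * between two domains by nominating each gfn and then merging the pair,
 * assuming a HYPERVISOR_memory_op() wrapper:
 *
 *     struct xen_mem_sharing_op mso = {
 *         .op     = XENMEM_sharing_op_nominate_gfn,
 *         .domain = source_dom,
 *     };
 *     mso.u.nominate.u.gfn = source_gfn;
 *     rc = HYPERVISOR_memory_op(XENMEM_sharing_op, &mso);
 *     // mso.u.nominate.handle now identifies the source page. Repeat the
 *     // nomination for the client domain, then issue
 *     // XENMEM_sharing_op_share with both gfns and handles filled into
 *     // mso.u.share.
 */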

/*
 * Attempt to stake a claim for a domain on a quantity of pages
 * of system RAM, but _not_ assign specific pageframes. Only
 * arithmetic is performed so the hypercall is very fast and need
 * not be preemptible, thus sidestepping time-of-check-time-of-use
 * races for memory allocation. Returns 0 if the hypervisor page
 * allocator has atomically and successfully claimed the requested
 * number of pages, else non-zero.
 *
 * Any domain may have only one active claim. When sufficient memory
 * has been allocated to resolve the claim, the claim silently expires.
 * Claiming zero pages effectively resets any outstanding claim and
 * is always successful.
 *
 * Note that a valid claim may be staked even after memory has been
 * allocated for a domain. In this case, the claim is not incremental,
 * i.e. if the domain's total page count is 3, and a claim is staked
 * for 10, only 7 additional pages are claimed.
 *
 * Caller must be privileged or the hypercall fails.
 */
#define XENMEM_claim_pages                  24

/*
 * XENMEM_claim_pages flags - there are no flags at this time.
 * The zero value is appropriate.
 */
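
/*
 * Illustrative sketch (not part of the ABI; privileged callers only):
 * staking a claim for 'nr_pages' on behalf of 'domid' before building the
 * domain, assuming a HYPERVISOR_memory_op() wrapper:
 *
 *     struct xen_memory_reservation claim = {
 *         .nr_extents   = nr_pages,
 *         .extent_order = 0,
 *         .mem_flags    = 0,
 *         .domid        = domid,
 *         // extent_start is left zero, as XENMEM_claim_pages requires
 *     };
 *     rc = HYPERVISOR_memory_op(XENMEM_claim_pages, &claim);
 *     // rc == 0: the allocator guarantees nr_pages can be allocated.
 */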

/*
 * With some legacy devices, certain guest-physical addresses cannot safely
 * be used for other purposes, e.g. to map guest RAM. This hypercall
 * enumerates those regions so the toolstack can avoid using them.
 */
#define XENMEM_reserved_device_memory_map   27
struct xen_reserved_device_memory {
    xen_pfn_t start_pfn;
    xen_ulong_t nr_pages;
};
typedef struct xen_reserved_device_memory xen_reserved_device_memory_t;
DEFINE_XEN_GUEST_HANDLE(xen_reserved_device_memory_t);

struct xen_reserved_device_memory_map {
#define XENMEM_RDM_ALL 1 /* Request all regions (ignore dev union). */
    /* IN */
    uint32_t flags;
    /*
     * IN/OUT
     *
     * Gets set to the required number of entries when too low,
     * signaled by error code -ERANGE.
     */
    unsigned int nr_entries;
    /* OUT */
    XEN_GUEST_HANDLE(xen_reserved_device_memory_t) buffer;
    /* IN */
    union {
        physdev_pci_device_t pci;
    } dev;
};
typedef struct xen_reserved_device_memory_map xen_reserved_device_memory_map_t;
DEFINE_XEN_GUEST_HANDLE(xen_reserved_device_memory_map_t);

#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */

/*
 * Get the pages for a particular guest resource, so that they can be
 * mapped directly by a tools domain.
 */
#define XENMEM_acquire_resource 28
struct xen_mem_acquire_resource {
    /* IN - The domain whose resource is to be mapped */
    domid_t domid;
    /* IN - the type of resource */
    uint16_t type;

#define XENMEM_resource_ioreq_server 0
#define XENMEM_resource_grant_table 1
#define XENMEM_resource_vmtrace_buf 2

    /*
     * IN - a type-specific resource identifier, which must be zero
     *      unless stated otherwise.
     *
     * type == XENMEM_resource_ioreq_server -> id == ioreq server id
     * type == XENMEM_resource_grant_table -> id defined below
     */
    uint32_t id;

#define XENMEM_resource_grant_table_id_shared 0
#define XENMEM_resource_grant_table_id_status 1

    /*
     * IN/OUT
     *
     * As an IN parameter number of frames of the resource to be mapped.
     * This value may be updated over the course of the operation.
     *
     * When frame_list is NULL and nr_frames is 0, this is interpreted as a
     * request for the size of the resource, which shall be returned in the
     * nr_frames field.
     *
     * The size of a resource will never be zero, but a nonzero result doesn't
     * guarantee that a subsequent mapping request will be successful. There
     * are further type/id specific constraints which may change between the
     * two calls.
     */
    uint32_t nr_frames;
    uint32_t pad;
    /*
     * IN - the index of the initial frame to be mapped. This parameter
     *      is ignored if nr_frames is 0. This value may be updated
     *      over the course of the operation.
     */
    uint64_t frame;

#define XENMEM_resource_ioreq_server_frame_bufioreq 0
#define XENMEM_resource_ioreq_server_frame_ioreq(n) (1 + (n))

    /*
     * IN/OUT - If the tools domain is PV then, upon return, frame_list
     *          will be populated with the MFNs of the resource.
     *          If the tools domain is HVM then it is expected that, on
     *          entry, frame_list will be populated with a list of GFNs
     *          that will be mapped to the MFNs of the resource.
     *          If -EIO is returned then the frame_list has only been
     *          partially mapped and it is up to the caller to unmap all
     *          the GFNs.
     *          This parameter may be NULL if nr_frames is 0. This
     *          value may be updated over the course of the operation.
     */
    XEN_GUEST_HANDLE(xen_pfn_t) frame_list;
};
typedef struct xen_mem_acquire_resource xen_mem_acquire_resource_t;
DEFINE_XEN_GUEST_HANDLE(xen_mem_acquire_resource_t);
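
/*
 * Illustrative sketch (not part of the ABI; tools-side): sizing and then
 * mapping a domain's shared grant table frames. Assumes a
 * HYPERVISOR_memory_op() wrapper and, for an HVM tools domain, that the
 * GFNs in 'gfns' have been chosen by the caller:
 *
 *     struct xen_mem_acquire_resource mar = {
 *         .domid = domid,
 *         .type  = XENMEM_resource_grant_table,
 *         .id    = XENMEM_resource_grant_table_id_shared,
 *         .nr_frames = 0,        // with NULL frame_list: query the size
 *     };
 *     rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &mar);
 *     // mar.nr_frames now holds the resource size; then map it:
 *     mar.frame = 0;
 *     set_xen_guest_handle(mar.frame_list, gfns);
 *     rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &mar);
 */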

/*
 * XENMEM_get_vnumainfo is used by a guest to get
 * its vNUMA topology from the hypervisor.
 */
#define XENMEM_get_vnumainfo                26

/* vNUMA node memory ranges */
struct xen_vmemrange {
    uint64_t start, end;
    unsigned int flags;
    unsigned int nid;
};
typedef struct xen_vmemrange xen_vmemrange_t;
DEFINE_XEN_GUEST_HANDLE(xen_vmemrange_t);

/*
 * vNUMA topology specifies the vNUMA node count, distance table,
 * memory ranges and vcpu-to-vnode mapping provided for a guest.
 * The guest passes nr_vnodes, nr_vmemranges and nr_vcpus to the
 * XENMEM_get_vnumainfo hypercall to indicate the sizes of the buffers
 * it has made available. After filling the guest's structures, the
 * hypervisor copies the actual nr_vnodes, nr_vmemranges and nr_vcpus
 * back to the guest. If the guest's values were too small, the
 * hypercall instead fails and returns the required values, so the
 * guest can resize its buffers and retry.
 */
struct xen_vnuma_topology_info {
    /* IN */
    domid_t domid;
    uint16_t pad;
    /* IN/OUT */
    unsigned int nr_vnodes;
    unsigned int nr_vcpus;
    unsigned int nr_vmemranges;
    /* OUT */
    union {
        XEN_GUEST_HANDLE(uint) h;
        uint64_t pad;
    } vdistance;
    union {
        XEN_GUEST_HANDLE(uint) h;
        uint64_t pad;
    } vcpu_to_vnode;
    union {
        XEN_GUEST_HANDLE(xen_vmemrange_t) h;
        uint64_t pad;
    } vmemrange;
};
typedef struct xen_vnuma_topology_info xen_vnuma_topology_info_t;
DEFINE_XEN_GUEST_HANDLE(xen_vnuma_topology_info_t);
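
/*
 * Illustrative sketch (not part of the ABI): the buffer-sizing pattern for
 * XENMEM_get_vnumainfo, assuming a HYPERVISOR_memory_op() wrapper:
 *
 *     struct xen_vnuma_topology_info topo = {
 *         .domid = DOMID_SELF,
 *         .nr_vnodes = nv, .nr_vcpus = nc, .nr_vmemranges = nm,
 *     };
 *     set_xen_guest_handle(topo.vdistance.h, distance);  // nv * nv uints
 *     set_xen_guest_handle(topo.vcpu_to_vnode.h, v2v);   // nc uints
 *     set_xen_guest_handle(topo.vmemrange.h, ranges);    // nm ranges
 *     rc = HYPERVISOR_memory_op(XENMEM_get_vnumainfo, &topo);
 *     // On failure due to undersized buffers, topo.nr_* report the
 *     // required sizes; reallocate and retry.
 */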

/* Next available subop number is 29 */

#endif /* __XEN_PUBLIC_MEMORY_H__ */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */