1 /******************************************************************************
2 * memory.h
3 *
4 * Memory reservation and information.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
25 */
26
27 #ifndef __XEN_PUBLIC_MEMORY_H__
28 #define __XEN_PUBLIC_MEMORY_H__
29
30 #include "xen.h"
31 #include "physdev.h"
32
33 /*
34 * Increase or decrease the specified domain's memory reservation. Returns the
35 * number of extents successfully allocated or freed.
36 * arg == addr of struct xen_memory_reservation.
37 */
38 #define XENMEM_increase_reservation 0
39 #define XENMEM_decrease_reservation 1
40 #define XENMEM_populate_physmap 6
41
42 #if __XEN_INTERFACE_VERSION__ >= 0x00030209
43 /*
44 * Maximum # bits addressable by the user of the allocated region (e.g., I/O
45 * devices often have a 32-bit limitation even in 64-bit systems). If zero
46 * then the user has no addressing restriction. This field is not used by
47 * XENMEM_decrease_reservation.
48 */
49 #define XENMEMF_address_bits(x) (x)
50 #define XENMEMF_get_address_bits(x) ((x) & 0xffu)
51 /* NUMA node to allocate from. */
52 #define XENMEMF_node(x) (((x) + 1) << 8)
53 #define XENMEMF_get_node(x) ((((x) >> 8) - 1) & 0xffu)
54 /* Flag to populate physmap with populate-on-demand entries */
55 #define XENMEMF_populate_on_demand (1<<16)
56 /* Flag to request allocation only from the node specified */
57 #define XENMEMF_exact_node_request (1<<17)
58 #define XENMEMF_exact_node(n) (XENMEMF_node(n) | XENMEMF_exact_node_request)
59 /* Flag to indicate the node specified is a virtual node */
60 #define XENMEMF_vnode (1<<18)
61 #endif
62
63 struct xen_memory_reservation {
64
65 /*
66 * XENMEM_increase_reservation:
67 * OUT: MFN (*not* GMFN) bases of extents that were allocated
68 * XENMEM_decrease_reservation:
69 * IN: GMFN bases of extents to free
70 * XENMEM_populate_physmap:
71 * IN: GPFN bases of extents to populate with memory
72 * OUT: GMFN bases of extents that were allocated
73 * (NB. This command also updates the mach_to_phys translation table)
74 * XENMEM_claim_pages:
75 * IN: must be zero
76 */
77 XEN_GUEST_HANDLE(xen_pfn_t) extent_start;
78
79 /* Number of extents, and size/alignment of each (2^extent_order pages). */
80 xen_ulong_t nr_extents;
81 unsigned int extent_order; /* log2 of pages per extent */
82
83 #if __XEN_INTERFACE_VERSION__ >= 0x00030209
84 /* XENMEMF flags. */
85 unsigned int mem_flags; /* XENMEMF_* values, defined above */
86 #else
87 unsigned int address_bits; /* pre-0x00030209: address-width restriction only */
88 #endif
89
90 /*
91 * Domain whose reservation is being changed.
92 * Unprivileged domains can specify only DOMID_SELF.
93 */
94 domid_t domid;
95 };
96 typedef struct xen_memory_reservation xen_memory_reservation_t;
97 DEFINE_XEN_GUEST_HANDLE(xen_memory_reservation_t);
98
99 /*
100 * An atomic exchange of memory pages. If return code is zero then
101 * @out.extent_list provides GMFNs of the newly-allocated memory.
102 * Returns zero on complete success, otherwise a negative error code.
103 * On complete success, @nr_exchanged == @in.nr_extents always holds.
104 * On partial success @nr_exchanged indicates how much work was done.
105 *
106 * Note that only PV guests can use this operation.
107 */
108 #define XENMEM_exchange 11
109 struct xen_memory_exchange {
110 /*
111 * [IN] Details of memory extents to be exchanged (GMFN bases).
112 * Note that @in.address_bits is ignored and unused.
113 */
114 struct xen_memory_reservation in;
115
116 /*
117 * [IN/OUT] Details of new memory extents.
118 * We require that:
119 * 1. @in.domid == @out.domid
120 * 2. @in.nr_extents << @in.extent_order ==
121 * @out.nr_extents << @out.extent_order
122 * 3. @in.extent_start and @out.extent_start lists must not overlap
123 * 4. @out.extent_start lists GPFN bases to be populated
124 * 5. @out.extent_start is overwritten with allocated GMFN bases
125 */
126 struct xen_memory_reservation out;
127
128 /*
129 * [OUT] Number of input extents that were successfully exchanged:
130 * 1. The first @nr_exchanged input extents were successfully
131 * deallocated.
132 * 2. The corresponding first entries in the output extent list correctly
133 * indicate the GMFNs that were successfully exchanged.
134 * 3. All other input and output extents are untouched.
135 * 4. If not all input extents are exchanged then the return code of this
136 * command will be non-zero.
137 * 5. THIS FIELD MUST BE INITIALISED TO ZERO BY THE CALLER!
138 */
139 xen_ulong_t nr_exchanged;
140 };
141 typedef struct xen_memory_exchange xen_memory_exchange_t;
142 DEFINE_XEN_GUEST_HANDLE(xen_memory_exchange_t);
143
144 /*
145 * Returns the maximum machine frame number of mapped RAM in this system.
146 * This command always succeeds (it never returns an error code).
147 * arg == NULL.
148 */
149 #define XENMEM_maximum_ram_page 2
150
151 /*
152 * Returns the current or maximum memory reservation, in pages, of the
153 * specified domain (may be DOMID_SELF). Returns -ve errcode on failure.
154 * arg == addr of domid_t.
155 */
156 #define XENMEM_current_reservation 3
157 #define XENMEM_maximum_reservation 4
158
159 /*
160 * Returns the maximum GPFN in use by the guest, or -ve errcode on failure.
161 */
162 #define XENMEM_maximum_gpfn 14
163
164 /*
165 * Returns a list of MFN bases of 2MB extents comprising the machine_to_phys
166 * mapping table. Architectures which do not have a m2p table do not implement
167 * this command.
168 * arg == addr of xen_machphys_mfn_list_t.
169 */
170 #define XENMEM_machphys_mfn_list 5
171 struct xen_machphys_mfn_list {
172 /*
173 * IN: Size of the 'extent_start' array. Fewer entries will be filled if
174 * the machphys table is smaller than max_extents * 2MB.
175 */
176 unsigned int max_extents;
177
178 /*
179 * IN: Pointer to buffer to fill with list of extent starts. If there are
180 * any large discontiguities in the machine address space, 2MB gaps in
181 * the machphys table will be represented by an MFN base of zero.
182 */
183 XEN_GUEST_HANDLE(xen_pfn_t) extent_start;
184
185 /*
186 * OUT: Number of extents written to the above array. This will be smaller
187 * than 'max_extents' if the machphys table is smaller than max_e * 2MB.
188 */
189 unsigned int nr_extents;
190 };
191 typedef struct xen_machphys_mfn_list xen_machphys_mfn_list_t;
192 DEFINE_XEN_GUEST_HANDLE(xen_machphys_mfn_list_t);
193
194 /*
195 * For a compat caller, this is identical to XENMEM_machphys_mfn_list.
196 *
197 * For a non compat caller, this functions similarly to
198 * XENMEM_machphys_mfn_list, but returns the mfns making up the compatibility
199 * m2p table.
200 */
201 #define XENMEM_machphys_compat_mfn_list 25
202
203 /*
204 * Returns the location in virtual address space of the machine_to_phys
205 * mapping table. Architectures which do not have a m2p table, or which do not
206 * map it by default into guest address space, do not implement this command.
207 * arg == addr of xen_machphys_mapping_t.
208 */
209 #define XENMEM_machphys_mapping 12
210 struct xen_machphys_mapping {
211 xen_ulong_t v_start, v_end; /* OUT: Start and end virtual addresses. */
212 xen_ulong_t max_mfn; /* OUT: Maximum MFN that can be looked up. */
213 };
214 typedef struct xen_machphys_mapping xen_machphys_mapping_t;
215 DEFINE_XEN_GUEST_HANDLE(xen_machphys_mapping_t);
216
217 /* Source mapping space. */
218 /* ` enum phys_map_space { */
219 #define XENMAPSPACE_shared_info 0 /* shared info page */
220 #define XENMAPSPACE_grant_table 1 /* grant table page */
221 #define XENMAPSPACE_gmfn 2 /* GMFN */
222 #define XENMAPSPACE_gmfn_range 3 /* GMFN range, XENMEM_add_to_physmap only. */
223 #define XENMAPSPACE_gmfn_foreign 4 /* GMFN from another dom,
224 * XENMEM_add_to_physmap_batch only. */
225 #define XENMAPSPACE_dev_mmio 5 /* device mmio region
226 ARM only; the region is mapped in
227 Stage-2 using the Normal Memory
228 Inner/Outer Write-Back Cacheable
229 memory attribute. */
230 /* ` } */
231
232 /*
233 * Sets the GPFN at which a particular page appears in the specified guest's
234 * physical address space (translated guests only).
235 * arg == addr of xen_add_to_physmap_t.
236 */
237 #define XENMEM_add_to_physmap 7
238 struct xen_add_to_physmap {
239 /* IN: Which domain to change the mapping for. */
240 domid_t domid;
241
242 /* IN: Number of pages to go through for gmfn_range */
243 uint16_t size;
244
245 unsigned int space; /* => enum phys_map_space */
246
247 #define XENMAPIDX_grant_table_status 0x80000000 /* high bit of idx; NOTE(review): appears to select grant-table status frames - confirm */
248
249 /* Index into space being mapped. */
250 xen_ulong_t idx;
251
252 /* GPFN in domid where the source mapping page should appear. */
253 xen_pfn_t gpfn;
254 };
255 typedef struct xen_add_to_physmap xen_add_to_physmap_t;
256 DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_t);
257
258 /* A batched version of add_to_physmap. */
259 #define XENMEM_add_to_physmap_batch 23
260 struct xen_add_to_physmap_batch {
261 /* IN */
262 /* Which domain to change the mapping for. */
263 domid_t domid;
264 uint16_t space; /* => enum phys_map_space */
265
266 /* Number of pages to go through */
267 uint16_t size;
268
269 #if __XEN_INTERFACE_VERSION__ < 0x00040700
270 domid_t foreign_domid; /* IFF gmfn_foreign. Should be 0 for other spaces. */
271 #else
272 union xen_add_to_physmap_batch_extra {
273 domid_t foreign_domid; /* gmfn_foreign */
274 uint16_t res0; /* All the other spaces. Should be 0 */
275 } u;
276 #endif
277
278 /* Indexes into space being mapped. */
279 XEN_GUEST_HANDLE(xen_ulong_t) idxs;
280
281 /* GPFNs in domid where the source mapping pages should appear. */
282 XEN_GUEST_HANDLE(xen_pfn_t) gpfns;
283
284 /* OUT */
285
286 /* Per index error code. */
287 XEN_GUEST_HANDLE(int) errs;
288 };
289 typedef struct xen_add_to_physmap_batch xen_add_to_physmap_batch_t;
290 DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_batch_t);
291
292 #if __XEN_INTERFACE_VERSION__ < 0x00040400
293 #define XENMEM_add_to_physmap_range XENMEM_add_to_physmap_batch
294 #define xen_add_to_physmap_range xen_add_to_physmap_batch
295 typedef struct xen_add_to_physmap_batch xen_add_to_physmap_range_t;
296 DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_range_t);
297 #endif
298
299 /*
300 * Unmaps the page appearing at a particular GPFN from the specified guest's
301 * physical address space (translated guests only).
302 * arg == addr of xen_remove_from_physmap_t.
303 */
304 #define XENMEM_remove_from_physmap 15
305 struct xen_remove_from_physmap {
306 /* IN: Which domain to change the mapping for. */
307 domid_t domid;
308
309 /* IN: GPFN of the current mapping of the page. */
310 xen_pfn_t gpfn;
311 };
312 typedef struct xen_remove_from_physmap xen_remove_from_physmap_t;
313 DEFINE_XEN_GUEST_HANDLE(xen_remove_from_physmap_t);
314
315 /*** REMOVED ***/
316 /*#define XENMEM_translate_gpfn_list 8*/
317
318 /*
319 * Returns the pseudo-physical memory map as it was when the domain
320 * was started (specified by XENMEM_set_memory_map).
321 * arg == addr of xen_memory_map_t.
322 */
323 #define XENMEM_memory_map 9
324 struct xen_memory_map {
325 /*
326 * On call the number of entries which can be stored in buffer. On
327 * return the number of entries which have been stored in
328 * buffer.
329 */
330 unsigned int nr_entries; /* IN/OUT */
331
332 /*
333 * Entries in the buffer are in the same format as returned by the
334 * BIOS INT 0x15 EAX=0xE820 call.
335 */
336 XEN_GUEST_HANDLE(void) buffer;
337 };
338 typedef struct xen_memory_map xen_memory_map_t;
339 DEFINE_XEN_GUEST_HANDLE(xen_memory_map_t);
340
341 /*
342 * Returns the real physical memory map. Passes the same structure as
343 * XENMEM_memory_map.
344 * Specifying buffer as NULL will return the number of entries required
345 * to store the complete memory map.
346 * arg == addr of xen_memory_map_t.
347 */
348 #define XENMEM_machine_memory_map 10
349
350 /*
351 * Set the pseudo-physical memory map of a domain, as returned by
352 * XENMEM_memory_map.
353 * arg == addr of xen_foreign_memory_map_t.
354 */
355 #define XENMEM_set_memory_map 13
356 struct xen_foreign_memory_map {
357 domid_t domid; /* IN: domain whose memory map is being set */
358 struct xen_memory_map map; /* IN: the map, in XENMEM_memory_map format */
359 };
360 typedef struct xen_foreign_memory_map xen_foreign_memory_map_t;
361 DEFINE_XEN_GUEST_HANDLE(xen_foreign_memory_map_t);
362
363 #define XENMEM_set_pod_target 16
364 #define XENMEM_get_pod_target 17
365 struct xen_pod_target {
366 /* IN */
367 uint64_t target_pages; /* populate-on-demand target, in pages */
368 /* OUT */
369 uint64_t tot_pages; /* presumably total pages held by the domain - confirm */
370 uint64_t pod_cache_pages; /* pages in the PoD cache */
371 uint64_t pod_entries; /* outstanding PoD entries */
372 /* IN */
373 domid_t domid; /* domain whose PoD target is read or set */
374 };
375 typedef struct xen_pod_target xen_pod_target_t;
376
377 #if defined(__XEN__) || defined(__XEN_TOOLS__)
378
379 #ifndef uint64_aligned_t
380 #define uint64_aligned_t uint64_t
381 #endif
382
383 /*
384 * Get the number of MFNs saved through memory sharing.
385 * The call never fails.
386 */
387 #define XENMEM_get_sharing_freed_pages 18
388 #define XENMEM_get_sharing_shared_pages 19
389
390 #define XENMEM_paging_op 20
391 #define XENMEM_paging_op_nominate 0
392 #define XENMEM_paging_op_evict 1
393 #define XENMEM_paging_op_prep 2
394
395 struct xen_mem_paging_op {
396 uint8_t op; /* XENMEM_paging_op_* */
397 domid_t domain; /* IN: domain subject to the paging operation */
398
399 /* IN: (XENMEM_paging_op_prep) buffer to immediately fill page from */
400 XEN_GUEST_HANDLE_64(const_uint8) buffer;
401 /* IN: gfn of page being operated on */
402 uint64_aligned_t gfn;
403 };
404 typedef struct xen_mem_paging_op xen_mem_paging_op_t;
405 DEFINE_XEN_GUEST_HANDLE(xen_mem_paging_op_t);
406
407 #define XENMEM_access_op 21
408 #define XENMEM_access_op_set_access 0
409 #define XENMEM_access_op_get_access 1
410 /*
411 * XENMEM_access_op_enable_emulate and XENMEM_access_op_disable_emulate are
412 * currently unused, but since they have been in use please do not reuse them.
413 *
414 * #define XENMEM_access_op_enable_emulate 2
415 * #define XENMEM_access_op_disable_emulate 3
416 */
417 #define XENMEM_access_op_set_access_multi 4
418
419 typedef enum {
420 XENMEM_access_n, /* no access */
421 XENMEM_access_r, /* read */
422 XENMEM_access_w, /* write */
423 XENMEM_access_rw, /* read/write */
424 XENMEM_access_x, /* execute */
425 XENMEM_access_rx, /* read/execute */
426 XENMEM_access_wx, /* write/execute */
427 XENMEM_access_rwx, /* read/write/execute */
428 /*
429 * Page starts off as r-x, but automatically
430 * changes to r-w on a write
431 */
432 XENMEM_access_rx2rw,
433 /*
434 * Log access: starts off as n, automatically
435 * goes to rwx, generating an event without
436 * pausing the vcpu
437 */
438 XENMEM_access_n2rwx,
439 /* Take the domain default */
440 XENMEM_access_default
441 } xenmem_access_t;
442
443 struct xen_mem_access_op {
444 /* XENMEM_access_op_* */
445 uint8_t op;
446 /* xenmem_access_t */
447 uint8_t access;
448 domid_t domid; /* IN: target domain */
449 /*
450 * Number of pages for set op (or size of pfn_list for
451 * XENMEM_access_op_set_access_multi)
452 * Ignored on setting default access and other ops
453 */
454 uint32_t nr;
455 /*
456 * First pfn for set op
457 * pfn for get op
458 * ~0ull is used to set and get the default access for pages
459 */
460 uint64_aligned_t pfn;
461 /*
462 * List of pfns to set access for
463 * Used only with XENMEM_access_op_set_access_multi
464 */
465 XEN_GUEST_HANDLE(const_uint64) pfn_list;
466 /*
467 * Corresponding list of access settings for pfn_list
468 * Used only with XENMEM_access_op_set_access_multi
469 */
470 XEN_GUEST_HANDLE(const_uint8) access_list;
471 };
472 typedef struct xen_mem_access_op xen_mem_access_op_t;
473 DEFINE_XEN_GUEST_HANDLE(xen_mem_access_op_t);
474
475 #define XENMEM_sharing_op 22
476 #define XENMEM_sharing_op_nominate_gfn 0
477 #define XENMEM_sharing_op_nominate_gref 1
478 #define XENMEM_sharing_op_share 2
479 #define XENMEM_sharing_op_debug_gfn 3
480 #define XENMEM_sharing_op_debug_mfn 4
481 #define XENMEM_sharing_op_debug_gref 5
482 #define XENMEM_sharing_op_add_physmap 6
483 #define XENMEM_sharing_op_audit 7
484 #define XENMEM_sharing_op_range_share 8
485 #define XENMEM_sharing_op_fork 9
486 #define XENMEM_sharing_op_fork_reset 10
487
488 #define XENMEM_SHARING_OP_S_HANDLE_INVALID (-10)
489 #define XENMEM_SHARING_OP_C_HANDLE_INVALID (-9)
490
491 /* The following allows sharing of grant refs. This is useful
492 * for sharing utilities sitting as "filters" in IO backends
493 * (e.g. memshr + blktap(2)). The IO backend is only exposed
494 * to grant references, and this allows sharing of the grefs */
495 #define XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG (xen_mk_ullong(1) << 62)
496
497 #define XENMEM_SHARING_OP_FIELD_MAKE_GREF(field, val) \
498 (field) = (XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG | val)
499 #define XENMEM_SHARING_OP_FIELD_IS_GREF(field) \
500 ((field) & XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG)
501 #define XENMEM_SHARING_OP_FIELD_GET_GREF(field) \
502 ((field) & (~XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG))
503
504 struct xen_mem_sharing_op {
505 uint8_t op; /* XENMEM_sharing_op_* */
506 domid_t domain; /* IN: domain on which to operate */
507
508 union {
509 struct mem_sharing_op_nominate { /* OP_NOMINATE_xxx */
510 union {
511 uint64_aligned_t gfn; /* IN: gfn to nominate */
512 uint32_t grant_ref; /* IN: grant ref to nominate */
513 } u;
514 uint64_aligned_t handle; /* OUT: the handle */
515 } nominate;
516 struct mem_sharing_op_share { /* OP_SHARE/ADD_PHYSMAP */
517 uint64_aligned_t source_gfn; /* IN: the gfn of the source page */
518 uint64_aligned_t source_handle; /* IN: handle to the source page */
519 uint64_aligned_t client_gfn; /* IN: the client gfn */
520 uint64_aligned_t client_handle; /* IN: handle to the client page */
521 domid_t client_domain; /* IN: the client domain id */
522 } share;
523 struct mem_sharing_op_range { /* OP_RANGE_SHARE */
524 uint64_aligned_t first_gfn; /* IN: the first gfn */
525 uint64_aligned_t last_gfn; /* IN: the last gfn */
526 uint64_aligned_t opaque; /* Must be set to 0 */
527 domid_t client_domain; /* IN: the client domain id */
528 uint16_t _pad[3]; /* Must be set to 0 */
529 } range;
530 struct mem_sharing_op_debug { /* OP_DEBUG_xxx */
531 union {
532 uint64_aligned_t gfn; /* IN: gfn to debug */
533 uint64_aligned_t mfn; /* IN: mfn to debug */
534 uint32_t gref; /* IN: gref to debug */
535 } u;
536 } debug;
537 struct mem_sharing_op_fork { /* OP_FORK */
538 domid_t parent_domain; /* IN: parent's domain id */
539 /* Only makes sense for short-lived forks */
540 #define XENMEM_FORK_WITH_IOMMU_ALLOWED (1u << 0)
541 /* Only makes sense for short-lived forks */
542 #define XENMEM_FORK_BLOCK_INTERRUPTS (1u << 1)
543 uint16_t flags; /* IN: optional settings */
544 uint32_t pad; /* Must be set to 0 */
545 } fork;
546 } u; /* op-specific arguments, selected by 'op' */
547 };
548 typedef struct xen_mem_sharing_op xen_mem_sharing_op_t;
549 DEFINE_XEN_GUEST_HANDLE(xen_mem_sharing_op_t);
550
551 /*
552 * Attempt to stake a claim for a domain on a quantity of pages
553 * of system RAM, but _not_ assign specific pageframes. Only
554 * arithmetic is performed so the hypercall is very fast and need
555 * not be preemptible, thus sidestepping time-of-check-time-of-use
556 * races for memory allocation. Returns 0 if the hypervisor page
557 * allocator has atomically and successfully claimed the requested
558 * number of pages, else non-zero.
559 *
560 * Any domain may have only one active claim. When sufficient memory
561 * has been allocated to resolve the claim, the claim silently expires.
562 * Claiming zero pages effectively resets any outstanding claim and
563 * is always successful.
564 *
565 * Note that a valid claim may be staked even after memory has been
566 * allocated for a domain. In this case, the claim is not incremental,
567 * i.e. if the domain's total page count is 3, and a claim is staked
568 * for 10, only 7 additional pages are claimed.
569 *
570 * Caller must be privileged or the hypercall fails.
571 */
572 #define XENMEM_claim_pages 24
573
574 /*
575 * XENMEM_claim_pages flags - there are no flags at this time.
576 * The zero value is appropriate.
577 */
578
579 /*
580 * With some legacy devices, certain guest-physical addresses cannot safely
581 * be used for other purposes, e.g. to map guest RAM. This hypercall
582 * enumerates those regions so the toolstack can avoid using them.
583 */
584 #define XENMEM_reserved_device_memory_map 27
585 struct xen_reserved_device_memory {
586 xen_pfn_t start_pfn; /* first page of the reserved region */
587 xen_ulong_t nr_pages; /* length of the region in pages */
588 };
589 typedef struct xen_reserved_device_memory xen_reserved_device_memory_t;
590 DEFINE_XEN_GUEST_HANDLE(xen_reserved_device_memory_t);
591
592 struct xen_reserved_device_memory_map {
593 #define XENMEM_RDM_ALL 1 /* Request all regions (ignore dev union). */
594 /* IN */
595 uint32_t flags; /* XENMEM_RDM_* */
596 /*
597 * IN/OUT
598 *
599 * Gets set to the required number of entries when too low,
600 * signaled by error code -ERANGE.
601 */
602 unsigned int nr_entries;
603 /* OUT */
604 XEN_GUEST_HANDLE(xen_reserved_device_memory_t) buffer;
605 /* IN */
606 union {
607 struct physdev_pci_device pci;
608 } dev; /* device selector; ignored when XENMEM_RDM_ALL is set */
609 };
610 typedef struct xen_reserved_device_memory_map xen_reserved_device_memory_map_t;
611 DEFINE_XEN_GUEST_HANDLE(xen_reserved_device_memory_map_t);
612
613 #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
614
615 /*
616 * Get the pages for a particular guest resource, so that they can be
617 * mapped directly by a tools domain.
618 */
619 #define XENMEM_acquire_resource 28
620 struct xen_mem_acquire_resource {
621 /* IN - The domain whose resource is to be mapped */
622 domid_t domid;
623 /* IN - the type of resource */
624 uint16_t type;
625
626 #define XENMEM_resource_ioreq_server 0
627 #define XENMEM_resource_grant_table 1
628
629 /*
630 * IN - a type-specific resource identifier, which must be zero
631 * unless stated otherwise.
632 *
633 * type == XENMEM_resource_ioreq_server -> id == ioreq server id
634 * type == XENMEM_resource_grant_table -> id defined below
635 */
636 uint32_t id;
637
638 #define XENMEM_resource_grant_table_id_shared 0
639 #define XENMEM_resource_grant_table_id_status 1
640
641 /*
642 * IN/OUT - As an IN parameter number of frames of the resource
643 * to be mapped. However, if the specified value is 0 and
644 * frame_list is NULL then this field will be set to the
645 * maximum value supported by the implementation on return.
646 */
647 uint32_t nr_frames;
648 uint32_t pad; /* NOTE(review): presumably must be zero - confirm against hypervisor side */
649 /*
650 * IN - the index of the initial frame to be mapped. This parameter
651 * is ignored if nr_frames is 0.
652 */
653 uint64_t frame;
654
655 #define XENMEM_resource_ioreq_server_frame_bufioreq 0
656 #define XENMEM_resource_ioreq_server_frame_ioreq(n) (1 + (n))
657
658 /*
659 * IN/OUT - If the tools domain is PV then, upon return, frame_list
660 * will be populated with the MFNs of the resource.
661 * If the tools domain is HVM then it is expected that, on
662 * entry, frame_list will be populated with a list of GFNs
663 * that will be mapped to the MFNs of the resource.
664 * If -EIO is returned then the frame_list has only been
665 * partially mapped and it is up to the caller to unmap all
666 * the GFNs.
667 * This parameter may be NULL if nr_frames is 0.
668 */
669 XEN_GUEST_HANDLE(xen_pfn_t) frame_list;
670 };
671 typedef struct xen_mem_acquire_resource xen_mem_acquire_resource_t;
672 DEFINE_XEN_GUEST_HANDLE(xen_mem_acquire_resource_t);
673
674 /*
675 * XENMEM_get_vnumainfo used by guest to get
676 * vNUMA topology from hypervisor.
677 */
678 #define XENMEM_get_vnumainfo 26
679
680 /* vNUMA node memory ranges */
681 struct xen_vmemrange {
682 uint64_t start, end; /* address range of this vNUMA memory range */
683 unsigned int flags; /* NOTE(review): flag semantics not defined in this header - confirm */
684 unsigned int nid; /* vNUMA node id this range belongs to */
685 };
686 typedef struct xen_vmemrange xen_vmemrange_t;
687 DEFINE_XEN_GUEST_HANDLE(xen_vmemrange_t);
688
689 /*
690 * vNUMA topology specifies vNUMA node number, distance table,
691 * memory ranges and vcpu mapping provided for guests.
692 * XENMEM_get_vnumainfo hypercall expects to see from guest
693 * nr_vnodes, nr_vmemranges and nr_vcpus to indicate available memory.
694 * After filling the guest's structures, nr_vnodes, nr_vmemranges and nr_vcpus
695 * are copied back to the guest. The expected values of nr_vnodes,
696 * nr_vmemranges and nr_vcpus are returned to the guest if the values were incorrect.
697 */
698 struct xen_vnuma_topology_info {
699 /* IN */
700 domid_t domid;
701 uint16_t pad; /* padding; no semantics defined here */
702 /* IN/OUT */
703 unsigned int nr_vnodes;
704 unsigned int nr_vcpus;
705 unsigned int nr_vmemranges;
706 /* OUT */
707 union {
708 XEN_GUEST_HANDLE(uint) h;
709 uint64_t pad; /* sizes the union to 64 bits on all ABIs */
710 } vdistance; /* node-to-node distance table */
711 union {
712 XEN_GUEST_HANDLE(uint) h;
713 uint64_t pad; /* sizes the union to 64 bits on all ABIs */
714 } vcpu_to_vnode; /* per-vcpu vNUMA node assignment */
715 union {
716 XEN_GUEST_HANDLE(xen_vmemrange_t) h;
717 uint64_t pad; /* sizes the union to 64 bits on all ABIs */
718 } vmemrange; /* vNUMA memory ranges (xen_vmemrange_t entries) */
719 };
720 typedef struct xen_vnuma_topology_info xen_vnuma_topology_info_t;
721 DEFINE_XEN_GUEST_HANDLE(xen_vnuma_topology_info_t);
722
723 /* Next available subop number is 29 */
724
725 #endif /* __XEN_PUBLIC_MEMORY_H__ */
726
727 /*
728 * Local variables:
729 * mode: C
730 * c-file-style: "BSD"
731 * c-basic-offset: 4
732 * tab-width: 4
733 * indent-tabs-mode: nil
734 * End:
735 */
Cache object: 96f6b4fd5fbdccb971d2a119ae966285
|