/******************************************************************************
 * memory.h
 *
 * Memory reservation and information.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
 */

#ifndef __XEN_PUBLIC_MEMORY_H__
#define __XEN_PUBLIC_MEMORY_H__

#include "xen.h"
#include "physdev.h"

/*
 * Increase or decrease the specified domain's memory reservation. Returns the
 * number of extents successfully allocated or freed.
 * arg == addr of struct xen_memory_reservation.
 */
#define XENMEM_increase_reservation 0
#define XENMEM_decrease_reservation 1
#define XENMEM_populate_physmap     6

#if __XEN_INTERFACE_VERSION__ >= 0x00030209
/*
 * Maximum # bits addressable by the user of the allocated region (e.g., I/O
 * devices often have a 32-bit limitation even in 64-bit systems). If zero
 * then the user has no addressing restriction. This field is not used by
 * XENMEM_decrease_reservation.
 */
#define XENMEMF_address_bits(x)     (x)
#define XENMEMF_get_address_bits(x) ((x) & 0xffu)
/* NUMA node to allocate from. */
#define XENMEMF_node(x)     (((x) + 1) << 8)
#define XENMEMF_get_node(x) ((((x) >> 8) - 1) & 0xffu)
/* Flag to populate physmap with populate-on-demand entries */
#define XENMEMF_populate_on_demand (1<<16)
/* Flag to request allocation only from the node specified */
#define XENMEMF_exact_node_request  (1<<17)
#define XENMEMF_exact_node(n) (XENMEMF_node(n) | XENMEMF_exact_node_request)
/* Flag to indicate the node specified is virtual node */
#define XENMEMF_vnode  (1<<18)
#endif
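
/*
 * Usage sketch (illustrative, not part of the ABI): composing mem_flags
 * for an allocation that must be addressable with 32 bits and must come
 * from physical NUMA node 0 only:
 *
 *     unsigned int mem_flags =
 *         XENMEMF_address_bits(32) | XENMEMF_exact_node(0);
 */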

struct xen_memory_reservation {

    /*
     * XENMEM_increase_reservation:
     *   OUT: MFN (*not* GMFN) bases of extents that were allocated
     * XENMEM_decrease_reservation:
     *   IN:  GMFN bases of extents to free
     * XENMEM_populate_physmap:
     *   IN:  GPFN bases of extents to populate with memory
     *   OUT: GMFN bases of extents that were allocated
     *   (NB. This command also updates the mach_to_phys translation table)
     * XENMEM_claim_pages:
     *   IN: must be zero
     */
    XEN_GUEST_HANDLE(xen_pfn_t) extent_start;

    /* Number of extents, and size/alignment of each (2^extent_order pages). */
    xen_ulong_t  nr_extents;
    unsigned int extent_order;

#if __XEN_INTERFACE_VERSION__ >= 0x00030209
    /* XENMEMF flags. */
    unsigned int mem_flags;
#else
    unsigned int address_bits;
#endif

    /*
     * Domain whose reservation is being changed.
     * Unprivileged domains can specify only DOMID_SELF.
     */
    domid_t      domid;
};
typedef struct xen_memory_reservation xen_memory_reservation_t;
DEFINE_XEN_GUEST_HANDLE(xen_memory_reservation_t);
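
/*
 * Usage sketch (illustrative, not part of the ABI): releasing a single
 * page back to Xen via XENMEM_decrease_reservation, assuming a guest-side
 * HYPERVISOR_memory_op() hypercall wrapper and the set_xen_guest_handle()
 * accessor from the architecture headers:
 *
 *     xen_pfn_t gmfn = page_to_release;       // hypothetical GMFN
 *     struct xen_memory_reservation reservation = {
 *         .nr_extents   = 1,
 *         .extent_order = 0,                  // one single page
 *         .domid        = DOMID_SELF,
 *     };
 *     set_xen_guest_handle(reservation.extent_start, &gmfn);
 *     rc = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
 *     // rc is the number of extents freed: 1 on success here.
 */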

/*
 * An atomic exchange of memory pages. If the return code is zero then
 * @out.extent_start provides GMFNs of the newly-allocated memory.
 * Returns zero on complete success, otherwise a negative error code.
 * On complete success, @nr_exchanged == @in.nr_extents.
 * On partial success, @nr_exchanged indicates how much work was done.
 */
#define XENMEM_exchange             11
struct xen_memory_exchange {
    /*
     * [IN] Details of memory extents to be exchanged (GMFN bases).
     * Note that @in.address_bits is ignored and unused.
     */
    struct xen_memory_reservation in;

    /*
     * [IN/OUT] Details of new memory extents.
     * We require that:
     *  1. @in.domid == @out.domid
     *  2. @in.nr_extents  << @in.extent_order ==
     *     @out.nr_extents << @out.extent_order
     *  3. @in.extent_start and @out.extent_start lists must not overlap
     *  4. @out.extent_start lists GPFN bases to be populated
     *  5. @out.extent_start is overwritten with allocated GMFN bases
     */
    struct xen_memory_reservation out;

    /*
     * [OUT] Number of input extents that were successfully exchanged:
     *  1. The first @nr_exchanged input extents were successfully
     *     deallocated.
     *  2. The corresponding first entries in the output extent list correctly
     *     indicate the GMFNs that were successfully exchanged.
     *  3. All other input and output extents are untouched.
     *  4. If not all input extents are exchanged then the return code of this
     *     command will be non-zero.
     *  5. THIS FIELD MUST BE INITIALISED TO ZERO BY THE CALLER!
     */
    xen_ulong_t nr_exchanged;
};
typedef struct xen_memory_exchange xen_memory_exchange_t;
DEFINE_XEN_GUEST_HANDLE(xen_memory_exchange_t);
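
/*
 * Usage sketch (illustrative, not part of the ABI): exchanging one page
 * for a machine page below 4GB, e.g. to back a 32-bit DMA buffer. Note
 * that nr_exchanged must be zeroed by the caller and that the address
 * restriction is expressed on the *out* reservation:
 *
 *     struct xen_memory_exchange exchange = {
 *         .in  = { .nr_extents = 1, .extent_order = 0,
 *                  .domid = DOMID_SELF },
 *         .out = { .nr_extents = 1, .extent_order = 0,
 *                  .domid = DOMID_SELF,
 *                  .mem_flags = XENMEMF_address_bits(32) },
 *         .nr_exchanged = 0,
 *     };
 *     set_xen_guest_handle(exchange.in.extent_start, &in_gmfn);
 *     set_xen_guest_handle(exchange.out.extent_start, &out_gpfn);
 *     rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
 */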

/*
 * Returns the maximum machine frame number of mapped RAM in this system.
 * This command always succeeds (it never returns an error code).
 * arg == NULL.
 */
#define XENMEM_maximum_ram_page     2

/*
 * Returns the current or maximum memory reservation, in pages, of the
 * specified domain (may be DOMID_SELF). Returns -ve errcode on failure.
 * arg == addr of domid_t.
 */
#define XENMEM_current_reservation  3
#define XENMEM_maximum_reservation  4

/*
 * Returns the maximum GPFN in use by the guest, or -ve errcode on failure.
 */
#define XENMEM_maximum_gpfn         14

/*
 * Returns a list of MFN bases of 2MB extents comprising the machine_to_phys
 * mapping table. Architectures which do not have an m2p table do not
 * implement this command.
 * arg == addr of xen_machphys_mfn_list_t.
 */
#define XENMEM_machphys_mfn_list    5
struct xen_machphys_mfn_list {
    /*
     * Size of the 'extent_start' array. Fewer entries will be filled if the
     * machphys table is smaller than max_extents * 2MB.
     */
    unsigned int max_extents;

    /*
     * Pointer to buffer to fill with list of extent starts. If there are
     * any large discontiguities in the machine address space, 2MB gaps in
     * the machphys table will be represented by an MFN base of zero.
     */
    XEN_GUEST_HANDLE(xen_pfn_t) extent_start;

    /*
     * Number of extents written to the above array. This will be smaller
     * than 'max_extents' if the machphys table is smaller than
     * max_extents * 2MB.
     */
    unsigned int nr_extents;
};
typedef struct xen_machphys_mfn_list xen_machphys_mfn_list_t;
DEFINE_XEN_GUEST_HANDLE(xen_machphys_mfn_list_t);
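
/*
 * Usage sketch (illustrative, not part of the ABI): fetching the first
 * 32 MFN extent bases of the m2p table:
 *
 *     xen_pfn_t extents[32];
 *     struct xen_machphys_mfn_list list = { .max_extents = 32 };
 *     set_xen_guest_handle(list.extent_start, extents);
 *     rc = HYPERVISOR_memory_op(XENMEM_machphys_mfn_list, &list);
 *     // On success, extents[0 .. list.nr_extents-1] hold 2MB extent bases.
 */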

/*
 * For a compat caller, this is identical to XENMEM_machphys_mfn_list.
 *
 * For a non-compat caller, this functions similarly to
 * XENMEM_machphys_mfn_list, but returns the MFNs making up the compatibility
 * m2p table.
 */
#define XENMEM_machphys_compat_mfn_list     25

/*
 * Returns the location in virtual address space of the machine_to_phys
 * mapping table. Architectures which do not have an m2p table, or which do
 * not map it by default into guest address space, do not implement this
 * command.
 * arg == addr of xen_machphys_mapping_t.
 */
#define XENMEM_machphys_mapping     12
struct xen_machphys_mapping {
    xen_ulong_t v_start, v_end; /* Start and end virtual addresses.   */
    xen_ulong_t max_mfn;        /* Maximum MFN that can be looked up. */
};
typedef struct xen_machphys_mapping xen_machphys_mapping_t;
DEFINE_XEN_GUEST_HANDLE(xen_machphys_mapping_t);
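
/*
 * Usage sketch (illustrative, not part of the ABI): locating the m2p
 * table in the guest's virtual address space:
 *
 *     struct xen_machphys_mapping mapping;
 *     rc = HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping);
 *     // On success the m2p table spans [mapping.v_start, mapping.v_end)
 *     // and MFNs up to mapping.max_mfn can be looked up in it.
 */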

/* Source mapping space. */
/* ` enum phys_map_space { */
#define XENMAPSPACE_shared_info  0 /* shared info page */
#define XENMAPSPACE_grant_table  1 /* grant table page */
#define XENMAPSPACE_gmfn         2 /* GMFN */
#define XENMAPSPACE_gmfn_range   3 /* GMFN range, XENMEM_add_to_physmap only. */
#define XENMAPSPACE_gmfn_foreign 4 /* GMFN from another dom,
                                    * XENMEM_add_to_physmap_batch only. */
/* ` } */

/*
 * Sets the GPFN at which a particular page appears in the specified guest's
 * pseudophysical address space.
 * arg == addr of xen_add_to_physmap_t.
 */
#define XENMEM_add_to_physmap      7
struct xen_add_to_physmap {
    /* Which domain to change the mapping for. */
    domid_t domid;

    /* Number of pages to go through for gmfn_range */
    uint16_t size;

    unsigned int space; /* => enum phys_map_space */

#define XENMAPIDX_grant_table_status 0x80000000

    /* Index into space being mapped. */
    xen_ulong_t idx;

    /* GPFN in domid where the source mapping page should appear. */
    xen_pfn_t gpfn;
};
typedef struct xen_add_to_physmap xen_add_to_physmap_t;
DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_t);
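
/*
 * Usage sketch (illustrative, not part of the ABI): the classic use of
 * this hypercall is a guest mapping its shared info page into the physmap
 * at a GPFN of its choosing:
 *
 *     struct xen_add_to_physmap xatp = {
 *         .domid = DOMID_SELF,
 *         .space = XENMAPSPACE_shared_info,
 *         .idx   = 0,                     // index into the source space
 *         .gpfn  = chosen_gpfn,           // hypothetical free GPFN
 *     };
 *     rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
 */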

/* A batched version of add_to_physmap. */
#define XENMEM_add_to_physmap_batch 23
struct xen_add_to_physmap_batch {
    /* IN */
    /* Which domain to change the mapping for. */
    domid_t domid;
    uint16_t space; /* => enum phys_map_space */

    /* Number of pages to go through */
    uint16_t size;
    domid_t foreign_domid; /* IFF gmfn_foreign */

    /* Indexes into space being mapped. */
    XEN_GUEST_HANDLE(xen_ulong_t) idxs;

    /* GPFN in domid where the source mapping page should appear. */
    XEN_GUEST_HANDLE(xen_pfn_t) gpfns;

    /* OUT */

    /* Per index error code. */
    XEN_GUEST_HANDLE(int) errs;
};
typedef struct xen_add_to_physmap_batch xen_add_to_physmap_batch_t;
DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_batch_t);

#if __XEN_INTERFACE_VERSION__ < 0x00040400
#define XENMEM_add_to_physmap_range XENMEM_add_to_physmap_batch
#define xen_add_to_physmap_range xen_add_to_physmap_batch
typedef struct xen_add_to_physmap_batch xen_add_to_physmap_range_t;
DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_range_t);
#endif

/*
 * Unmaps the page appearing at a particular GPFN from the specified guest's
 * pseudophysical address space.
 * arg == addr of xen_remove_from_physmap_t.
 */
#define XENMEM_remove_from_physmap      15
struct xen_remove_from_physmap {
    /* Which domain to change the mapping for. */
    domid_t domid;

    /* GPFN of the current mapping of the page. */
    xen_pfn_t gpfn;
};
typedef struct xen_remove_from_physmap xen_remove_from_physmap_t;
DEFINE_XEN_GUEST_HANDLE(xen_remove_from_physmap_t);

/*** REMOVED ***/
/*#define XENMEM_translate_gpfn_list  8*/

/*
 * Returns the pseudo-physical memory map as it was when the domain
 * was started (specified by XENMEM_set_memory_map).
 * arg == addr of xen_memory_map_t.
 */
#define XENMEM_memory_map           9
struct xen_memory_map {
    /*
     * On call, the number of entries which can be stored in the buffer. On
     * return, the number of entries which have been stored.
     */
    unsigned int nr_entries;

    /*
     * Entries in the buffer are in the same format as returned by the
     * BIOS INT 0x15 EAX=0xE820 call.
     */
    XEN_GUEST_HANDLE(void) buffer;
};
typedef struct xen_memory_map xen_memory_map_t;
DEFINE_XEN_GUEST_HANDLE(xen_memory_map_t);
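
/*
 * Usage sketch (illustrative, not part of the ABI): retrieving the
 * domain's pseudo-physical E820 map. The entry layout is whatever E820
 * structure the guest OS defines (a hypothetical struct e820entry here):
 *
 *     struct e820entry entries[128];
 *     struct xen_memory_map memmap = { .nr_entries = 128 };
 *     set_xen_guest_handle(memmap.buffer, entries);
 *     rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
 *     // On success, memmap.nr_entries E820 entries have been written.
 */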

/*
 * Returns the real physical memory map. Passes the same structure as
 * XENMEM_memory_map.
 * arg == addr of xen_memory_map_t.
 */
#define XENMEM_machine_memory_map   10

/*
 * Set the pseudo-physical memory map of a domain, as returned by
 * XENMEM_memory_map.
 * arg == addr of xen_foreign_memory_map_t.
 */
#define XENMEM_set_memory_map       13
struct xen_foreign_memory_map {
    domid_t domid;
    struct xen_memory_map map;
};
typedef struct xen_foreign_memory_map xen_foreign_memory_map_t;
DEFINE_XEN_GUEST_HANDLE(xen_foreign_memory_map_t);

#define XENMEM_set_pod_target       16
#define XENMEM_get_pod_target       17
struct xen_pod_target {
    /* IN */
    uint64_t target_pages;
    /* OUT */
    uint64_t tot_pages;
    uint64_t pod_cache_pages;
    uint64_t pod_entries;
    /* IN */
    domid_t domid;
};
typedef struct xen_pod_target xen_pod_target_t;
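
/*
 * Usage sketch (illustrative, not part of the ABI): a toolstack setting a
 * domain's populate-on-demand target (in practice wrapped by libxc's
 * xc_domain_set_pod_target()):
 *
 *     struct xen_pod_target pod = {
 *         .target_pages = new_target,     // hypothetical page count
 *         .domid        = dom,
 *     };
 *     rc = HYPERVISOR_memory_op(XENMEM_set_pod_target, &pod);
 *     // On success, pod.tot_pages / pod.pod_cache_pages / pod.pod_entries
 *     // report the domain's current PoD state.
 */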

#if defined(__XEN__) || defined(__XEN_TOOLS__)

#ifndef uint64_aligned_t
#define uint64_aligned_t uint64_t
#endif

/*
 * Get the number of MFNs saved through memory sharing.
 * The call never fails.
 */
#define XENMEM_get_sharing_freed_pages    18
#define XENMEM_get_sharing_shared_pages   19

#define XENMEM_paging_op                    20
#define XENMEM_paging_op_nominate           0
#define XENMEM_paging_op_evict              1
#define XENMEM_paging_op_prep               2

struct xen_mem_paging_op {
    uint8_t     op;         /* XENMEM_paging_op_* */
    domid_t     domain;

    /* PAGING_PREP IN: buffer from which the page is immediately filled */
    uint64_aligned_t    buffer;
    /* Other OPs */
    uint64_aligned_t    gfn;    /* IN: gfn of page being operated on */
};
typedef struct xen_mem_paging_op xen_mem_paging_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_mem_paging_op_t);

#define XENMEM_access_op                    21
#define XENMEM_access_op_set_access         0
#define XENMEM_access_op_get_access         1
#define XENMEM_access_op_enable_emulate     2
#define XENMEM_access_op_disable_emulate    3

typedef enum {
    XENMEM_access_n,
    XENMEM_access_r,
    XENMEM_access_w,
    XENMEM_access_rw,
    XENMEM_access_x,
    XENMEM_access_rx,
    XENMEM_access_wx,
    XENMEM_access_rwx,
    /*
     * Page starts off as r-x, but automatically
     * changes to r-w on a write
     */
    XENMEM_access_rx2rw,
    /*
     * Log access: starts off as n, automatically
     * goes to rwx, generating an event without
     * pausing the vcpu
     */
    XENMEM_access_n2rwx,
    /* Take the domain default */
    XENMEM_access_default
} xenmem_access_t;

struct xen_mem_access_op {
    /* XENMEM_access_op_* */
    uint8_t op;
    /* xenmem_access_t */
    uint8_t access;
    domid_t domid;
    /*
     * Number of pages for set op
     * Ignored on setting default access and other ops
     */
    uint32_t nr;
    /*
     * First pfn for set op
     * pfn for get op
     * ~0ull is used to set and get the default access for pages
     */
    uint64_aligned_t pfn;
};
typedef struct xen_mem_access_op xen_mem_access_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_mem_access_op_t);
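
/*
 * Usage sketch (illustrative, not part of the ABI): restricting a single
 * gfn to read/execute, e.g. from an introspection agent (in practice
 * issued through libxc's xc_set_mem_access()):
 *
 *     struct xen_mem_access_op mao = {
 *         .op     = XENMEM_access_op_set_access,
 *         .access = XENMEM_access_rx,
 *         .domid  = dom,
 *         .nr     = 1,
 *         .pfn    = gfn,                  // hypothetical target pfn
 *     };
 *     rc = HYPERVISOR_memory_op(XENMEM_access_op, &mao);
 */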

#define XENMEM_sharing_op                   22
#define XENMEM_sharing_op_nominate_gfn      0
#define XENMEM_sharing_op_nominate_gref     1
#define XENMEM_sharing_op_share             2
#define XENMEM_sharing_op_debug_gfn         3
#define XENMEM_sharing_op_debug_mfn         4
#define XENMEM_sharing_op_debug_gref        5
#define XENMEM_sharing_op_add_physmap       6
#define XENMEM_sharing_op_audit             7

#define XENMEM_SHARING_OP_S_HANDLE_INVALID  (-10)
#define XENMEM_SHARING_OP_C_HANDLE_INVALID  (-9)

/* The following allows sharing of grant refs. This is useful
 * for sharing utilities sitting as "filters" in IO backends
 * (e.g. memshr + blktap(2)). The IO backend is only exposed
 * to grant references, and this allows sharing of the grefs. */
#define XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG   (1ULL << 62)

#define XENMEM_SHARING_OP_FIELD_MAKE_GREF(field, val)  \
    (field) = (XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG | val)
#define XENMEM_SHARING_OP_FIELD_IS_GREF(field)         \
    ((field) & XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG)
#define XENMEM_SHARING_OP_FIELD_GET_GREF(field)        \
    ((field) & (~XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG))

struct xen_mem_sharing_op {
    uint8_t     op;     /* XENMEM_sharing_op_* */
    domid_t     domain;

    union {
        struct mem_sharing_op_nominate {  /* OP_NOMINATE_xxx           */
            union {
                uint64_aligned_t gfn;     /* IN: gfn to nominate       */
                uint32_t grant_ref;       /* IN: grant ref to nominate */
            } u;
            uint64_aligned_t handle;      /* OUT: the handle           */
        } nominate;
        struct mem_sharing_op_share {     /* OP_SHARE/ADD_PHYSMAP */
            uint64_aligned_t source_gfn;    /* IN: the gfn of the source page */
            uint64_aligned_t source_handle; /* IN: handle to the source page  */
            uint64_aligned_t client_gfn;    /* IN: the client gfn             */
            uint64_aligned_t client_handle; /* IN: handle to the client page  */
            domid_t client_domain;          /* IN: the client domain id       */
        } share;
        struct mem_sharing_op_debug {     /* OP_DEBUG_xxx */
            union {
                uint64_aligned_t gfn;     /* IN: gfn to debug  */
                uint64_aligned_t mfn;     /* IN: mfn to debug  */
                uint32_t gref;            /* IN: gref to debug */
            } u;
        } debug;
    } u;
};
typedef struct xen_mem_sharing_op xen_mem_sharing_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_mem_sharing_op_t);
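
/*
 * Usage sketch (illustrative, not part of the ABI): nominating a source
 * page for sharing and reading back its handle (in practice driven
 * through libxc's memory-sharing helpers):
 *
 *     struct xen_mem_sharing_op mso = {
 *         .op     = XENMEM_sharing_op_nominate_gfn,
 *         .domain = source_dom,
 *     };
 *     mso.u.nominate.u.gfn = source_gfn;  // hypothetical gfn
 *     rc = HYPERVISOR_memory_op(XENMEM_sharing_op, &mso);
 *     // On success, mso.u.nominate.handle identifies the page for a
 *     // subsequent XENMEM_sharing_op_share.
 */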

/*
 * Attempt to stake a claim for a domain on a quantity of pages
 * of system RAM, but _not_ assign specific pageframes. Only
 * arithmetic is performed so the hypercall is very fast and need
 * not be preemptible, thus sidestepping time-of-check-time-of-use
 * races for memory allocation. Returns 0 if the hypervisor page
 * allocator has atomically and successfully claimed the requested
 * number of pages, else non-zero.
 *
 * Any domain may have only one active claim. When sufficient memory
 * has been allocated to resolve the claim, the claim silently expires.
 * Claiming zero pages effectively resets any outstanding claim and
 * is always successful.
 *
 * Note that a valid claim may be staked even after memory has been
 * allocated for a domain. In this case, the claim is not incremental,
 * i.e. if the domain's tot_pages is 3, and a claim is staked for 10,
 * only 7 additional pages are claimed.
 *
 * Caller must be privileged or the hypercall fails.
 */
#define XENMEM_claim_pages                  24

/*
 * XENMEM_claim_pages flags - there are no flags at this time.
 * The zero value is appropriate.
 */
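
/*
 * Usage sketch (illustrative, not part of the ABI): a toolstack staking a
 * claim for nr_pages before populating a new domain (in practice wrapped
 * by libxc's xc_domain_claim_pages()). The extent_start handle must be
 * null and mem_flags must be zero:
 *
 *     struct xen_memory_reservation claim = {
 *         .nr_extents   = nr_pages,       // hypothetical page count
 *         .extent_order = 0,
 *         .mem_flags    = 0,
 *         .domid        = dom,
 *     };
 *     rc = HYPERVISOR_memory_op(XENMEM_claim_pages, &claim);
 *     // rc == 0 on success; cancel an outstanding claim by repeating the
 *     // call with nr_extents = 0.
 */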

/*
 * With some legacy devices, certain guest-physical addresses cannot safely
 * be used for other purposes, e.g. to map guest RAM. This hypercall
 * enumerates those regions so the toolstack can avoid using them.
 */
#define XENMEM_reserved_device_memory_map   27
struct xen_reserved_device_memory {
    xen_pfn_t start_pfn;
    xen_ulong_t nr_pages;
};
typedef struct xen_reserved_device_memory xen_reserved_device_memory_t;
DEFINE_XEN_GUEST_HANDLE(xen_reserved_device_memory_t);

struct xen_reserved_device_memory_map {
#define XENMEM_RDM_ALL 1 /* Request all regions (ignore dev union). */
    /* IN */
    uint32_t flags;
    /*
     * IN/OUT
     *
     * Gets set to the required number of entries when too low,
     * signaled by error code -ERANGE.
     */
    unsigned int nr_entries;
    /* OUT */
    XEN_GUEST_HANDLE(xen_reserved_device_memory_t) buffer;
    /* IN */
    union {
        struct physdev_pci_device pci;
    } dev;
};
typedef struct xen_reserved_device_memory_map xen_reserved_device_memory_map_t;
DEFINE_XEN_GUEST_HANDLE(xen_reserved_device_memory_map_t);

#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */

/*
 * XENMEM_get_vnumainfo is used by a guest to get
 * its vNUMA topology from the hypervisor.
 */
#define XENMEM_get_vnumainfo                26

/* vNUMA node memory ranges */
struct xen_vmemrange {
    uint64_t start, end;
    unsigned int flags;
    unsigned int nid;
};
typedef struct xen_vmemrange xen_vmemrange_t;
DEFINE_XEN_GUEST_HANDLE(xen_vmemrange_t);

/*
 * vNUMA topology specifies the vNUMA node number, distance table,
 * memory ranges and vcpu mapping provided for guests.
 * The XENMEM_get_vnumainfo hypercall expects the guest to supply
 * nr_vnodes, nr_vmemranges and nr_vcpus to indicate the available
 * buffer space. After filling the guest's structures, nr_vnodes,
 * nr_vmemranges and nr_vcpus are copied back to the guest. If the
 * supplied values were incorrect, the expected values of nr_vnodes,
 * nr_vmemranges and nr_vcpus are returned to the guest instead.
 */
struct xen_vnuma_topology_info {
    /* IN */
    domid_t domid;
    uint16_t pad;
    /* IN/OUT */
    unsigned int nr_vnodes;
    unsigned int nr_vcpus;
    unsigned int nr_vmemranges;
    /* OUT */
    union {
        XEN_GUEST_HANDLE(uint) h;
        uint64_t pad;
    } vdistance;
    union {
        XEN_GUEST_HANDLE(uint) h;
        uint64_t pad;
    } vcpu_to_vnode;
    union {
        XEN_GUEST_HANDLE(xen_vmemrange_t) h;
        uint64_t pad;
    } vmemrange;
};
typedef struct xen_vnuma_topology_info xen_vnuma_topology_info_t;
DEFINE_XEN_GUEST_HANDLE(xen_vnuma_topology_info_t);
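
/*
 * Usage sketch (illustrative, not part of the ABI): the usual two-step
 * pattern for XENMEM_get_vnumainfo. A first call with too-small sizes
 * fails but reports the required nr_* values; the guest then allocates
 * the arrays, sets the three handles and retries:
 *
 *     struct xen_vnuma_topology_info vnuma = { .domid = DOMID_SELF };
 *     rc = HYPERVISOR_memory_op(XENMEM_get_vnumainfo, &vnuma);
 *     // ...allocate nr_vnodes * nr_vnodes distance entries, nr_vcpus
 *     // vcpu-to-vnode entries and nr_vmemranges ranges, then:
 *     // set_xen_guest_handle(vnuma.vdistance.h, distances); etc.
 *     rc = HYPERVISOR_memory_op(XENMEM_get_vnumainfo, &vnuma);
 */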

/* Next available subop number is 28 */

#endif /* __XEN_PUBLIC_MEMORY_H__ */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */