1 /*
2 * Permission is hereby granted, free of charge, to any person obtaining a copy
3 * of this software and associated documentation files (the "Software"), to
4 * deal in the Software without restriction, including without limitation the
5 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
6 * sell copies of the Software, and to permit persons to whom the Software is
7 * furnished to do so, subject to the following conditions:
8 *
9 * The above copyright notice and this permission notice shall be included in
10 * all copies or substantial portions of the Software.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
15 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
16 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
17 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
18 * DEALINGS IN THE SOFTWARE.
19 *
20 * Copyright (c) 2015 Oracle and/or its affiliates. All rights reserved.
21 */
22
23 #ifndef __XEN_PUBLIC_ARCH_X86_PMU_H__
24 #define __XEN_PUBLIC_ARCH_X86_PMU_H__
25
26 /* x86-specific PMU definitions */
27
/*
 * AMD PMU registers and structures.
 * Shared between the hypervisor and the guest: layout is ABI and must not
 * change. This header is positioned inside xen_pmu_arch.c.amd (see below).
 */
struct xen_pmu_amd_ctxt {
    /*
     * Offsets to counter and control MSRs (relative to xen_pmu_arch.c.amd).
     * For PV(H) guests these fields are RO.
     */
    uint32_t counters;
    uint32_t ctrls;

    /*
     * Counter MSRs. Flexible array: the counter bank lives at offset
     * 'counters' and the control bank at offset 'ctrls' within this region.
     */
    uint64_t regs[XEN_FLEX_ARRAY_DIM];
};
typedef struct xen_pmu_amd_ctxt xen_pmu_amd_ctxt_t;
DEFINE_XEN_GUEST_HANDLE(xen_pmu_amd_ctxt_t);
42
/* Intel PMU registers and structures */
struct xen_pmu_cntr_pair {
    uint64_t counter;  /* Counter MSR value */
    uint64_t control;  /* Matching event-select/control MSR value */
};
typedef struct xen_pmu_cntr_pair xen_pmu_cntr_pair_t;
DEFINE_XEN_GUEST_HANDLE(xen_pmu_cntr_pair_t);
50
/*
 * Intel PMU context. Shared hypervisor/guest ABI — layout must not change.
 * Embedded inside xen_pmu_arch.c.intel (see below).
 */
struct xen_pmu_intel_ctxt {
    /*
     * Offsets to fixed and architectural counter MSRs (relative to
     * xen_pmu_arch.c.intel).
     * For PV(H) guests these fields are RO.
     */
    uint32_t fixed_counters;
    uint32_t arch_counters;

    /* PMU registers (global/control MSR images) */
    uint64_t global_ctrl;
    uint64_t global_ovf_ctrl;
    uint64_t global_status;
    uint64_t fixed_ctrl;
    uint64_t ds_area;
    uint64_t pebs_enable;
    uint64_t debugctl;

    /*
     * Fixed and architectural counter MSRs. Flexible array: the fixed
     * counter bank lives at offset 'fixed_counters' and the architectural
     * counter pairs at offset 'arch_counters' within this region.
     */
    uint64_t regs[XEN_FLEX_ARRAY_DIM];
};
typedef struct xen_pmu_intel_ctxt xen_pmu_intel_ctxt_t;
DEFINE_XEN_GUEST_HANDLE(xen_pmu_intel_ctxt_t);
74
/* Sampled domain's registers (processor state captured at PMU interrupt) */
struct xen_pmu_regs {
    uint64_t ip;       /* Instruction pointer at the time of the sample */
    uint64_t sp;       /* Stack pointer */
    uint64_t flags;    /* Flags register */
    uint16_t cs;       /* Code segment selector */
    uint16_t ss;       /* Stack segment selector */
    uint8_t cpl;       /* Current privilege level of the sampled context */
    uint8_t pad[3];    /* Explicit padding — keeps layout identical on all ABIs */
};
typedef struct xen_pmu_regs xen_pmu_regs_t;
DEFINE_XEN_GUEST_HANDLE(xen_pmu_regs_t);
87
88 /* PMU flags */
89 #define PMU_CACHED (1<<0) /* PMU MSRs are cached in the context */
90 #define PMU_SAMPLE_USER (1<<1) /* Sample is from user or kernel mode */
91 #define PMU_SAMPLE_REAL (1<<2) /* Sample is from realmode */
92 #define PMU_SAMPLE_PV (1<<3) /* Sample from a PV guest */
93
94 /*
95 * Architecture-specific information describing state of the processor at
96 * the time of PMU interrupt.
97 * Fields of this structure marked as RW for guest should only be written by
98 * the guest when PMU_CACHED bit in pmu_flags is set (which is done by the
99 * hypervisor during PMU interrupt). Hypervisor will read updated data in
100 * XENPMU_flush hypercall and clear PMU_CACHED bit.
101 */
102 struct xen_pmu_arch {
103 union {
104 /*
105 * Processor's registers at the time of interrupt.
106 * WO for hypervisor, RO for guests.
107 */
108 xen_pmu_regs_t regs;
109 /* Padding for adding new registers to xen_pmu_regs in the future */
110 #define XENPMU_REGS_PAD_SZ 64
111 uint8_t pad[XENPMU_REGS_PAD_SZ];
112 } r;
113
114 /* WO for hypervisor, RO for guest */
115 uint64_t pmu_flags;
116
117 /*
118 * APIC LVTPC register.
119 * RW for both hypervisor and guest.
120 * Only APIC_LVT_MASKED bit is loaded by the hypervisor into hardware
121 * during XENPMU_flush or XENPMU_lvtpc_set.
122 */
123 union {
124 uint32_t lapic_lvtpc;
125 uint64_t pad;
126 } l;
127
128 /*
129 * Vendor-specific PMU registers.
130 * RW for both hypervisor and guest (see exceptions above).
131 * Guest's updates to this field are verified and then loaded by the
132 * hypervisor into hardware during XENPMU_flush
133 */
134 union {
135 xen_pmu_amd_ctxt_t amd;
136 xen_pmu_intel_ctxt_t intel;
137
138 /*
139 * Padding for contexts (fixed parts only, does not include MSR banks
140 * that are specified by offsets)
141 */
142 #define XENPMU_CTXT_PAD_SZ 128
143 uint8_t pad[XENPMU_CTXT_PAD_SZ];
144 } c;
145 };
146 typedef struct xen_pmu_arch xen_pmu_arch_t;
147 DEFINE_XEN_GUEST_HANDLE(xen_pmu_arch_t);
148
149 #endif /* __XEN_PUBLIC_ARCH_X86_PMU_H__ */
150 /*
151 * Local variables:
152 * mode: C
153 * c-file-style: "BSD"
154 * c-basic-offset: 4
155 * tab-width: 4
156 * indent-tabs-mode: nil
157 * End:
158 */
159
/* Cache object: b920b761bf84b13ccf32ec897567f6a2 */