1 /*-
2 * Copyright (c) 2011 NetApp, Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 4. Neither the name of the University nor the names of its contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * $FreeBSD: releng/11.0/sys/amd64/vmm/vmm_stat.h 283657 2015-05-28 17:37:01Z neel $
30 */
31
32 #ifndef _VMM_STAT_H_
33 #define _VMM_STAT_H_
34
struct vm;	/* opaque; defined in vmm.c */

#define	MAX_VMM_STAT_ELEMS	64		/* arbitrary */

enum vmm_stat_scope {
	VMM_STAT_SCOPE_ANY,	/* not specific to a particular vendor */
	VMM_STAT_SCOPE_INTEL,	/* Intel VMX specific statistic */
	VMM_STAT_SCOPE_AMD,	/* AMD SVM specific statistic */
};
44
struct vmm_stat_type;

/*
 * Optional per-statistic callback; presumably invoked to refresh the
 * statistic's value before it is read — TODO confirm against
 * vmm_stat_copy() in vmm_stat.c.
 */
typedef void (*vmm_stat_func_t)(struct vm *vm, int vcpu,
    struct vmm_stat_type *stat);

/*
 * Descriptor for one statistic (or a contiguous array of statistics)
 * kept in the per-vcpu stats buffer.
 */
struct vmm_stat_type {
	int	index;			/* position in the stats buffer */
	int	nelems;			/* standalone or array */
	const char *desc;		/* description of statistic */
	vmm_stat_func_t func;		/* optional update callback, may be NULL */
	enum vmm_stat_scope scope;	/* vendor applicability */
};
56
void	vmm_stat_register(void *arg);

/*
 * Define a statistic 'type' with an optional update function 'func'.
 *
 * 'index' starts out as -1 (i.e. not yet assigned a slot in the stats
 * buffer); vmm_stat_register() assigns the real index when it runs via
 * SYSINIT at module load.  The single-element array lets 'type' be used
 * as a pointer without taking its address explicitly.
 */
#define	VMM_STAT_FDEFINE(type, nelems, desc, func, scope)	\
	struct vmm_stat_type type[1] = {			\
		{ -1, nelems, desc, func, scope }		\
	};							\
	SYSINIT(type##_stat, SI_SUB_KLD, SI_ORDER_ANY, vmm_stat_register, type)

/* Define a statistic with no update function. */
#define	VMM_STAT_DEFINE(type, nelems, desc, scope)		\
	VMM_STAT_FDEFINE(type, nelems, desc, NULL, scope)

/* Declare (in other translation units) a statistic defined elsewhere. */
#define	VMM_STAT_DECLARE(type)					\
	extern struct vmm_stat_type type[1]

/* Convenience wrappers: single-element statistics per scope. */
#define	VMM_STAT(type, desc)		\
	VMM_STAT_DEFINE(type, 1, desc, VMM_STAT_SCOPE_ANY)
#define	VMM_STAT_INTEL(type, desc)	\
	VMM_STAT_DEFINE(type, 1, desc, VMM_STAT_SCOPE_INTEL)
#define	VMM_STAT_AMD(type, desc)	\
	VMM_STAT_DEFINE(type, 1, desc, VMM_STAT_SCOPE_AMD)

/* Single-element statistic with an update callback. */
#define	VMM_STAT_FUNC(type, desc, func)	\
	VMM_STAT_FDEFINE(type, 1, desc, func, VMM_STAT_SCOPE_ANY)

/* Array statistic occupying 'nelems' consecutive buffer slots. */
#define	VMM_STAT_ARRAY(type, nelems, desc)	\
	VMM_STAT_DEFINE(type, nelems, desc, VMM_STAT_SCOPE_ANY)
83
void	*vmm_stat_alloc(void);
void	vmm_stat_init(void *vp);
void	vmm_stat_free(void *vp);

/*
 * 'buf' must be able to hold at least 'MAX_VMM_STAT_ELEMS' entries.
 */
int	vmm_stat_copy(struct vm *vm, int vcpu, int *num_stats, uint64_t *buf);
int	vmm_stat_desc_copy(int index, char *buf, int buflen);
93
/*
 * Add 'x' to element 'statidx' of the array statistic 'vst' for 'vcpu'.
 * A no-op unless the kernel is built with VMM_KEEP_STATS, or if the
 * statistic has not been registered yet (vst->index == -1) or 'statidx'
 * is outside the array.
 */
static void __inline
vmm_stat_array_incr(struct vm *vm, int vcpu, struct vmm_stat_type *vst,
    int statidx, uint64_t x)
{
#ifdef VMM_KEEP_STATS
	uint64_t *stats;

	stats = vcpu_stats(vm, vcpu);

	/*
	 * 'statidx' is signed, so bound it on both ends: the original
	 * check only rejected statidx >= nelems, letting a negative
	 * index write before the statistic's slot in the buffer.
	 */
	if (vst->index >= 0 && statidx >= 0 && statidx < vst->nelems)
		stats[vst->index + statidx] += x;
#endif
}
107
/*
 * Set element 'statidx' of the array statistic 'vst' for 'vcpu' to 'val'.
 * A no-op unless the kernel is built with VMM_KEEP_STATS, or if the
 * statistic has not been registered yet (vst->index == -1) or 'statidx'
 * is outside the array.
 */
static void __inline
vmm_stat_array_set(struct vm *vm, int vcpu, struct vmm_stat_type *vst,
    int statidx, uint64_t val)
{
#ifdef VMM_KEEP_STATS
	uint64_t *stats;

	stats = vcpu_stats(vm, vcpu);

	/*
	 * 'statidx' is signed, so bound it on both ends: the original
	 * check only rejected statidx >= nelems, letting a negative
	 * index write before the statistic's slot in the buffer.
	 */
	if (vst->index >= 0 && statidx >= 0 && statidx < vst->nelems)
		stats[vst->index + statidx] = val;
#endif
}
121
/*
 * Increment the scalar statistic 'vst' by 'x'.  The outer #ifdef is
 * redundant (vmm_stat_array_incr is already empty without
 * VMM_KEEP_STATS) but harmless.
 */
static void __inline
vmm_stat_incr(struct vm *vm, int vcpu, struct vmm_stat_type *vst, uint64_t x)
{

#ifdef VMM_KEEP_STATS
	vmm_stat_array_incr(vm, vcpu, vst, 0, x);
#endif
}
130
/*
 * Set the scalar statistic 'vst' to 'val'.  The outer #ifdef is
 * redundant (vmm_stat_array_set is already empty without
 * VMM_KEEP_STATS) but harmless.
 */
static void __inline
vmm_stat_set(struct vm *vm, int vcpu, struct vmm_stat_type *vst, uint64_t val)
{

#ifdef VMM_KEEP_STATS
	vmm_stat_array_set(vm, vcpu, vst, 0, val);
#endif
}
139
/* Statistics defined in vmm.c and the vendor-specific backends. */
VMM_STAT_DECLARE(VCPU_MIGRATIONS);
VMM_STAT_DECLARE(VMEXIT_COUNT);
VMM_STAT_DECLARE(VMEXIT_EXTINT);
VMM_STAT_DECLARE(VMEXIT_HLT);
VMM_STAT_DECLARE(VMEXIT_CR_ACCESS);
VMM_STAT_DECLARE(VMEXIT_RDMSR);
VMM_STAT_DECLARE(VMEXIT_WRMSR);
VMM_STAT_DECLARE(VMEXIT_MTRAP);
VMM_STAT_DECLARE(VMEXIT_PAUSE);
VMM_STAT_DECLARE(VMEXIT_INTR_WINDOW);
VMM_STAT_DECLARE(VMEXIT_NMI_WINDOW);
VMM_STAT_DECLARE(VMEXIT_INOUT);
VMM_STAT_DECLARE(VMEXIT_CPUID);
VMM_STAT_DECLARE(VMEXIT_NESTED_FAULT);
VMM_STAT_DECLARE(VMEXIT_INST_EMUL);
VMM_STAT_DECLARE(VMEXIT_UNKNOWN);
VMM_STAT_DECLARE(VMEXIT_ASTPENDING);
VMM_STAT_DECLARE(VMEXIT_USERSPACE);
VMM_STAT_DECLARE(VMEXIT_RENDEZVOUS);
VMM_STAT_DECLARE(VMEXIT_EXCEPTION);
VMM_STAT_DECLARE(VMEXIT_REQIDLE);
#endif	/* _VMM_STAT_H_ */