/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) Peter Wemm <peter@netplex.com.au>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifdef __i386__
#include <i386/pcpu.h>
#else /* !__i386__ */

#ifndef _MACHINE_PCPU_H_
#define	_MACHINE_PCPU_H_

#include <machine/segments.h>
#include <machine/tss.h>

#define	PC_PTI_STACK_SZ	16	/* uint64_t slots in the PTI trampoline stack */

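/*
 * Storage monitored by MONITOR/MWAIT in the idle and stop paths.
 * The padding keeps the buffer alone on its two cache lines,
 * presumably so that stores to neighboring per-CPU fields cannot
 * dirty the monitored range and trigger spurious MWAIT wakeups.
 */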
struct monitorbuf {
	int	idle_state;	/* Used by cpu_idle_mwait. */
	int	stop_state;	/* Used by cpustop_handler. */
	char	padding[128 - (2 * sizeof(int))];
};
_Static_assert(sizeof(struct monitorbuf) == 128, "2x cache line");

/*
 * The SMP parts are set up in pmap.c and locore.s for the BSP, and
 * mp_machdep.c sets up the data for the APs to "see" when they wake up.
 * The reason for doing it via a struct is so that an array of pointers
 * to each CPU's data can be set up for things like "check curproc on
 * all other processors".
 */
#define	PCPU_MD_FIELDS							\
	struct monitorbuf pc_monitorbuf __aligned(128); /* cache line */\
	struct pcpu *pc_prvspace;	/* Self-reference */		\
	struct pmap *pc_curpmap;					\
	struct amd64tss *pc_tssp;	/* TSS segment active on CPU */	\
	void	*pc_pad0;						\
	uint64_t pc_kcr3;						\
	uint64_t pc_ucr3;						\
	uint64_t pc_saved_ucr3;						\
	register_t pc_rsp0;						\
	register_t pc_scratch_rsp;	/* User %rsp in syscall */	\
	register_t pc_scratch_rax;					\
	u_int	pc_apic_id;						\
	u_int	pc_acpi_id;		/* ACPI CPU id */		\
	/* Pointer to the CPU %fs descriptor */				\
	struct user_segment_descriptor *pc_fs32p;			\
	/* Pointer to the CPU %gs descriptor */				\
	struct user_segment_descriptor *pc_gs32p;			\
	/* Pointer to the CPU LDT descriptor */				\
	struct system_segment_descriptor *pc_ldt;			\
	/* Pointer to the CPU TSS descriptor */				\
	struct system_segment_descriptor *pc_tss;			\
	u_int	pc_cmci_mask;		/* MCx banks for CMCI */	\
	uint64_t pc_dbreg[16];		/* ddb debugging regs */	\
	uint64_t pc_pti_stack[PC_PTI_STACK_SZ];				\
	register_t pc_pti_rsp0;						\
	int	pc_dbreg_cmd;		/* ddb debugging reg cmd */	\
	u_int	pc_vcpu_id;		/* Xen vCPU ID */		\
	uint32_t pc_pcid_next;						\
	uint32_t pc_pcid_gen;						\
	uint32_t pc_unused;						\
	uint32_t pc_ibpb_set;						\
	void	*pc_mds_buf;						\
	void	*pc_mds_buf64;						\
	uint32_t pc_pad[4];						\
	uint8_t	pc_mds_tmp[64];						\
	u_int	pc_ipi_bitmap;						\
	struct amd64tss pc_common_tss;					\
	struct user_segment_descriptor pc_gdt[NGDT];			\
	void	*pc_smp_tlb_pmap;					\
	uint64_t pc_smp_tlb_addr1;					\
	uint64_t pc_smp_tlb_addr2;					\
	uint32_t pc_smp_tlb_gen;					\
	u_int	pc_smp_tlb_op;						\
	uint64_t pc_ucr3_load_mask;					\
	u_int	pc_small_core;						\
	u_int	pc_pcid_invlpg_workaround;				\
	char	__pad[2908]	/* pad to UMA_PCPU_ALLOC_SIZE */

#define	PC_DBREG_CMD_NONE	0
#define	PC_DBREG_CMD_LOAD	1

#ifdef _KERNEL

#define	MONITOR_STOPSTATE_RUNNING	0
#define	MONITOR_STOPSTATE_STOPPED	1

/*
 * Evaluates to the byte offset of the per-cpu variable name.
 */
#define	__pcpu_offset(name)						\
	__offsetof(struct pcpu, name)

/*
 * Evaluates to the type of the per-cpu variable name.
 */
#define	__pcpu_type(name)						\
	__typeof(((struct pcpu *)0)->name)

/*
 * Evaluates to the address of the per-cpu variable name.
 */
#define	__PCPU_PTR(name) __extension__ ({				\
	__pcpu_type(name) *__p;						\
									\
	__asm __volatile("movq %%gs:%1,%0; addq %2,%0"			\
	    : "=r" (__p)						\
	    : "m" (*(struct pcpu *)(__pcpu_offset(pc_prvspace))),	\
	      "i" (__pcpu_offset(name)));				\
									\
	__p;								\
})

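/*
 * Sketch of an expansion (hypothetical caller): __PCPU_PTR(pc_curpmap)
 * loads this CPU's pcpu base from %gs:pc_prvspace, adds the field
 * offset with a second instruction and yields a "struct pmap **".
 * The resulting address is stable only while the thread cannot
 * migrate, e.g. inside a critical section.
 */
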
/*
 * Evaluates to the value of the per-cpu variable name.
 */
#define	__PCPU_GET(name) __extension__ ({				\
	__pcpu_type(name) __res;					\
	struct __s {							\
		u_char	__b[MIN(sizeof(__pcpu_type(name)), 8)];		\
	} __s;								\
									\
	if (sizeof(__res) == 1 || sizeof(__res) == 2 ||			\
	    sizeof(__res) == 4 || sizeof(__res) == 8) {			\
		__asm __volatile("mov %%gs:%1,%0"			\
		    : "=r" (__s)					\
		    : "m" (*(struct __s *)(__pcpu_offset(name))));	\
		*(struct __s *)(void *)&__res = __s;			\
	} else {							\
		__res = *__PCPU_PTR(name);				\
	}								\
	__res;								\
})

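/*
 * For the power-of-two sizes up to 8 bytes the read above is a single
 * %gs-relative mov, so it cannot be torn by an interrupt, and there is
 * no window between computing the per-CPU address and the load in which
 * the thread could migrate; larger objects go through __PCPU_PTR() and
 * get no such guarantee.
 */
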
/*
 * Adds the value to the per-cpu counter name.  The implementation
 * must be atomic with respect to interrupts.
 */
#define	__PCPU_ADD(name, val) do {					\
	__pcpu_type(name) __val;					\
	struct __s {							\
		u_char	__b[MIN(sizeof(__pcpu_type(name)), 8)];		\
	} __s;								\
									\
	__val = (val);							\
	if (sizeof(__val) == 1 || sizeof(__val) == 2 ||			\
	    sizeof(__val) == 4 || sizeof(__val) == 8) {			\
		__s = *(struct __s *)(void *)&__val;			\
		__asm __volatile("add %1,%%gs:%0"			\
		    : "=m" (*(struct __s *)(__pcpu_offset(name)))	\
		    : "r" (__s));					\
	} else								\
		*__PCPU_PTR(name) += __val;				\
} while (0)

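/*
 * A single read-modify-write "add" against %gs:offset satisfies the
 * interrupt-atomicity requirement above: an interrupt is delivered
 * either before or after the instruction, never in the middle of it.
 */
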
/*
 * Sets the value of the per-cpu variable name to value val.
 */
#define	__PCPU_SET(name, val) do {					\
	__pcpu_type(name) __val;					\
	struct __s {							\
		u_char	__b[MIN(sizeof(__pcpu_type(name)), 8)];		\
	} __s;								\
									\
	__val = (val);							\
	if (sizeof(__val) == 1 || sizeof(__val) == 2 ||			\
	    sizeof(__val) == 4 || sizeof(__val) == 8) {			\
		__s = *(struct __s *)(void *)&__val;			\
		__asm __volatile("mov %1,%%gs:%0"			\
		    : "=m" (*(struct __s *)(__pcpu_offset(name)))	\
		    : "r" (__s));					\
	} else {							\
		*__PCPU_PTR(name) = __val;				\
	}								\
} while (0)

#define	get_pcpu() __extension__ ({					\
	struct pcpu *__pc;						\
									\
	__asm __volatile("movq %%gs:%1,%0"				\
	    : "=r" (__pc)						\
	    : "m" (*(struct pcpu *)(__pcpu_offset(pc_prvspace))));	\
	__pc;								\
})

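/*
 * During CPU startup the %gs base is pointed at this CPU's struct
 * pcpu, so reading the pc_prvspace self-reference through %gs
 * recovers an ordinary linear address for the structure itself.
 */
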
#define	PCPU_GET(member)	__PCPU_GET(pc_ ## member)
#define	PCPU_ADD(member, val)	__PCPU_ADD(pc_ ## member, val)
#define	PCPU_PTR(member)	__PCPU_PTR(pc_ ## member)
#define	PCPU_SET(member, val)	__PCPU_SET(pc_ ## member, val)

#define	IS_BSP()	(PCPU_GET(cpuid) == 0)

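/*
 * Usage sketch (hypothetical callers):
 *
 *	u_int id = PCPU_GET(apic_id);
 *	PCPU_SET(curpmap, pmap);
 *	if (IS_BSP())
 *		bsp_only_work();
 */
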
#define	zpcpu_offset_cpu(cpu)	((uintptr_t)&__pcpu[0] + UMA_PCPU_ALLOC_SIZE * (cpu))
#define	zpcpu_base_to_offset(base) (void *)((uintptr_t)(base) - (uintptr_t)&__pcpu[0])
#define	zpcpu_offset_to_base(off) (void *)((uintptr_t)(off) + (uintptr_t)&__pcpu[0])

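/*
 * Per-CPU (pcpu) UMA zones replicate an item once per CPU at
 * UMA_PCPU_ALLOC_SIZE intervals starting at &__pcpu[0].  Keeping a
 * reference in its offset form lets the %gs-relative accessors below
 * reach the current CPU's replica without computing its address.
 */
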
#define	zpcpu_sub_protected(base, n) do {				\
	ZPCPU_ASSERT_PROTECTED();					\
	zpcpu_sub(base, n);						\
} while (0)

#define	zpcpu_set_protected(base, n) do {				\
	__typeof(*base) __n = (n);					\
	ZPCPU_ASSERT_PROTECTED();					\
	switch (sizeof(*base)) {					\
	case 4:								\
		__asm __volatile("movl\t%1,%%gs:(%0)"			\
		    : : "r" (base), "ri" (__n) : "memory", "cc");	\
		break;							\
	case 8:								\
		__asm __volatile("movq\t%1,%%gs:(%0)"			\
		    : : "r" (base), "ri" (__n) : "memory", "cc");	\
		break;							\
	default:							\
		*zpcpu_get(base) = __n;					\
	}								\
} while (0)

#define	zpcpu_add(base, n) do {						\
	__typeof(*base) __n = (n);					\
	CTASSERT(sizeof(*base) == 4 || sizeof(*base) == 8);		\
	switch (sizeof(*base)) {					\
	case 4:								\
		__asm __volatile("addl\t%1,%%gs:(%0)"			\
		    : : "r" (base), "ri" (__n) : "memory", "cc");	\
		break;							\
	case 8:								\
		__asm __volatile("addq\t%1,%%gs:(%0)"			\
		    : : "r" (base), "ri" (__n) : "memory", "cc");	\
		break;							\
	}								\
} while (0)

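/*
 * Example (sketch): for a uint64_t counter "c" allocated from a pcpu
 * zone and converted with zpcpu_base_to_offset(), zpcpu_add(c, 1)
 * compiles to a single "addq" on the local CPU's replica; no lock
 * prefix is used because updates are expected to come only from the
 * owning CPU.
 */
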
#define	zpcpu_add_protected(base, n) do {				\
	ZPCPU_ASSERT_PROTECTED();					\
	zpcpu_add(base, n);						\
} while (0)

#define	zpcpu_sub(base, n) do {						\
	__typeof(*base) __n = (n);					\
	CTASSERT(sizeof(*base) == 4 || sizeof(*base) == 8);		\
	switch (sizeof(*base)) {					\
	case 4:								\
		__asm __volatile("subl\t%1,%%gs:(%0)"			\
		    : : "r" (base), "ri" (__n) : "memory", "cc");	\
		break;							\
	case 8:								\
		__asm __volatile("subq\t%1,%%gs:(%0)"			\
		    : : "r" (base), "ri" (__n) : "memory", "cc");	\
		break;							\
	}								\
} while (0)

#endif /* _KERNEL */

#endif /* !_MACHINE_PCPU_H_ */

#endif /* __i386__ */