1 /*-
2 * Copyright (c) Peter Wemm <peter@netplex.com.au>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 * $FreeBSD$
27 */
28
29 #ifndef _MACHINE_PCPU_H_
30 #define _MACHINE_PCPU_H_
31
32 #ifndef _SYS_CDEFS_H_
33 #error "sys/cdefs.h is a prerequisite for this file"
34 #endif
35
36 /*
37 * The SMP parts are setup in pmap.c and locore.s for the BSP, and
38 * mp_machdep.c sets up the data for the AP's to "see" when they awake.
39 * The reason for doing it via a struct is so that an array of pointers
40 * to each CPU's data can be set up for things like "check curproc on all
41 * other processors"
42 */
/*
 * Machine-dependent (amd64) members appended to the MI struct pcpu.
 * pc_prvspace holds this CPU's own pcpu address and is what the
 * %gs-relative accessors below load to form ordinary pointers.
 */
#define	PCPU_MD_FIELDS							\
	struct pcpu *pc_prvspace;	/* Self-reference */		\
	struct pmap *pc_curpmap;	/* presumably the active pmap — set by MD code */ \
	struct amd64tss *pc_tssp;	/* per-CPU TSS, presumably */	\
	register_t pc_rsp0;		/* ring-0 %rsp? — TODO confirm vs. TSS use */ \
	register_t pc_scratch_rsp;	/* User %rsp in syscall */	\
	u_int pc_apic_id;		/* local APIC id, presumably */	\
	u_int pc_acpi_id;		/* ACPI CPU id */		\
	struct user_segment_descriptor *pc_gs32p
52
53 #ifdef _KERNEL
54
55 #ifdef lint
56
/*
 * lint cannot parse the GCC inline assembly used below, so give it a
 * simplified model: a plain global pointer to the per-CPU structure
 * with direct member access.  Same interface, no %gs magic.
 */
extern struct pcpu *pcpup;

#define	PCPU_GET(member)	(pcpup->pc_ ## member)
#define	PCPU_ADD(member, val)	(pcpup->pc_ ## member += (val))
#define	PCPU_INC(member)	PCPU_ADD(member, 1)
#define	PCPU_PTR(member)	(&pcpup->pc_ ## member)
#define	PCPU_SET(member, val)	(pcpup->pc_ ## member = (val))
64
65 #elif defined(__GNUCLIKE_ASM) && defined(__GNUCLIKE___TYPEOF)
66
/*
 * Evaluates to the byte offset of the per-cpu variable name.
 */
#define	__pcpu_offset(name)						\
	__offsetof(struct pcpu, name)

/*
 * Evaluates to the type of the per-cpu variable name.
 */
#define	__pcpu_type(name)						\
	__typeof(((struct pcpu *)0)->name)

/*
 * Evaluates to the address of the per-cpu variable name.
 *
 * The movq loads pc_prvspace (the self-pointer, see PCPU_MD_FIELDS)
 * through the %gs segment, yielding the base address of this CPU's
 * pcpu; the addq then adds the field offset.  The "m" operand is a
 * dummy dereference whose only purpose is to make gcc emit the
 * %gs-relative address of pc_prvspace for %1; the "i" operand is the
 * compile-time constant field offset.
 */
#define	__PCPU_PTR(name) __extension__ ({				\
	__pcpu_type(name) *__p;						\
									\
	__asm __volatile("movq %%gs:%1,%0; addq %2,%0"			\
	    : "=r" (__p)						\
	    : "m" (*(struct pcpu *)(__pcpu_offset(pc_prvspace))),	\
	      "i" (__pcpu_offset(name)));				\
									\
	__p;								\
})
92
/*
 * Evaluates to the value of the per-cpu variable name.
 *
 * For the natively supported widths (1, 2, 4 and 8 bytes) the value
 * is fetched with a single %gs-relative "mov".  struct __s exists
 * only to give the asm memory operand exactly sizeof(name) bytes so
 * gcc selects the correct operand width; the result is then copied
 * into the properly typed __res.  Any other size falls back to a
 * plain copy through __PCPU_PTR() — more than one instruction, so
 * not guaranteed atomic with respect to interrupts.
 */
#define	__PCPU_GET(name) __extension__ ({				\
	__pcpu_type(name) __res;					\
	struct __s {							\
		u_char __b[MIN(sizeof(__pcpu_type(name)), 8)];		\
	} __s;								\
									\
	if (sizeof(__res) == 1 || sizeof(__res) == 2 ||			\
	    sizeof(__res) == 4 || sizeof(__res) == 8) {			\
		__asm __volatile("mov %%gs:%1,%0"			\
		    : "=r" (__s)					\
		    : "m" (*(struct __s *)(__pcpu_offset(name))));	\
		*(struct __s *)(void *)&__res = __s;			\
	} else {							\
		__res = *__PCPU_PTR(name);				\
	}								\
	__res;								\
})
113
/*
 * Adds the value to the per-cpu counter name.  The implementation
 * must be atomic with respect to interrupts.
 *
 * For 1/2/4/8-byte counters this compiles to a single %gs-relative
 * "add", which an interrupt cannot split; struct __s again sizes the
 * memory operand so gcc emits the matching operand width.  Other
 * sizes fall back to a read-modify-write through __PCPU_PTR(), which
 * is NOT a single instruction (callers presumably avoid such sizes).
 */
#define	__PCPU_ADD(name, val) do {					\
	__pcpu_type(name) __val;					\
	struct __s {							\
		u_char __b[MIN(sizeof(__pcpu_type(name)), 8)];		\
	} __s;								\
									\
	__val = (val);							\
	if (sizeof(__val) == 1 || sizeof(__val) == 2 ||			\
	    sizeof(__val) == 4 || sizeof(__val) == 8) {			\
		__s = *(struct __s *)(void *)&__val;			\
		__asm __volatile("add %1,%%gs:%0"			\
		    : "=m" (*(struct __s *)(__pcpu_offset(name)))	\
		    : "r" (__s));					\
	} else								\
		*__PCPU_PTR(name) += __val;				\
} while (0)
134
/*
 * Increments the value of the per-cpu counter name.  The
 * implementation must be atomic with respect to interrupts.
 *
 * The CTASSERT rejects at compile time any counter that is not
 * exactly 1, 2, 4 or 8 bytes; each size then gets the matching
 * single-instruction %gs-relative increment (incb/incw/incl/incq).
 * The same location appears as both output and input operand to
 * tell gcc it is read and written (a read-modify-write).
 */
#define	__PCPU_INC(name) do {						\
	CTASSERT(sizeof(__pcpu_type(name)) == 1 ||			\
	    sizeof(__pcpu_type(name)) == 2 ||				\
	    sizeof(__pcpu_type(name)) == 4 ||				\
	    sizeof(__pcpu_type(name)) == 8);				\
	if (sizeof(__pcpu_type(name)) == 1) {				\
		__asm __volatile("incb %%gs:%0"				\
		    : "=m" (*(__pcpu_type(name) *)(__pcpu_offset(name)))\
		    : "m" (*(__pcpu_type(name) *)(__pcpu_offset(name))));\
	} else if (sizeof(__pcpu_type(name)) == 2) {			\
		__asm __volatile("incw %%gs:%0"				\
		    : "=m" (*(__pcpu_type(name) *)(__pcpu_offset(name)))\
		    : "m" (*(__pcpu_type(name) *)(__pcpu_offset(name))));\
	} else if (sizeof(__pcpu_type(name)) == 4) {			\
		__asm __volatile("incl %%gs:%0"				\
		    : "=m" (*(__pcpu_type(name) *)(__pcpu_offset(name)))\
		    : "m" (*(__pcpu_type(name) *)(__pcpu_offset(name))));\
	} else if (sizeof(__pcpu_type(name)) == 8) {			\
		__asm __volatile("incq %%gs:%0"				\
		    : "=m" (*(__pcpu_type(name) *)(__pcpu_offset(name)))\
		    : "m" (*(__pcpu_type(name) *)(__pcpu_offset(name))));\
	}								\
} while (0)
162
/*
 * Sets the value of the per-cpu variable name to value val.
 *
 * For 1/2/4/8-byte objects the store is a single %gs-relative "mov"
 * (struct __s sizes the memory operand so gcc emits the correct
 * width); other sizes fall back to a plain store through
 * __PCPU_PTR().
 *
 * Wrapped in do { } while (0) — matching __PCPU_ADD/__PCPU_INC —
 * so the macro expands to exactly one statement.  The previous bare
 * { } block made "if (c) PCPU_SET(a, b); else ..." ill-formed (the
 * trailing semicolon became an empty statement orphaning the else).
 */
#define	__PCPU_SET(name, val) do {					\
	__pcpu_type(name) __val;					\
	struct __s {							\
		u_char __b[MIN(sizeof(__pcpu_type(name)), 8)];		\
	} __s;								\
									\
	__val = (val);							\
	if (sizeof(__val) == 1 || sizeof(__val) == 2 ||			\
	    sizeof(__val) == 4 || sizeof(__val) == 8) {			\
		__s = *(struct __s *)(void *)&__val;			\
		__asm __volatile("mov %1,%%gs:%0"			\
		    : "=m" (*(struct __s *)(__pcpu_offset(name)))	\
		    : "r" (__s));					\
	} else {							\
		*__PCPU_PTR(name) = __val;				\
	}								\
} while (0)
183
/*
 * The MI accessor macros: paste the "pc_" member prefix on and
 * dispatch to the %gs-relative implementations above.
 */
#define	PCPU_GET(member)	__PCPU_GET(pc_ ## member)
#define	PCPU_ADD(member, val)	__PCPU_ADD(pc_ ## member, val)
#define	PCPU_INC(member)	__PCPU_INC(pc_ ## member)
#define	PCPU_PTR(member)	__PCPU_PTR(pc_ ## member)
#define	PCPU_SET(member, val)	__PCPU_SET(pc_ ## member, val)
189
/*
 * Return the thread currently running on this CPU.
 *
 * Reads the 8-byte value at offset 0 of the per-CPU area through the
 * %gs segment base.  NOTE(review): this presumes the current-thread
 * pointer is the first member of struct pcpu — that member is
 * declared in the MI <sys/pcpu.h>, not visible here; confirm if the
 * MI layout ever changes.
 */
static __inline struct thread *
__curthread(void)
{
	struct thread *ret;

	__asm __volatile("movq %%gs:0,%0" : "=r" (ret));
	return (ret);
}
#define	curthread	(__curthread())
199
200 #else /* !lint || defined(__GNUCLIKE_ASM) && defined(__GNUCLIKE___TYPEOF) */
201
202 #error "this file needs to be ported to your compiler"
203
204 #endif /* lint, etc. */
205
206 #endif /* _KERNEL */
207
208 #endif /* !_MACHINE_PCPU_H_ */
Cache object: 059f562c8bc01c3769c174ba5d9dd629
|