1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 1998 Doug Rabson
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
28 * $FreeBSD$
29 */
30
31 #ifndef _MACHINE_CPUFUNC_H_
32 #define _MACHINE_CPUFUNC_H_
33
34 #ifdef _KERNEL
35
36 #include <sys/types.h>
37
38 #include <machine/psl.h>
39 #include <machine/spr.h>
40
41 struct thread;
42
#ifdef KDB
/* Drop into the kernel debugger; implemented by the KDB machinery. */
void breakpoint(void);
#else
/* No debugger compiled in: breakpoint requests become a no-op. */
static __inline void
breakpoint(void)
{

	return;
}
#endif
53
54 /* CPU register mangling inlines */
55
/*
 * Write the Machine State Register.  The trailing isync is a context
 * synchronization, so the new MSR state is in effect before any
 * subsequent instruction executes.
 */
static __inline void
mtmsr(register_t value)
{

	__asm __volatile ("mtmsr %0; isync" :: "r"(value));
}
62
#ifdef __powerpc64__
/*
 * 64-bit MSR write (mtmsrd), followed by isync to synchronize context,
 * mirroring mtmsr() above.  Only available in 64-bit mode.
 */
static __inline void
mtmsrd(register_t value)
{

	__asm __volatile ("mtmsrd %0; isync" :: "r"(value));
}
#endif
71
/* Read the current Machine State Register. */
static __inline register_t
mfmsr(void)
{
	register_t value;

	__asm __volatile ("mfmsr %0" : "=r"(value));

	return (value);
}
81
#ifndef __powerpc64__
/*
 * Write the segment register selected by 'va' (mtsrin uses the top
 * bits of the address to pick the SR).  32-bit only: segment registers
 * are not present under the 64-bit build.
 */
static __inline void
mtsrin(vm_offset_t va, register_t value)
{

	__asm __volatile ("mtsrin %0,%1; isync" :: "r"(value), "r"(va));
}

/* Read the segment register selected by 'va'. */
static __inline register_t
mfsrin(vm_offset_t va)
{
	register_t value;

	__asm __volatile ("mfsrin %0,%1" : "=r"(value) : "r"(va));

	return (value);
}
#endif
100
/*
 * Read special purpose register 136 (the CTRL register, per the
 * function name).  Read via a raw SPR number rather than a mnemonic.
 */
static __inline register_t
mfctrl(void)
{
	register_t value;

	__asm __volatile ("mfspr %0,136" : "=r"(value));

	return (value);
}
110
/* Write the decrementer (countdown timer) register. */
static __inline void
mtdec(register_t value)
{

	__asm __volatile ("mtdec %0" :: "r"(value));
}
117
/* Read the decrementer (countdown timer) register. */
static __inline register_t
mfdec(void)
{
	register_t value;

	__asm __volatile ("mfdec %0" : "=r"(value));

	return (value);
}
127
/* Read the Processor Version Register (32 bits on all CPUs). */
static __inline uint32_t
mfpvr(void)
{
	uint32_t value;

	__asm __volatile ("mfpvr %0" : "=r"(value));

	return (value);
}
137
/*
 * Read the 64-bit time base.  On 64-bit CPUs a single mftb suffices.
 * On 32-bit CPUs the upper (TBU) and lower (TBL) halves are read
 * separately; the loop re-reads TBU afterwards and retries if it
 * changed, so a carry from TBL into TBU between the two reads cannot
 * produce a torn value.
 */
static __inline u_quad_t
mftb(void)
{
	u_quad_t tb;
#ifdef __powerpc64__
	__asm __volatile ("mftb %0" : "=r"(tb));
#else
	/* First word of 'tb' holds TBU — assumes big-endian layout. */
	uint32_t *tbup = (uint32_t *)&tb;
	uint32_t *tblp = tbup + 1;

	do {
		*tbup = mfspr(TBR_TBU);
		*tblp = mfspr(TBR_TBL);
	} while (*tbup != mfspr(TBR_TBU));
#endif

	return (tb);
}
156
/*
 * Set the 64-bit time base.  TBL is zeroed first so that the time base
 * cannot carry into TBU between the TBU and final TBL writes.
 */
static __inline void
mttb(u_quad_t time)
{

	mtspr(TBR_TBWL, 0);
	mtspr(TBR_TBWU, (uint32_t)(time >> 32));
	mtspr(TBR_TBWL, (uint32_t)(time & 0xffffffff));
}
165
/*
 * Read the FPSCR: mffs copies it into FPR0, which is then stored into
 * a 64-bit stack slot.  The result is narrowed to register_t, so on
 * 32-bit the upper word of the stored value is discarded.
 */
static __inline register_t
mffs(void)
{
	uint64_t value;

	__asm __volatile ("mffs 0; stfd 0,0(%0)"
			:: "b"(&value));

	return ((register_t)value);
}
176
/*
 * Write the FPSCR: load 'value' into FPR0 from memory, then mtfsf with
 * field mask 0xff to update all eight FPSCR fields.
 */
static __inline void
mtfsf(uint64_t value)
{

	__asm __volatile ("lfd 0,0(%0); mtfsf 0xff,0"
			:: "b"(&value));
}
184
/*
 * eieio barrier; the "memory" clobber also stops the compiler from
 * reordering memory accesses across it.
 */
static __inline void
eieio(void)
{

	__asm __volatile ("eieio" : : : "memory");
}
191
/*
 * isync instruction barrier, with a compiler-level memory barrier via
 * the "memory" clobber.
 */
static __inline void
isync(void)
{

	__asm __volatile ("isync" : : : "memory");
}
198
/*
 * Heavyweight sync, plus a compiler memory barrier.  Named
 * powerpc_sync() to avoid clashing with other uses of "sync".
 */
static __inline void
powerpc_sync(void)
{

	__asm __volatile ("sync" : : : "memory");
}
205
/*
 * Count leading zeros of a 64-bit word.  The instruction is emitted as
 * a raw opcode via .long — presumably so assemblers that reject the
 * 64-bit mnemonic still accept it; %0/%1 expand to the bare register
 * numbers, which are OR-ed into the instruction's register fields.
 */
static __inline int
cntlzd(uint64_t word)
{
	uint64_t result;
	/* cntlzd %0, %1 */
	__asm __volatile(".long 0x7c000074 | (%1 << 21) | (%0 << 16)" :
	    "=r"(result) : "r"(word));

	return (int)result;
}
216
/*
 * Count trailing zeros of a 64-bit word.  Encoded as a raw opcode like
 * cntlzd() above; %0/%1 expand to register numbers OR-ed into the
 * instruction's register fields.
 */
static __inline int
cnttzd(uint64_t word)
{
	uint64_t result;
	/* cnttzd %0, %1 */
	__asm __volatile(".long 0x7c000474 | (%1 << 21) | (%0 << 16)" :
	    "=r"(result) : "r"(word));

	return (int)result;
}
227
/*
 * ptesync barrier (page-table-entry synchronization).
 * NOTE(review): unlike eieio()/isync()/powerpc_sync() above, this asm
 * has no "memory" clobber, so the compiler may reorder memory accesses
 * around it — confirm callers do not depend on compiler ordering here.
 */
static __inline void
ptesync(void)
{
	__asm __volatile("ptesync");
}
233
/*
 * Disable external interrupts by clearing PSL_EE in the MSR.
 * Returns the previous MSR value for a matching intr_restore().
 */
static __inline register_t
intr_disable(void)
{
	register_t msr;

	msr = mfmsr();
	mtmsr(msr & ~PSL_EE);
	return (msr);
}
243
/* Restore the MSR (and with it the interrupt-enable state) saved by
 * intr_disable(). */
static __inline void
intr_restore(register_t msr)
{

	mtmsr(msr);
}
250
/*
 * Return this CPU's per-CPU data pointer, kept in SPRG0 by the kernel
 * (per the cast to struct pcpu * below).
 */
static __inline struct pcpu *
get_pcpu(void)
{
	struct pcpu *ret;

	__asm __volatile("mfsprg %0, 0" : "=r"(ret));

	return (ret);
}
260
261 #define HAVE_INLINE_FLS
262 static __inline __pure2 int
263 fls(int mask)
264 {
265 return (mask ? 32 - __builtin_clz(mask) : 0);
266 }
267
268 #define HAVE_INLINE_FLSL
269 static __inline __pure2 int
270 flsl(long mask)
271 {
272 return (mask ? (8 * sizeof(long) - __builtin_clzl(mask)) : 0);
273 }
274
/* "NOP" operations to signify priorities to the kernel. */
/*
 * Each is a special encoding of "or rN,rN,rN", which some processors
 * interpret as a hardware-thread priority hint; plain cores execute it
 * as an ordinary no-op.  Ordered here from very-low up to high.
 */
static __inline void
nop_prio_vlow(void)
{
	__asm __volatile("or 31,31,31");
}

static __inline void
nop_prio_low(void)
{
	__asm __volatile("or 1,1,1");
}

static __inline void
nop_prio_mlow(void)
{
	__asm __volatile("or 6,6,6");
}

static __inline void
nop_prio_medium(void)
{
	__asm __volatile("or 2,2,2");
}

static __inline void
nop_prio_mhigh(void)
{
	__asm __volatile("or 5,5,5");
}

static __inline void
nop_prio_high(void)
{
	__asm __volatile("or 3,3,3");
}
311
312 #endif /* _KERNEL */
313
314 #endif /* !_MACHINE_CPUFUNC_H_ */
Cache object: fc2b5c2aee0e0cba371393126fb29bf5
|