/*	$OpenBSD: pio.h,v 1.2 1998/09/15 10:50:12 pefo Exp $	*/

/*-
 * Copyright (c) 2002-2004 Juli Mallett. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright (c) 1995-1999 Per Fogelstrom. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Per Fogelstrom.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *	JNPR: cpufunc.h,v 1.5 2007/08/09 11:23:32 katta
 * $FreeBSD: releng/11.0/sys/mips/include/cpufunc.h 295138 2016-02-02 07:47:38Z adrian $
 */

#ifndef	_MACHINE_CPUFUNC_H_
#define	_MACHINE_CPUFUNC_H_

#include <sys/types.h>
#include <machine/cpuregs.h>
/*
 * These functions are required by user-land atomic ops.
 */

static __inline void
mips_barrier(void)
{
#if defined(CPU_CNMIPS) || defined(CPU_RMI) || defined(CPU_NLM)
	__compiler_membar();
#else
	__asm __volatile (".set noreorder\n\t"
			  "nop\n\t"
			  "nop\n\t"
			  "nop\n\t"
			  "nop\n\t"
			  "nop\n\t"
			  "nop\n\t"
			  "nop\n\t"
			  "nop\n\t"
			  ".set reorder\n\t"
			  : : : "memory");
#endif
}

static __inline void
mips_cp0_sync(void)
{
	__asm __volatile (__XSTRING(COP0_SYNC));
}

static __inline void
mips_wbflush(void)
{
#if defined(CPU_CNMIPS)
	__asm __volatile (".set noreorder\n\t"
			  "syncw\n\t"
			  ".set reorder\n"
			  : : : "memory");
#else
	__asm __volatile ("sync" : : : "memory");
	mips_barrier();
#endif
}
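
/*
 * Illustrative usage sketch (not part of the original header): a typical
 * use of mips_wbflush() is to drain the write buffer after an uncached
 * store to a device register, so the write reaches the device before
 * execution continues.  The register address below is hypothetical:
 *
 *	volatile uint32_t *csr =
 *	    (volatile uint32_t *)MIPS_PHYS_TO_KSEG1(0x1f000500);
 *
 *	*csr = 1;
 *	mips_wbflush();
 */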

#ifdef _KERNEL
/*
 * XXX
 * It would be nice to add variants that read/write register_t, to avoid some
 * ABI checks.
 */
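/*
 * Purely as a sketch of the idea above (nothing below is defined by this
 * header): such a variant could select dmfc0 or mfc0 at compile time so
 * the result always matches the ABI's register width, e.g. for EntryHi:
 *
 *	static __inline register_t
 *	mips_rd_entryhi_reg(void)
 *	{
 *		register_t v;
 *
 *	#if defined(__mips_n64)
 *		__asm __volatile ("dmfc0 %0, $10" : "=r"(v));
 *	#else
 *		__asm __volatile ("mfc0 %0, $10" : "=r"(v));
 *	#endif
 *		return (v);
 *	}
 */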
#if defined(__mips_n32) || defined(__mips_n64)
#define	MIPS_RW64_COP0(n,r)					\
static __inline uint64_t					\
mips_rd_ ## n (void)						\
{								\
	uint64_t v0;						\
	__asm __volatile ("dmfc0 %[v0], $"__XSTRING(r)";"	\
			  : [v0] "=&r"(v0));			\
	mips_barrier();						\
	return (v0);						\
}								\
static __inline void						\
mips_wr_ ## n (uint64_t a0)					\
{								\
	__asm __volatile ("dmtc0 %[a0], $"__XSTRING(r)";"	\
			  __XSTRING(COP0_SYNC)";"		\
			  "nop;"				\
			  "nop;"				\
			  :					\
			  : [a0] "r"(a0));			\
	mips_barrier();						\
} struct __hack

#define	MIPS_RW64_COP0_SEL(n,r,s)				\
static __inline uint64_t					\
mips_rd_ ## n(void)						\
{								\
	uint64_t v0;						\
	__asm __volatile ("dmfc0 %[v0], $"__XSTRING(r)", "__XSTRING(s)";"	\
			  : [v0] "=&r"(v0));			\
	mips_barrier();						\
	return (v0);						\
}								\
static __inline void						\
mips_wr_ ## n(uint64_t a0)					\
{								\
	__asm __volatile ("dmtc0 %[a0], $"__XSTRING(r)", "__XSTRING(s)";"	\
			  __XSTRING(COP0_SYNC)";"		\
			  :					\
			  : [a0] "r"(a0));			\
	mips_barrier();						\
} struct __hack

#if defined(__mips_n64)
MIPS_RW64_COP0(excpc, MIPS_COP_0_EXC_PC);
MIPS_RW64_COP0(entryhi, MIPS_COP_0_TLB_HI);
MIPS_RW64_COP0(pagemask, MIPS_COP_0_TLB_PG_MASK);
#ifdef CPU_CNMIPS
MIPS_RW64_COP0_SEL(cvmcount, MIPS_COP_0_COUNT, 6);
MIPS_RW64_COP0_SEL(cvmctl, MIPS_COP_0_COUNT, 7);
MIPS_RW64_COP0_SEL(cvmmemctl, MIPS_COP_0_COMPARE, 7);
MIPS_RW64_COP0_SEL(icache_err, MIPS_COP_0_CACHE_ERR, 0);
MIPS_RW64_COP0_SEL(dcache_err, MIPS_COP_0_CACHE_ERR, 1);
#endif
#endif
#if defined(__mips_n64) || defined(__mips_n32) /* PHYSADDR_64_BIT */
MIPS_RW64_COP0(entrylo0, MIPS_COP_0_TLB_LO0);
MIPS_RW64_COP0(entrylo1, MIPS_COP_0_TLB_LO1);
#endif
MIPS_RW64_COP0(xcontext, MIPS_COP_0_TLB_XCONTEXT);

#undef	MIPS_RW64_COP0
#undef	MIPS_RW64_COP0_SEL
#endif

#define	MIPS_RW32_COP0(n,r)					\
static __inline uint32_t					\
mips_rd_ ## n (void)						\
{								\
	int v0;							\
	__asm __volatile ("mfc0 %[v0], $"__XSTRING(r)";"	\
			  : [v0] "=&r"(v0));			\
	mips_barrier();						\
	return (v0);						\
}								\
static __inline void						\
mips_wr_ ## n (uint32_t a0)					\
{								\
	__asm __volatile ("mtc0 %[a0], $"__XSTRING(r)";"	\
			  __XSTRING(COP0_SYNC)";"		\
			  "nop;"				\
			  "nop;"				\
			  :					\
			  : [a0] "r"(a0));			\
	mips_barrier();						\
} struct __hack

#define	MIPS_RW32_COP0_SEL(n,r,s)				\
static __inline uint32_t					\
mips_rd_ ## n(void)						\
{								\
	int v0;							\
	__asm __volatile ("mfc0 %[v0], $"__XSTRING(r)", "__XSTRING(s)";"	\
			  : [v0] "=&r"(v0));			\
	mips_barrier();						\
	return (v0);						\
}								\
static __inline void						\
mips_wr_ ## n(uint32_t a0)					\
{								\
	__asm __volatile ("mtc0 %[a0], $"__XSTRING(r)", "__XSTRING(s)";"	\
			  __XSTRING(COP0_SYNC)";"		\
			  "nop;"				\
			  "nop;"				\
			  :					\
			  : [a0] "r"(a0));			\
	mips_barrier();						\
} struct __hack
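
/*
 * For reference (illustrative, not normative): each MIPS_RW32_COP0(n, r)
 * use below generates a pair of inline accessors, mips_rd_<n>() and
 * mips_wr_<n>(), wrapping mfc0/mtc0 of coprocessor 0 register r; the
 * _SEL variant additionally encodes the register select.  For example,
 * MIPS_RW32_COP0(status, MIPS_COP_0_STATUS) provides
 * mips_rd_status()/mips_wr_status(), used like:
 *
 *	uint32_t sr;
 *
 *	sr = mips_rd_status();
 *	mips_wr_status(sr | MIPS_SR_INT_IE);
 */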

#ifdef CPU_CNMIPS
static __inline void mips_sync_icache (void)
{
	__asm __volatile (
		".set push\n"
		".set mips64\n"
		".word 0x041f0000\n"		/* xxx ICACHE */
		"nop\n"
		".set pop\n"
		: : );
}
#endif

MIPS_RW32_COP0(compare, MIPS_COP_0_COMPARE);
MIPS_RW32_COP0(config, MIPS_COP_0_CONFIG);
MIPS_RW32_COP0_SEL(config1, MIPS_COP_0_CONFIG, 1);
MIPS_RW32_COP0_SEL(config2, MIPS_COP_0_CONFIG, 2);
MIPS_RW32_COP0_SEL(config3, MIPS_COP_0_CONFIG, 3);
#ifdef CPU_CNMIPS
MIPS_RW32_COP0_SEL(config4, MIPS_COP_0_CONFIG, 4);
#endif
#ifdef BERI_LARGE_TLB
MIPS_RW32_COP0_SEL(config5, MIPS_COP_0_CONFIG, 5);
#endif
#if defined(CPU_NLM) || defined(BERI_LARGE_TLB)
MIPS_RW32_COP0_SEL(config6, MIPS_COP_0_CONFIG, 6);
#endif
#if defined(CPU_NLM) || defined(CPU_MIPS1004K)
MIPS_RW32_COP0_SEL(config7, MIPS_COP_0_CONFIG, 7);
#endif
MIPS_RW32_COP0(count, MIPS_COP_0_COUNT);
MIPS_RW32_COP0(index, MIPS_COP_0_TLB_INDEX);
MIPS_RW32_COP0(wired, MIPS_COP_0_TLB_WIRED);
MIPS_RW32_COP0(cause, MIPS_COP_0_CAUSE);
#if !defined(__mips_n64)
MIPS_RW32_COP0(excpc, MIPS_COP_0_EXC_PC);
#endif
MIPS_RW32_COP0(status, MIPS_COP_0_STATUS);
MIPS_RW32_COP0_SEL(cmgcrbase, 15, 3);

/* XXX: Some of these registers are specific to MIPS32. */
#if !defined(__mips_n64)
MIPS_RW32_COP0(entryhi, MIPS_COP_0_TLB_HI);
MIPS_RW32_COP0(pagemask, MIPS_COP_0_TLB_PG_MASK);
#endif
#ifdef CPU_NLM
MIPS_RW32_COP0_SEL(pagegrain, MIPS_COP_0_TLB_PG_MASK, 1);
#endif
#if !defined(__mips_n64) && !defined(__mips_n32) /* !PHYSADDR_64_BIT */
MIPS_RW32_COP0(entrylo0, MIPS_COP_0_TLB_LO0);
MIPS_RW32_COP0(entrylo1, MIPS_COP_0_TLB_LO1);
#endif
MIPS_RW32_COP0(prid, MIPS_COP_0_PRID);
/* XXX 64-bit? */
MIPS_RW32_COP0_SEL(ebase, MIPS_COP_0_PRID, 1);
MIPS_RW32_COP0(watchlo, MIPS_COP_0_WATCH_LO);
MIPS_RW32_COP0_SEL(watchlo1, MIPS_COP_0_WATCH_LO, 1);
MIPS_RW32_COP0_SEL(watchlo2, MIPS_COP_0_WATCH_LO, 2);
MIPS_RW32_COP0_SEL(watchlo3, MIPS_COP_0_WATCH_LO, 3);
MIPS_RW32_COP0(watchhi, MIPS_COP_0_WATCH_HI);
MIPS_RW32_COP0_SEL(watchhi1, MIPS_COP_0_WATCH_HI, 1);
MIPS_RW32_COP0_SEL(watchhi2, MIPS_COP_0_WATCH_HI, 2);
MIPS_RW32_COP0_SEL(watchhi3, MIPS_COP_0_WATCH_HI, 3);

MIPS_RW32_COP0_SEL(perfcnt0, MIPS_COP_0_PERFCNT, 0);
MIPS_RW32_COP0_SEL(perfcnt1, MIPS_COP_0_PERFCNT, 1);
MIPS_RW32_COP0_SEL(perfcnt2, MIPS_COP_0_PERFCNT, 2);
MIPS_RW32_COP0_SEL(perfcnt3, MIPS_COP_0_PERFCNT, 3);

#undef	MIPS_RW32_COP0
#undef	MIPS_RW32_COP0_SEL

static __inline register_t
intr_disable(void)
{
	register_t s;

	s = mips_rd_status();
	mips_wr_status(s & ~MIPS_SR_INT_IE);

	return (s & MIPS_SR_INT_IE);
}

static __inline register_t
intr_enable(void)
{
	register_t s;

	s = mips_rd_status();
	mips_wr_status(s | MIPS_SR_INT_IE);

	return (s);
}

static __inline void
intr_restore(register_t ie)
{
	if (ie == MIPS_SR_INT_IE) {
		intr_enable();
	}
}
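
/*
 * Illustrative sketch (not from the original source): the usual pattern
 * is to save the previous interrupt-enable state, disable interrupts
 * around a short critical section, and then restore it; do_work() below
 * is a stand-in for the protected code:
 *
 *	register_t s;
 *
 *	s = intr_disable();
 *	do_work();
 *	intr_restore(s);
 */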

static __inline uint32_t
set_intr_mask(uint32_t mask)
{
	uint32_t ostatus;

	ostatus = mips_rd_status();
	mask = (ostatus & ~MIPS_SR_INT_MASK) | (mask & MIPS_SR_INT_MASK);
	mips_wr_status(mask);
	return (ostatus);
}

static __inline uint32_t
get_intr_mask(void)
{

	return (mips_rd_status() & MIPS_SR_INT_MASK);
}

static __inline void
breakpoint(void)
{
	__asm __volatile ("break");
}

#if defined(__GNUC__) && !defined(__mips_o32)
#define	mips3_ld(a)	(*(const volatile uint64_t *)(a))
#define	mips3_sd(a, v)	(*(volatile uint64_t *)(a) = (v))
#else
uint64_t mips3_ld(volatile uint64_t *va);
void mips3_sd(volatile uint64_t *, uint64_t);
#endif	/* __GNUC__ */

#endif	/* _KERNEL */

#define	readb(va)	(*(volatile uint8_t *) (va))
#define	readw(va)	(*(volatile uint16_t *) (va))
#define	readl(va)	(*(volatile uint32_t *) (va))
#if defined(__GNUC__) && !defined(__mips_o32)
#define	readq(a)	(*(volatile uint64_t *)(a))
#endif

#define	writeb(va, d)	(*(volatile uint8_t *) (va) = (d))
#define	writew(va, d)	(*(volatile uint16_t *) (va) = (d))
#define	writel(va, d)	(*(volatile uint32_t *) (va) = (d))
#if defined(__GNUC__) && !defined(__mips_o32)
#define	writeq(va, d)	(*(volatile uint64_t *) (va) = (d))
#endif
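
/*
 * Illustrative sketch (the mapping below is hypothetical): these
 * accessors are plain volatile loads and stores, typically applied to
 * uncached KSEG1 mappings of device registers:
 *
 *	volatile void *va;	(a hypothetical uncached device mapping)
 *	uint32_t v;
 *
 *	v = readl(va);
 *	writel(va, v | 0x1);
 */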

#endif	/* !_MACHINE_CPUFUNC_H_ */