1 /* $OpenBSD: pio.h,v 1.2 1998/09/15 10:50:12 pefo Exp $ */
2
3 /*-
4 * Copyright (c) 2002-2004 Juli Mallett. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27 /*
28 * Copyright (c) 1995-1999 Per Fogelstrom. All rights reserved.
29 *
30 * Redistribution and use in source and binary forms, with or without
31 * modification, are permitted provided that the following conditions
32 * are met:
33 * 1. Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * 2. Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in the
37 * documentation and/or other materials provided with the distribution.
38 * 3. All advertising materials mentioning features or use of this software
39 * must display the following acknowledgement:
40 * This product includes software developed by Per Fogelstrom.
41 * 4. The name of the author may not be used to endorse or promote products
42 * derived from this software without specific prior written permission
43 *
44 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
45 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
46 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
47 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
48 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
49 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
53 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 *
55 * JNPR: cpufunc.h,v 1.5 2007/08/09 11:23:32 katta
56 * $FreeBSD: releng/10.3/sys/mips/include/cpufunc.h 257528 2013-11-01 21:17:45Z brooks $
57 */
58
59 #ifndef _MACHINE_CPUFUNC_H_
60 #define _MACHINE_CPUFUNC_H_
61
62 #include <sys/types.h>
63 #include <machine/cpuregs.h>
64
/*
 * These functions are required by user-land atomic ops.
 */
68
69 static __inline void
70 mips_barrier(void)
71 {
72 #if defined(CPU_CNMIPS) || defined(CPU_RMI) || defined(CPU_NLM)
73 __compiler_membar();
74 #else
75 __asm __volatile (".set noreorder\n\t"
76 "nop\n\t"
77 "nop\n\t"
78 "nop\n\t"
79 "nop\n\t"
80 "nop\n\t"
81 "nop\n\t"
82 "nop\n\t"
83 "nop\n\t"
84 ".set reorder\n\t"
85 : : : "memory");
86 #endif
87 }
88
89 static __inline void
90 mips_cp0_sync(void)
91 {
92 __asm __volatile (__XSTRING(COP0_SYNC));
93 }
94
95 static __inline void
96 mips_wbflush(void)
97 {
98 #if defined(CPU_CNMIPS)
99 __asm __volatile (".set noreorder\n\t"
100 "syncw\n\t"
101 ".set reorder\n"
102 : : : "memory");
103 #else
104 __asm __volatile ("sync" : : : "memory");
105 mips_barrier();
106 #endif
107 }
108
109 #ifdef _KERNEL
110 /*
111 * XXX
112 * It would be nice to add variants that read/write register_t, to avoid some
113 * ABI checks.
114 */
115 #if defined(__mips_n32) || defined(__mips_n64)
116 #define MIPS_RW64_COP0(n,r) \
117 static __inline uint64_t \
118 mips_rd_ ## n (void) \
119 { \
120 int v0; \
121 __asm __volatile ("dmfc0 %[v0], $"__XSTRING(r)";" \
122 : [v0] "=&r"(v0)); \
123 mips_barrier(); \
124 return (v0); \
125 } \
126 static __inline void \
127 mips_wr_ ## n (uint64_t a0) \
128 { \
129 __asm __volatile ("dmtc0 %[a0], $"__XSTRING(r)";" \
130 __XSTRING(COP0_SYNC)";" \
131 "nop;" \
132 "nop;" \
133 : \
134 : [a0] "r"(a0)); \
135 mips_barrier(); \
136 } struct __hack
137
138 #define MIPS_RW64_COP0_SEL(n,r,s) \
139 static __inline uint64_t \
140 mips_rd_ ## n(void) \
141 { \
142 int v0; \
143 __asm __volatile ("dmfc0 %[v0], $"__XSTRING(r)", "__XSTRING(s)";" \
144 : [v0] "=&r"(v0)); \
145 mips_barrier(); \
146 return (v0); \
147 } \
148 static __inline void \
149 mips_wr_ ## n(uint64_t a0) \
150 { \
151 __asm __volatile ("dmtc0 %[a0], $"__XSTRING(r)", "__XSTRING(s)";" \
152 __XSTRING(COP0_SYNC)";" \
153 : \
154 : [a0] "r"(a0)); \
155 mips_barrier(); \
156 } struct __hack
157
158 #if defined(__mips_n64)
159 MIPS_RW64_COP0(excpc, MIPS_COP_0_EXC_PC);
160 MIPS_RW64_COP0(entryhi, MIPS_COP_0_TLB_HI);
161 MIPS_RW64_COP0(pagemask, MIPS_COP_0_TLB_PG_MASK);
162 #ifdef CPU_CNMIPS
163 MIPS_RW64_COP0_SEL(cvmcount, MIPS_COP_0_COUNT, 6);
164 MIPS_RW64_COP0_SEL(cvmctl, MIPS_COP_0_COUNT, 7);
165 MIPS_RW64_COP0_SEL(cvmmemctl, MIPS_COP_0_COMPARE, 7);
166 MIPS_RW64_COP0_SEL(icache_err, MIPS_COP_0_CACHE_ERR, 0);
167 MIPS_RW64_COP0_SEL(dcache_err, MIPS_COP_0_CACHE_ERR, 1);
168 #endif
169 #endif
170 #if defined(__mips_n64) || defined(__mips_n32) /* PHYSADDR_64_BIT */
171 MIPS_RW64_COP0(entrylo0, MIPS_COP_0_TLB_LO0);
172 MIPS_RW64_COP0(entrylo1, MIPS_COP_0_TLB_LO1);
173 #endif
174 MIPS_RW64_COP0(xcontext, MIPS_COP_0_TLB_XCONTEXT);
175
176 #undef MIPS_RW64_COP0
177 #undef MIPS_RW64_COP0_SEL
178 #endif
179
180 #define MIPS_RW32_COP0(n,r) \
181 static __inline uint32_t \
182 mips_rd_ ## n (void) \
183 { \
184 int v0; \
185 __asm __volatile ("mfc0 %[v0], $"__XSTRING(r)";" \
186 : [v0] "=&r"(v0)); \
187 mips_barrier(); \
188 return (v0); \
189 } \
190 static __inline void \
191 mips_wr_ ## n (uint32_t a0) \
192 { \
193 __asm __volatile ("mtc0 %[a0], $"__XSTRING(r)";" \
194 __XSTRING(COP0_SYNC)";" \
195 "nop;" \
196 "nop;" \
197 : \
198 : [a0] "r"(a0)); \
199 mips_barrier(); \
200 } struct __hack
201
202 #define MIPS_RW32_COP0_SEL(n,r,s) \
203 static __inline uint32_t \
204 mips_rd_ ## n(void) \
205 { \
206 int v0; \
207 __asm __volatile ("mfc0 %[v0], $"__XSTRING(r)", "__XSTRING(s)";" \
208 : [v0] "=&r"(v0)); \
209 mips_barrier(); \
210 return (v0); \
211 } \
212 static __inline void \
213 mips_wr_ ## n(uint32_t a0) \
214 { \
215 __asm __volatile ("mtc0 %[a0], $"__XSTRING(r)", "__XSTRING(s)";" \
216 __XSTRING(COP0_SYNC)";" \
217 "nop;" \
218 "nop;" \
219 : \
220 : [a0] "r"(a0)); \
221 mips_barrier(); \
222 } struct __hack
223
224 #ifdef CPU_CNMIPS
225 static __inline void mips_sync_icache (void)
226 {
227 __asm __volatile (
228 ".set push\n"
229 ".set mips64\n"
230 ".word 0x041f0000\n" /* xxx ICACHE */
231 "nop\n"
232 ".set pop\n"
233 : : );
234 }
235 #endif
236
237 MIPS_RW32_COP0(compare, MIPS_COP_0_COMPARE);
238 MIPS_RW32_COP0(config, MIPS_COP_0_CONFIG);
239 MIPS_RW32_COP0_SEL(config1, MIPS_COP_0_CONFIG, 1);
240 MIPS_RW32_COP0_SEL(config2, MIPS_COP_0_CONFIG, 2);
241 MIPS_RW32_COP0_SEL(config3, MIPS_COP_0_CONFIG, 3);
242 #ifdef CPU_CNMIPS
243 MIPS_RW32_COP0_SEL(config4, MIPS_COP_0_CONFIG, 4);
244 #endif
245 #ifdef BERI_LARGE_TLB
246 MIPS_RW32_COP0_SEL(config5, MIPS_COP_0_CONFIG, 5);
247 #endif
248 #if defined(CPU_NLM) || defined(BERI_LARGE_TLB)
249 MIPS_RW32_COP0_SEL(config6, MIPS_COP_0_CONFIG, 6);
250 #endif
251 #ifdef CPU_NLM
252 MIPS_RW32_COP0_SEL(config7, MIPS_COP_0_CONFIG, 7);
253 #endif
254 MIPS_RW32_COP0(count, MIPS_COP_0_COUNT);
255 MIPS_RW32_COP0(index, MIPS_COP_0_TLB_INDEX);
256 MIPS_RW32_COP0(wired, MIPS_COP_0_TLB_WIRED);
257 MIPS_RW32_COP0(cause, MIPS_COP_0_CAUSE);
258 #if !defined(__mips_n64)
259 MIPS_RW32_COP0(excpc, MIPS_COP_0_EXC_PC);
260 #endif
261 MIPS_RW32_COP0(status, MIPS_COP_0_STATUS);
262
263 /* XXX: Some of these registers are specific to MIPS32. */
264 #if !defined(__mips_n64)
265 MIPS_RW32_COP0(entryhi, MIPS_COP_0_TLB_HI);
266 MIPS_RW32_COP0(pagemask, MIPS_COP_0_TLB_PG_MASK);
267 #endif
268 #ifdef CPU_NLM
269 MIPS_RW32_COP0_SEL(pagegrain, MIPS_COP_0_TLB_PG_MASK, 1);
270 #endif
271 #if !defined(__mips_n64) && !defined(__mips_n32) /* !PHYSADDR_64_BIT */
272 MIPS_RW32_COP0(entrylo0, MIPS_COP_0_TLB_LO0);
273 MIPS_RW32_COP0(entrylo1, MIPS_COP_0_TLB_LO1);
274 #endif
275 MIPS_RW32_COP0(prid, MIPS_COP_0_PRID);
276 /* XXX 64-bit? */
277 MIPS_RW32_COP0_SEL(ebase, MIPS_COP_0_PRID, 1);
278 MIPS_RW32_COP0(watchlo, MIPS_COP_0_WATCH_LO);
279 MIPS_RW32_COP0_SEL(watchlo1, MIPS_COP_0_WATCH_LO, 1);
280 MIPS_RW32_COP0_SEL(watchlo2, MIPS_COP_0_WATCH_LO, 2);
281 MIPS_RW32_COP0_SEL(watchlo3, MIPS_COP_0_WATCH_LO, 3);
282 MIPS_RW32_COP0(watchhi, MIPS_COP_0_WATCH_HI);
283 MIPS_RW32_COP0_SEL(watchhi1, MIPS_COP_0_WATCH_HI, 1);
284 MIPS_RW32_COP0_SEL(watchhi2, MIPS_COP_0_WATCH_HI, 2);
285 MIPS_RW32_COP0_SEL(watchhi3, MIPS_COP_0_WATCH_HI, 3);
286
287 MIPS_RW32_COP0_SEL(perfcnt0, MIPS_COP_0_PERFCNT, 0);
288 MIPS_RW32_COP0_SEL(perfcnt1, MIPS_COP_0_PERFCNT, 1);
289 MIPS_RW32_COP0_SEL(perfcnt2, MIPS_COP_0_PERFCNT, 2);
290 MIPS_RW32_COP0_SEL(perfcnt3, MIPS_COP_0_PERFCNT, 3);
291
292 #undef MIPS_RW32_COP0
293 #undef MIPS_RW32_COP0_SEL
294
295 static __inline register_t
296 intr_disable(void)
297 {
298 register_t s;
299
300 s = mips_rd_status();
301 mips_wr_status(s & ~MIPS_SR_INT_IE);
302
303 return (s & MIPS_SR_INT_IE);
304 }
305
306 static __inline register_t
307 intr_enable(void)
308 {
309 register_t s;
310
311 s = mips_rd_status();
312 mips_wr_status(s | MIPS_SR_INT_IE);
313
314 return (s);
315 }
316
317 static __inline void
318 intr_restore(register_t ie)
319 {
320 if (ie == MIPS_SR_INT_IE) {
321 intr_enable();
322 }
323 }
324
325 static __inline uint32_t
326 set_intr_mask(uint32_t mask)
327 {
328 uint32_t ostatus;
329
330 ostatus = mips_rd_status();
331 mask = (ostatus & ~MIPS_SR_INT_MASK) | (mask & MIPS_SR_INT_MASK);
332 mips_wr_status(mask);
333 return (ostatus);
334 }
335
336 static __inline uint32_t
337 get_intr_mask(void)
338 {
339
340 return (mips_rd_status() & MIPS_SR_INT_MASK);
341 }
342
343 static __inline void
344 breakpoint(void)
345 {
346 __asm __volatile ("break");
347 }
348
349 #if defined(__GNUC__) && !defined(__mips_o32)
350 #define mips3_ld(a) (*(const volatile uint64_t *)(a))
351 #define mips3_sd(a, v) (*(volatile uint64_t *)(a) = (v))
352 #else
353 uint64_t mips3_ld(volatile uint64_t *va);
354 void mips3_sd(volatile uint64_t *, uint64_t);
355 #endif /* __GNUC__ */
356
357 #endif /* _KERNEL */
358
359 #define readb(va) (*(volatile uint8_t *) (va))
360 #define readw(va) (*(volatile uint16_t *) (va))
361 #define readl(va) (*(volatile uint32_t *) (va))
362 #if defined(__GNUC__) && !defined(__mips_o32)
363 #define readq(a) (*(volatile uint64_t *)(a))
364 #endif
365
366 #define writeb(va, d) (*(volatile uint8_t *) (va) = (d))
367 #define writew(va, d) (*(volatile uint16_t *) (va) = (d))
368 #define writel(va, d) (*(volatile uint32_t *) (va) = (d))
369 #if defined(__GNUC__) && !defined(__mips_o32)
370 #define writeq(va, d) (*(volatile uint64_t *) (va) = (d))
371 #endif
372
373 #endif /* !_MACHINE_CPUFUNC_H_ */