1 /* $OpenBSD: pio.h,v 1.2 1998/09/15 10:50:12 pefo Exp $ */
2
3 /*-
4 * Copyright (c) 2002-2004 Juli Mallett. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27 /*
28 * Copyright (c) 1995-1999 Per Fogelstrom. All rights reserved.
29 *
30 * Redistribution and use in source and binary forms, with or without
31 * modification, are permitted provided that the following conditions
32 * are met:
33 * 1. Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * 2. Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in the
37 * documentation and/or other materials provided with the distribution.
38 * 3. All advertising materials mentioning features or use of this software
39 * must display the following acknowledgement:
40 * This product includes software developed by Per Fogelstrom.
41 * 4. The name of the author may not be used to endorse or promote products
42 * derived from this software without specific prior written permission
43 *
44 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
45 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
46 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
47 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
48 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
49 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
53 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 *
55 * JNPR: cpufunc.h,v 1.5 2007/08/09 11:23:32 katta
56 * $FreeBSD$
57 */
58
59 #ifndef _MACHINE_CPUFUNC_H_
60 #define _MACHINE_CPUFUNC_H_
61
62 #include <sys/types.h>
63 #include <machine/cpuregs.h>
64
/*
 * These functions are required by user-land atomic ops.
 */
68
/*
 * Full memory barrier.  The "memory" clobber stops the compiler from
 * reordering memory accesses across the call on all configurations.
 */
static __inline void
mips_barrier(void)
{
#if defined(CPU_CNMIPS) || defined(CPU_RMI) || defined(CPU_NLM)
	/* On these cores a compiler-only barrier is used. */
	__compiler_membar();
#else
	/*
	 * Eight nops inside a .set noreorder region; presumably sized to
	 * cover the worst-case hazard window on supported cores —
	 * TODO(review): confirm per-CPU requirement.
	 */
	__asm __volatile (".set noreorder\n\t"
			  "nop\n\t"
			  "nop\n\t"
			  "nop\n\t"
			  "nop\n\t"
			  "nop\n\t"
			  "nop\n\t"
			  "nop\n\t"
			  "nop\n\t"
			  ".set reorder\n\t"
			  : : : "memory");
#endif
}
88
/*
 * Emit the coprocessor-0 hazard barrier.  COP0_SYNC comes from
 * <machine/cpuregs.h> and expands to the CPU-appropriate sequence.
 */
static __inline void
mips_cp0_sync(void)
{
	__asm __volatile (__XSTRING(COP0_SYNC));
}
94
/*
 * Drain the write buffer so pending stores are pushed toward memory
 * before execution continues.
 */
static __inline void
mips_wbflush(void)
{
#if defined(CPU_CNMIPS)
	/* Octeon provides syncw specifically for write-buffer flushes. */
	__asm __volatile (".set noreorder\n\t"
			  "syncw\n\t"
			  ".set reorder\n"
			  : : : "memory");
#else
	/* Generic path: architectural sync followed by the nop barrier. */
	__asm __volatile ("sync" : : : "memory");
	mips_barrier();
#endif
}
108
/*
 * Enter the debugger by raising a breakpoint exception.
 */
static __inline void
breakpoint(void)
{
	__asm __volatile ("break");
}
114
115 #ifdef _KERNEL
116 /*
117 * XXX
118 * It would be nice to add variants that read/write register_t, to avoid some
119 * ABI checks.
120 */
121 #if defined(__mips_n32) || defined(__mips_n64)
/*
 * MIPS_RW64_COP0(n, r): generate mips_rd_##n()/mips_wr_##n() accessors
 * for the 64-bit coprocessor-0 register r (select 0) via dmfc0/dmtc0.
 * The write side issues COP0_SYNC plus two nops to clear the CP0
 * hazard; both sides finish with mips_barrier().
 *
 * The read temporary is uint64_t: the previous "int v0" relied on the
 * n64 sign-extension convention, and the int -> uint64_t conversion on
 * return would corrupt any value whose upper 32 bits are not a sign
 * extension of bit 31.
 */
#define	MIPS_RW64_COP0(n,r)					\
static __inline uint64_t					\
mips_rd_ ## n (void)						\
{								\
	uint64_t v0;						\
	__asm __volatile ("dmfc0 %[v0], $"__XSTRING(r)";"	\
			  : [v0] "=&r"(v0));			\
	mips_barrier();						\
	return (v0);						\
}								\
static __inline void						\
mips_wr_ ## n (uint64_t a0)					\
{								\
	__asm __volatile ("dmtc0 %[a0], $"__XSTRING(r)";"	\
			  __XSTRING(COP0_SYNC)";"		\
			  "nop;"				\
			  "nop;"				\
			  :					\
			  : [a0] "r"(a0));			\
	mips_barrier();						\
} struct __hack
143
/*
 * MIPS_RW64_COP0_SEL(n, r, s): like MIPS_RW64_COP0 but for register r,
 * select s.  NOTE(review): unlike the select-0 variant, the write side
 * has no trailing nops after COP0_SYNC — presumably COP0_SYNC alone
 * suffices here; confirm before relying on it.
 *
 * The read temporary is uint64_t: the previous "int v0" depended on
 * n64 sign-extension behavior and would mangle 64-bit values (e.g. a
 * cycle counter) whose upper half is not a sign extension.
 */
#define	MIPS_RW64_COP0_SEL(n,r,s)					\
static __inline uint64_t						\
mips_rd_ ## n(void)							\
{									\
	uint64_t v0;							\
	__asm __volatile ("dmfc0 %[v0], $"__XSTRING(r)", "__XSTRING(s)";" \
			  : [v0] "=&r"(v0));				\
	mips_barrier();							\
	return (v0);							\
}									\
static __inline void							\
mips_wr_ ## n(uint64_t a0)						\
{									\
	__asm __volatile ("dmtc0 %[a0], $"__XSTRING(r)", "__XSTRING(s)";" \
			  __XSTRING(COP0_SYNC)";"			\
			  :						\
			  : [a0] "r"(a0));				\
	mips_barrier();							\
} struct __hack
163
/*
 * Instantiate the 64-bit CP0 accessors.  The first group exists only
 * under the n64 ABI; entrylo0/1 are 64-bit under n32 as well because
 * physical addresses may exceed 32 bits (PHYSADDR_64_BIT).
 */
#if defined(__mips_n64)
MIPS_RW64_COP0(excpc, MIPS_COP_0_EXC_PC);
MIPS_RW64_COP0(entryhi, MIPS_COP_0_TLB_HI);
MIPS_RW64_COP0(pagemask, MIPS_COP_0_TLB_PG_MASK);
#ifdef CPU_CNMIPS
/* Cavium Octeon control/counter registers at non-zero CP0 selects. */
MIPS_RW64_COP0_SEL(cvmcount, MIPS_COP_0_COUNT, 6);
MIPS_RW64_COP0_SEL(cvmctl, MIPS_COP_0_COUNT, 7);
MIPS_RW64_COP0_SEL(cvmmemctl, MIPS_COP_0_COMPARE, 7);
MIPS_RW64_COP0_SEL(icache_err, MIPS_COP_0_CACHE_ERR, 0);
MIPS_RW64_COP0_SEL(dcache_err, MIPS_COP_0_CACHE_ERR, 1);
#endif
#endif
#if defined(__mips_n64) || defined(__mips_n32) /* PHYSADDR_64_BIT */
MIPS_RW64_COP0(entrylo0, MIPS_COP_0_TLB_LO0);
MIPS_RW64_COP0(entrylo1, MIPS_COP_0_TLB_LO1);
#endif
MIPS_RW64_COP0(xcontext, MIPS_COP_0_TLB_XCONTEXT);

/* The generator macros are not part of the public interface. */
#undef MIPS_RW64_COP0
#undef MIPS_RW64_COP0_SEL
#endif
185
/*
 * MIPS_RW32_COP0(n, r): generate mips_rd_##n()/mips_wr_##n() accessors
 * for the 32-bit coprocessor-0 register r (select 0) via mfc0/mtc0.
 * The write side issues COP0_SYNC plus two nops for the CP0 hazard;
 * both sides finish with mips_barrier().
 */
#define	MIPS_RW32_COP0(n,r)				\
static __inline uint32_t				\
mips_rd_ ## n (void)					\
{							\
	int v0;						\
	__asm __volatile ("mfc0 %[v0], $"__XSTRING(r)";"	\
			  : [v0] "=&r"(v0));		\
	mips_barrier();					\
	return (v0);					\
}							\
static __inline void					\
mips_wr_ ## n (uint32_t a0)				\
{							\
	__asm __volatile ("mtc0 %[a0], $"__XSTRING(r)";"	\
			  __XSTRING(COP0_SYNC)";"	\
			  "nop;"			\
			  "nop;"			\
			  :				\
			  : [a0] "r"(a0));		\
	mips_barrier();					\
} struct __hack
207
/*
 * MIPS_RW32_COP0_SEL(n, r, s): like MIPS_RW32_COP0 but for register r,
 * select s (mfc0/mtc0 with an explicit select field).
 */
#define	MIPS_RW32_COP0_SEL(n,r,s)				\
static __inline uint32_t					\
mips_rd_ ## n(void)						\
{								\
	int v0;							\
	__asm __volatile ("mfc0 %[v0], $"__XSTRING(r)", "__XSTRING(s)";" \
			  : [v0] "=&r"(v0));			\
	mips_barrier();						\
	return (v0);						\
}								\
static __inline void						\
mips_wr_ ## n(uint32_t a0)					\
{								\
	__asm __volatile ("mtc0 %[a0], $"__XSTRING(r)", "__XSTRING(s)";" \
			  __XSTRING(COP0_SYNC)";"		\
			  "nop;"				\
			  "nop;"				\
			  :					\
			  : [a0] "r"(a0));			\
	mips_barrier();						\
} struct __hack
229
#ifdef CPU_CNMIPS
/*
 * Synchronize the instruction stream with prior stores on Octeon.
 * The instruction is emitted as a raw .word (0x041f0000, "xxx ICACHE"
 * per the original note) — presumably an Octeon icache-invalidate the
 * assembler does not accept mnemonically; TODO(review): confirm
 * against the cnMIPS ISA manual.
 */
static __inline void mips_sync_icache (void)
{
	__asm __volatile (
		".set push\n"
		".set mips64\n"
		".word 0x041f0000\n"	/* xxx ICACHE */
		"nop\n"
		".set pop\n"
		: : );
}
#endif
242
/*
 * Instantiate the 32-bit CP0 accessors (mips_rd_*/mips_wr_*).
 */
MIPS_RW32_COP0(compare, MIPS_COP_0_COMPARE);
MIPS_RW32_COP0(config, MIPS_COP_0_CONFIG);
MIPS_RW32_COP0_SEL(config1, MIPS_COP_0_CONFIG, 1);
MIPS_RW32_COP0_SEL(config2, MIPS_COP_0_CONFIG, 2);
MIPS_RW32_COP0_SEL(config3, MIPS_COP_0_CONFIG, 3);
#ifdef CPU_CNMIPS
MIPS_RW32_COP0_SEL(config4, MIPS_COP_0_CONFIG, 4);
#endif
#ifdef BERI_LARGE_TLB
MIPS_RW32_COP0_SEL(config5, MIPS_COP_0_CONFIG, 5);
#endif
#if defined(CPU_NLM) || defined(BERI_LARGE_TLB)
MIPS_RW32_COP0_SEL(config6, MIPS_COP_0_CONFIG, 6);
#endif
#if defined(CPU_NLM) || defined(CPU_MIPS1004K)
MIPS_RW32_COP0_SEL(config7, MIPS_COP_0_CONFIG, 7);
#endif
MIPS_RW32_COP0(count, MIPS_COP_0_COUNT);
MIPS_RW32_COP0(index, MIPS_COP_0_TLB_INDEX);
MIPS_RW32_COP0(wired, MIPS_COP_0_TLB_WIRED);
MIPS_RW32_COP0(cause, MIPS_COP_0_CAUSE);
/* Under n64 these are generated by the 64-bit macros above instead. */
#if !defined(__mips_n64)
MIPS_RW32_COP0(excpc, MIPS_COP_0_EXC_PC);
#endif
MIPS_RW32_COP0(status, MIPS_COP_0_STATUS);
MIPS_RW32_COP0_SEL(cmgcrbase, 15, 3);

/* XXX: Some of these registers are specific to MIPS32. */
#if !defined(__mips_n64)
MIPS_RW32_COP0(entryhi, MIPS_COP_0_TLB_HI);
MIPS_RW32_COP0(pagemask, MIPS_COP_0_TLB_PG_MASK);
#endif
#ifdef CPU_NLM
MIPS_RW32_COP0_SEL(pagegrain, MIPS_COP_0_TLB_PG_MASK, 1);
#endif
#if !defined(__mips_n64) && !defined(__mips_n32) /* !PHYSADDR_64_BIT */
MIPS_RW32_COP0(entrylo0, MIPS_COP_0_TLB_LO0);
MIPS_RW32_COP0(entrylo1, MIPS_COP_0_TLB_LO1);
#endif
MIPS_RW32_COP0(prid, MIPS_COP_0_PRID);
/* XXX 64-bit? */
MIPS_RW32_COP0_SEL(ebase, MIPS_COP_0_PRID, 1);
/* Hardware watchpoint registers, four pairs (selects 0-3). */
MIPS_RW32_COP0(watchlo, MIPS_COP_0_WATCH_LO);
MIPS_RW32_COP0_SEL(watchlo1, MIPS_COP_0_WATCH_LO, 1);
MIPS_RW32_COP0_SEL(watchlo2, MIPS_COP_0_WATCH_LO, 2);
MIPS_RW32_COP0_SEL(watchlo3, MIPS_COP_0_WATCH_LO, 3);
MIPS_RW32_COP0(watchhi, MIPS_COP_0_WATCH_HI);
MIPS_RW32_COP0_SEL(watchhi1, MIPS_COP_0_WATCH_HI, 1);
MIPS_RW32_COP0_SEL(watchhi2, MIPS_COP_0_WATCH_HI, 2);
MIPS_RW32_COP0_SEL(watchhi3, MIPS_COP_0_WATCH_HI, 3);

/* Performance-counter registers (selects 0-3). */
MIPS_RW32_COP0_SEL(perfcnt0, MIPS_COP_0_PERFCNT, 0);
MIPS_RW32_COP0_SEL(perfcnt1, MIPS_COP_0_PERFCNT, 1);
MIPS_RW32_COP0_SEL(perfcnt2, MIPS_COP_0_PERFCNT, 2);
MIPS_RW32_COP0_SEL(perfcnt3, MIPS_COP_0_PERFCNT, 3);

/* The generator macros are not part of the public interface. */
#undef MIPS_RW32_COP0
#undef MIPS_RW32_COP0_SEL
301
302 static __inline register_t
303 intr_disable(void)
304 {
305 register_t s;
306
307 s = mips_rd_status();
308 mips_wr_status(s & ~MIPS_SR_INT_IE);
309
310 return (s & MIPS_SR_INT_IE);
311 }
312
313 static __inline register_t
314 intr_enable(void)
315 {
316 register_t s;
317
318 s = mips_rd_status();
319 mips_wr_status(s | MIPS_SR_INT_IE);
320
321 return (s);
322 }
323
324 static __inline void
325 intr_restore(register_t ie)
326 {
327 if (ie == MIPS_SR_INT_IE) {
328 intr_enable();
329 }
330 }
331
332 static __inline uint32_t
333 set_intr_mask(uint32_t mask)
334 {
335 uint32_t ostatus;
336
337 ostatus = mips_rd_status();
338 mask = (ostatus & ~MIPS_SR_INT_MASK) | (mask & MIPS_SR_INT_MASK);
339 mips_wr_status(mask);
340 return (ostatus);
341 }
342
343 static __inline uint32_t
344 get_intr_mask(void)
345 {
346
347 return (mips_rd_status() & MIPS_SR_INT_MASK);
348 }
349
/*
 * 64-bit load/store helpers.  When the ABI allows direct 64-bit
 * accesses (anything but o32 under GCC) these are plain volatile
 * dereferences; under o32 they are provided out of line instead.
 */
#if defined(__GNUC__) && !defined(__mips_o32)
#define	mips3_ld(a)	(*(const volatile uint64_t *)(a))
#define	mips3_sd(a, v)	(*(volatile uint64_t *)(a) = (v))
#else
uint64_t mips3_ld(volatile uint64_t *va);
void mips3_sd(volatile uint64_t *, uint64_t);
#endif	/* __GNUC__ */
357
358 #endif /* _KERNEL */
359
/*
 * MMIO-style accessors: volatile loads and stores of the given width
 * at virtual address va.  readq/writeq exist only where direct 64-bit
 * accesses are possible (GCC, non-o32 ABI).
 */
#define	readb(va)	(*(volatile uint8_t *) (va))
#define	readw(va)	(*(volatile uint16_t *) (va))
#define	readl(va)	(*(volatile uint32_t *) (va))
#if defined(__GNUC__) && !defined(__mips_o32)
#define	readq(a)	(*(volatile uint64_t *)(a))
#endif

#define	writeb(va, d)	(*(volatile uint8_t *) (va) = (d))
#define	writew(va, d)	(*(volatile uint16_t *) (va) = (d))
#define	writel(va, d)	(*(volatile uint32_t *) (va) = (d))
#if defined(__GNUC__) && !defined(__mips_o32)
#define	writeq(va, d)	(*(volatile uint64_t *) (va) = (d))
#endif
373
374 #endif /* !_MACHINE_CPUFUNC_H_ */
/* Cache object: bbbbe4f9f7e2952cc578843494fc6a0f */