1 /* $OpenBSD: pio.h,v 1.2 1998/09/15 10:50:12 pefo Exp $ */
2
3 /*-
4 * Copyright (c) 2002-2004 Juli Mallett. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27 /*
28 * Copyright (c) 1995-1999 Per Fogelstrom. All rights reserved.
29 *
30 * Redistribution and use in source and binary forms, with or without
31 * modification, are permitted provided that the following conditions
32 * are met:
33 * 1. Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * 2. Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in the
37 * documentation and/or other materials provided with the distribution.
38 * 3. All advertising materials mentioning features or use of this software
39 * must display the following acknowledgement:
40 * This product includes software developed by Per Fogelstrom.
41 * 4. The name of the author may not be used to endorse or promote products
42 * derived from this software without specific prior written permission
43 *
44 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
45 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
46 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
47 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
48 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
49 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
53 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 *
55 * JNPR: cpufunc.h,v 1.5 2007/08/09 11:23:32 katta
56 * $FreeBSD: releng/9.0/sys/mips/include/cpufunc.h 224115 2011-07-16 20:31:29Z jchandra $
57 */
58
59 #ifndef _MACHINE_CPUFUNC_H_
60 #define _MACHINE_CPUFUNC_H_
61
62 #include <sys/types.h>
63 #include <machine/cpuregs.h>
64
65 /*
66 * These functions are required by user-land atomi ops
67 */
68
/*
 * Memory/pipeline barrier.
 *
 * On Octeon (CPU_CNMIPS) a compiler-level barrier (empty asm with a
 * "memory" clobber) is used.  On other CPUs, eight nops are issued
 * inside .set noreorder — presumably enough to drain the pipeline of
 * in-flight hazards on the targets this supports (TODO: confirm per
 * target) — again with a "memory" clobber so the compiler does not
 * cache values across the barrier.
 */
static __inline void
mips_barrier(void)
{
#ifdef CPU_CNMIPS
	__asm __volatile("" : : : "memory");
#else
	__asm __volatile (".set noreorder\n\t"
		"nop\n\t"
		"nop\n\t"
		"nop\n\t"
		"nop\n\t"
		"nop\n\t"
		"nop\n\t"
		"nop\n\t"
		"nop\n\t"
		".set reorder\n\t"
		: : : "memory");
#endif
}
88
/*
 * Emit the CPU-specific COP0 hazard-clearing sequence.  COP0_SYNC is
 * defined in <machine/cpuregs.h>; __XSTRING turns it into the literal
 * assembly text.
 */
static __inline void
mips_cp0_sync(void)
{
	__asm __volatile (__XSTRING(COP0_SYNC));
}
94
/*
 * Flush the write buffer.  Octeon uses its dedicated 'syncw'
 * (write-sync) instruction; everything else issues a full 'sync'
 * followed by mips_barrier().  Both paths carry a "memory" clobber.
 */
static __inline void
mips_wbflush(void)
{
#if defined(CPU_CNMIPS)
	__asm __volatile (".set noreorder\n\t"
		"syncw\n\t"
		".set reorder\n"
		: : : "memory");
#else
	__asm __volatile ("sync" : : : "memory");
	mips_barrier();
#endif
}
108
/*
 * Read memory barrier — deliberately empty (the original "Nil"
 * comment); reads apparently need no explicit barrier on the CPUs
 * this header targets.
 */
static __inline void
mips_read_membar(void)
{
	/* Nil */
}
114
/* Write memory barrier: implemented as a write-buffer flush. */
static __inline void
mips_write_membar(void)
{
	mips_wbflush();
}
120
121 #ifdef _KERNEL
122 /*
123 * XXX
124 * It would be nice to add variants that read/write register_t, to avoid some
125 * ABI checks.
126 */
127 #if defined(__mips_n32) || defined(__mips_n64)
/*
 * Generate 64-bit accessors (select 0) for COP0 register 'r':
 * mips_rd_<n>() reads via dmfc0; mips_wr_<n>() writes via dmtc0
 * followed by the COP0 hazard sync and two pipeline nops.
 *
 * Fix: the read temporary was declared 'int', which truncated and
 * sign-extended the 64-bit dmfc0 result when it was converted to the
 * uint64_t return value; it must be a 64-bit type.
 */
#define	MIPS_RW64_COP0(n,r)					\
static __inline uint64_t					\
mips_rd_ ## n (void)						\
{								\
	uint64_t v0;						\
	__asm __volatile ("dmfc0 %[v0], $"__XSTRING(r)";"	\
			  : [v0] "=&r"(v0));			\
	mips_barrier();						\
	return (v0);						\
}								\
static __inline void						\
mips_wr_ ## n (uint64_t a0)					\
{								\
	__asm __volatile ("dmtc0 %[a0], $"__XSTRING(r)";"	\
			 __XSTRING(COP0_SYNC)";"		\
			 "nop;"					\
			 "nop;"					\
			 :					\
			 : [a0] "r"(a0));			\
	mips_barrier();						\
} struct __hack
149
/*
 * As MIPS_RW64_COP0, but for COP0 register 'r' with select field 's'.
 *
 * Fix: the read temporary was declared 'int', truncating the 64-bit
 * dmfc0 result before conversion to the uint64_t return value; it must
 * be a 64-bit type.
 */
#define	MIPS_RW64_COP0_SEL(n,r,s)				\
static __inline uint64_t					\
mips_rd_ ## n(void)						\
{								\
	uint64_t v0;						\
	__asm __volatile ("dmfc0 %[v0], $"__XSTRING(r)", "__XSTRING(s)";"	\
			  : [v0] "=&r"(v0));			\
	mips_barrier();						\
	return (v0);						\
}								\
static __inline void						\
mips_wr_ ## n(uint64_t a0)					\
{								\
	__asm __volatile ("dmtc0 %[a0], $"__XSTRING(r)", "__XSTRING(s)";"	\
			 __XSTRING(COP0_SYNC)";"		\
			 :					\
			 : [a0] "r"(a0));			\
	mips_barrier();						\
} struct __hack
169
#if defined(__mips_n64)
/* Registers that are 64 bits wide under the n64 ABI. */
MIPS_RW64_COP0(excpc, MIPS_COP_0_EXC_PC);
MIPS_RW64_COP0(entryhi, MIPS_COP_0_TLB_HI);
MIPS_RW64_COP0(pagemask, MIPS_COP_0_TLB_PG_MASK);
#ifdef CPU_CNMIPS
/* Cavium Octeon implementation-specific COP0 registers (high selects). */
MIPS_RW64_COP0_SEL(cvmcount, MIPS_COP_0_COUNT, 6);
MIPS_RW64_COP0_SEL(cvmctl, MIPS_COP_0_COUNT, 7);
MIPS_RW64_COP0_SEL(cvmmemctl, MIPS_COP_0_COMPARE, 7);
MIPS_RW64_COP0_SEL(icache_err, MIPS_COP_0_CACHE_ERR, 0);
MIPS_RW64_COP0_SEL(dcache_err, MIPS_COP_0_CACHE_ERR, 1);
#endif
#endif
#if defined(__mips_n64) || defined(__mips_n32) /* PHYSADDR_64_BIT */
/* With 64-bit physical addresses, TLB EntryLo0/1 need 64-bit access. */
MIPS_RW64_COP0(entrylo0, MIPS_COP_0_TLB_LO0);
MIPS_RW64_COP0(entrylo1, MIPS_COP_0_TLB_LO1);
#endif
MIPS_RW64_COP0(xcontext, MIPS_COP_0_TLB_XCONTEXT);

#undef MIPS_RW64_COP0
#undef MIPS_RW64_COP0_SEL
#endif
191
/*
 * Generate 32-bit accessors (select 0) for COP0 register 'r':
 * mips_rd_<n>() reads via mfc0; mips_wr_<n>() writes via mtc0 followed
 * by the COP0 hazard sync and two pipeline nops.
 *
 * Cleanup: the read temporary was declared 'int' although the function
 * returns uint32_t; declare it uint32_t so the temporary matches the
 * return type (and the 64-bit variants above).
 */
#define	MIPS_RW32_COP0(n,r)					\
static __inline uint32_t					\
mips_rd_ ## n (void)						\
{								\
	uint32_t v0;						\
	__asm __volatile ("mfc0 %[v0], $"__XSTRING(r)";"	\
			  : [v0] "=&r"(v0));			\
	mips_barrier();						\
	return (v0);						\
}								\
static __inline void						\
mips_wr_ ## n (uint32_t a0)					\
{								\
	__asm __volatile ("mtc0 %[a0], $"__XSTRING(r)";"	\
			 __XSTRING(COP0_SYNC)";"		\
			 "nop;"					\
			 "nop;"					\
			 :					\
			 : [a0] "r"(a0));			\
	mips_barrier();						\
} struct __hack
213
/*
 * As MIPS_RW32_COP0, but for COP0 register 'r' with select field 's'.
 *
 * Cleanup: declare the read temporary uint32_t to match the return
 * type instead of 'int'.
 */
#define	MIPS_RW32_COP0_SEL(n,r,s)				\
static __inline uint32_t					\
mips_rd_ ## n(void)						\
{								\
	uint32_t v0;						\
	__asm __volatile ("mfc0 %[v0], $"__XSTRING(r)", "__XSTRING(s)";"	\
			  : [v0] "=&r"(v0));			\
	mips_barrier();						\
	return (v0);						\
}								\
static __inline void						\
mips_wr_ ## n(uint32_t a0)					\
{								\
	__asm __volatile ("mtc0 %[a0], $"__XSTRING(r)", "__XSTRING(s)";"	\
			 __XSTRING(COP0_SYNC)";"		\
			 "nop;"					\
			 "nop;"					\
			 :					\
			 : [a0] "r"(a0));			\
	mips_barrier();						\
} struct __hack
235
#ifdef CPU_CNMIPS
/*
 * Synchronize the instruction cache on Octeon.  The raw .word is a
 * hand-encoded opcode (0x041f0000) — apparently an ICACHE-sync
 * operation per the original "xxx ICACHE" note — emitted as data so
 * assemblers that do not know the mnemonic still accept it.
 */
static __inline void mips_sync_icache (void)
{
	__asm __volatile (
		".set push\n"
		".set mips64\n"
		".word 0x041f0000\n"		/* xxx ICACHE */
		"nop\n"
		".set pop\n"
		: : );
}
#endif
248
/* Core architected COP0 registers. */
MIPS_RW32_COP0(compare, MIPS_COP_0_COMPARE);
MIPS_RW32_COP0(config, MIPS_COP_0_CONFIG);
MIPS_RW32_COP0_SEL(config1, MIPS_COP_0_CONFIG, 1);
MIPS_RW32_COP0_SEL(config2, MIPS_COP_0_CONFIG, 2);
MIPS_RW32_COP0_SEL(config3, MIPS_COP_0_CONFIG, 3);
#ifdef CPU_CNMIPS
/* Octeon-specific Config4. */
MIPS_RW32_COP0_SEL(config4, MIPS_COP_0_CONFIG, 4);
#endif
#ifdef CPU_NLM
/* Netlogic-specific Config6/Config7. */
MIPS_RW32_COP0_SEL(config6, MIPS_COP_0_CONFIG, 6);
MIPS_RW32_COP0_SEL(config7, MIPS_COP_0_CONFIG, 7);
#endif
MIPS_RW32_COP0(count, MIPS_COP_0_COUNT);
MIPS_RW32_COP0(index, MIPS_COP_0_TLB_INDEX);
MIPS_RW32_COP0(wired, MIPS_COP_0_TLB_WIRED);
MIPS_RW32_COP0(cause, MIPS_COP_0_CAUSE);
/* On n64 the 64-bit variant of excpc above is used instead. */
#if !defined(__mips_n64)
MIPS_RW32_COP0(excpc, MIPS_COP_0_EXC_PC);
#endif
MIPS_RW32_COP0(status, MIPS_COP_0_STATUS);

/* XXX: Some of these registers are specific to MIPS32. */
#if !defined(__mips_n64)
MIPS_RW32_COP0(entryhi, MIPS_COP_0_TLB_HI);
MIPS_RW32_COP0(pagemask, MIPS_COP_0_TLB_PG_MASK);
#endif
#if !defined(__mips_n64) && !defined(__mips_n32) /* !PHYSADDR_64_BIT */
MIPS_RW32_COP0(entrylo0, MIPS_COP_0_TLB_LO0);
MIPS_RW32_COP0(entrylo1, MIPS_COP_0_TLB_LO1);
#endif
MIPS_RW32_COP0(prid, MIPS_COP_0_PRID);
/* XXX 64-bit? */
MIPS_RW32_COP0_SEL(ebase, MIPS_COP_0_PRID, 1);
/* Watchpoint registers, four pairs (selects 0-3). */
MIPS_RW32_COP0(watchlo, MIPS_COP_0_WATCH_LO);
MIPS_RW32_COP0_SEL(watchlo1, MIPS_COP_0_WATCH_LO, 1);
MIPS_RW32_COP0_SEL(watchlo2, MIPS_COP_0_WATCH_LO, 2);
MIPS_RW32_COP0_SEL(watchlo3, MIPS_COP_0_WATCH_LO, 3);
MIPS_RW32_COP0(watchhi, MIPS_COP_0_WATCH_HI);
MIPS_RW32_COP0_SEL(watchhi1, MIPS_COP_0_WATCH_HI, 1);
MIPS_RW32_COP0_SEL(watchhi2, MIPS_COP_0_WATCH_HI, 2);
MIPS_RW32_COP0_SEL(watchhi3, MIPS_COP_0_WATCH_HI, 3);

/* Performance counters 0-3. */
MIPS_RW32_COP0_SEL(perfcnt0, MIPS_COP_0_PERFCNT, 0);
MIPS_RW32_COP0_SEL(perfcnt1, MIPS_COP_0_PERFCNT, 1);
MIPS_RW32_COP0_SEL(perfcnt2, MIPS_COP_0_PERFCNT, 2);
MIPS_RW32_COP0_SEL(perfcnt3, MIPS_COP_0_PERFCNT, 3);

#undef MIPS_RW32_COP0
#undef MIPS_RW32_COP0_SEL
298
299 static __inline register_t
300 intr_disable(void)
301 {
302 register_t s;
303
304 s = mips_rd_status();
305 mips_wr_status(s & ~MIPS_SR_INT_IE);
306
307 return (s & MIPS_SR_INT_IE);
308 }
309
310 static __inline register_t
311 intr_enable(void)
312 {
313 register_t s;
314
315 s = mips_rd_status();
316 mips_wr_status(s | MIPS_SR_INT_IE);
317
318 return (s);
319 }
320
321 static __inline void
322 intr_restore(register_t ie)
323 {
324 if (ie == MIPS_SR_INT_IE) {
325 intr_enable();
326 }
327 }
328
329 static __inline uint32_t
330 set_intr_mask(uint32_t mask)
331 {
332 uint32_t ostatus;
333
334 ostatus = mips_rd_status();
335 mask = (ostatus & ~MIPS_SR_INT_MASK) | (mask & MIPS_SR_INT_MASK);
336 mips_wr_status(mask);
337 return (ostatus);
338 }
339
340 static __inline uint32_t
341 get_intr_mask(void)
342 {
343
344 return (mips_rd_status() & MIPS_SR_INT_MASK);
345 }
346
/* Raise a breakpoint exception via the 'break' instruction. */
static __inline void
breakpoint(void)
{
	__asm __volatile ("break");
}
352
353 #if defined(__GNUC__) && !defined(__mips_o32)
/*
 * 64-bit load.  Under LP64 a plain dereference suffices; on 32-bit
 * pointer ABIs (with 64-bit-capable registers) an explicit 'ld'
 * instruction is emitted instead.
 */
static inline uint64_t
mips3_ld(const volatile uint64_t *va)
{
	uint64_t rv;

#if defined(_LP64)
	rv = *va;
#else
	/* "=d" places the 64-bit result in a general-purpose register. */
	__asm volatile("ld %0,0(%1)" : "=d"(rv) : "r"(va));
#endif

	return (rv);
}
367
/*
 * 64-bit store, counterpart of mips3_ld(): plain store under LP64,
 * explicit 'sd' instruction on 32-bit pointer ABIs.
 */
static inline void
mips3_sd(volatile uint64_t *va, uint64_t v)
{
#if defined(_LP64)
	*va = v;
#else
	__asm volatile("sd %0,0(%1)" :: "r"(v), "r"(va));
#endif
}
377 #else
378 uint64_t mips3_ld(volatile uint64_t *va);
379 void mips3_sd(volatile uint64_t *, uint64_t);
380 #endif /* __GNUC__ */
381
382 #endif /* _KERNEL */
383
/*
 * Volatile memory-mapped register accessors (Linux-style names).
 * No byte swapping and no barriers are performed.
 */
#define readb(va) (*(volatile uint8_t *) (va))
#define readw(va) (*(volatile uint16_t *) (va))
#define readl(va) (*(volatile uint32_t *) (va))

#define writeb(va, d) (*(volatile uint8_t *) (va) = (d))
#define writew(va, d) (*(volatile uint16_t *) (va) = (d))
#define writel(va, d) (*(volatile uint32_t *) (va) = (d))

/*
 * I/O macros.
 *
 * Native-endian port accessors; the *rb variants below byte-reverse
 * the data (for little-endian devices on a big-endian bus, or vice
 * versa — confirm against callers).
 */

#define outb(a,v) (*(volatile unsigned char*)(a) = (v))
#define out8(a,v) (*(volatile unsigned char*)(a) = (v))
#define outw(a,v) (*(volatile unsigned short*)(a) = (v))
#define out16(a,v) outw(a,v)
#define outl(a,v) (*(volatile unsigned int*)(a) = (v))
#define out32(a,v) outl(a,v)
#define inb(a) (*(volatile unsigned char*)(a))
#define in8(a) (*(volatile unsigned char*)(a))
#define inw(a) (*(volatile unsigned short*)(a))
#define in16(a) inw(a)
#define inl(a) (*(volatile unsigned int*)(a))
#define in32(a) inl(a)

/* Byte-reversed variants; 8-bit ones trivially need no swap. */
#define out8rb(a,v) (*(volatile unsigned char*)(a) = (v))
#define out16rb(a,v) (__out16rb((volatile uint16_t *)(a), v))
#define out32rb(a,v) (__out32rb((volatile uint32_t *)(a), v))
#define in8rb(a) (*(volatile unsigned char*)(a))
#define in16rb(a) (__in16rb((volatile uint16_t *)(a)))
#define in32rb(a) (__in32rb((volatile uint32_t *)(a)))

/*
 * 32-bit byte swap.  NOTE: evaluates 'x' four times — pass only
 * side-effect-free expressions.
 */
#define _swap_(x) (((x) >> 24) | ((x) << 24) | \
	(((x) >> 8) & 0xff00) | (((x) & 0xff00) << 8))

/* Byte-reversing helpers used by the *rb macros above. */
static __inline void __out32rb(volatile uint32_t *, uint32_t);
static __inline void __out16rb(volatile uint16_t *, uint16_t);
static __inline uint32_t __in32rb(volatile uint32_t *);
static __inline uint16_t __in16rb(volatile uint16_t *);
423
/* Byte-swap 'v' and store it to *a as a 32-bit volatile write. */
static __inline void
__out32rb(volatile uint32_t *a, uint32_t v)
{
	/* 'v' is a side-effect-free parameter, safe for _swap_(). */
	out32(a, _swap_(v));
}
432
/* Swap the two bytes of 'v' and store it to *a as a 16-bit write. */
static __inline void
__out16rb(volatile uint16_t *a, uint16_t v)
{
	uint16_t swapped;

	swapped = (uint16_t)((v << 8) | (v >> 8));
	out16(a, swapped);
}
441
/* Read a 32-bit value from *a and return it byte-swapped. */
static __inline uint32_t
__in32rb(volatile uint32_t *a)
{
	uint32_t raw;

	raw = in32(a);
	/* 'raw' is a plain local, safe for the multi-evaluating _swap_. */
	return _swap_(raw);
}
451
/* Read a 16-bit value from *a and return it with bytes swapped. */
static __inline uint16_t
__in16rb(volatile uint16_t *a)
{
	uint16_t raw;

	raw = in16(a);
	return (uint16_t)((raw << 8) | (raw >> 8));
}
461
/*
 * Block ("string") I/O routines and an atomic load-and-clear helper,
 * implemented elsewhere (not in this header).
 */
void insb(uint8_t *, uint8_t *,int);
void insw(uint16_t *, uint16_t *,int);
void insl(uint32_t *, uint32_t *,int);
void outsb(uint8_t *, const uint8_t *,int);
void outsw(uint16_t *, const uint16_t *,int);
void outsl(uint32_t *, const uint32_t *,int);
u_int loadandclear(volatile u_int *addr);
469
470 #endif /* !_MACHINE_CPUFUNC_H_ */