1 /*-
2 * Copyright 2003-2011 Netlogic Microsystems (Netlogic). All rights
3 * reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met:
8 *
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in
13 * the documentation and/or other materials provided with the
14 * distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY Netlogic Microsystems ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE
20 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26 * THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * NETLOGIC_BSD
29 * $FreeBSD$
30 */
31
32 #ifndef __NLM_MIPS_EXTNS_H__
33 #define __NLM_MIPS_EXTNS_H__
34
35 #if !defined(LOCORE) && !defined(__ASSEMBLY__)
/*
 * Atomically exchange the 32-bit word at *loc with val and return the
 * previous contents.
 *
 * The Netlogic-specific "swapw" instruction is emitted as a raw opcode
 * (.word 0x71280014) because the stock assembler does not know the
 * mnemonic.  The target address is staged in $9 and the new value in
 * $8; swapw leaves the old memory contents in $8.
 */
static __inline__ int32_t nlm_swapw(int32_t *loc, int32_t val)
{
	int32_t oldval = 0;

	__asm__ __volatile__ (
	".set push\n"
	".set noreorder\n"
	"move $9, %2\n"		/* $9 = target address */
	"move $8, %3\n"		/* $8 = new value */
	".word 0x71280014\n"	/* "swapw $8, $9" */
	"move %1, $8\n"		/* previous contents come back in $8 */
	".set pop\n"
	: "+m" (*loc), "=r" (oldval)
	: "r" (loc), "r" (val)
	: "$8", "$9" );

	return oldval;
}
54
/*
 * Atomically exchange the 32-bit word at *loc with val (unsigned
 * variant) and return the previous contents.
 *
 * "swapwu" is emitted as a raw opcode (.word 0x71280015) because the
 * stock assembler does not know the Netlogic mnemonic.
 */
static __inline__ uint32_t nlm_swapwu(int32_t *loc, uint32_t val)
{
	uint32_t oldval;

	__asm__ __volatile__ (
	".set push\n"
	".set noreorder\n"
	"move $9, %2\n"		/* $9 = target address */
	"move $8, %3\n"		/* $8 = new value */
	".word 0x71280015\n"	/* "swapwu $8, $9" */
	"move %1, $8\n"		/* previous contents come back in $8 */
	".set pop\n"
	: "+m" (*loc), "=r" (oldval)
	: "r" (loc), "r" (val)
	: "$8", "$9" );

	return oldval;
}
73
74 #if (__mips == 64)
/*
 * Atomically exchange the 64-bit word at *loc with val and return the
 * previous contents.  Only compiled for 64-bit targets (__mips == 64).
 *
 * NOTE(review): the emitted opcode 0x71280014 is the very same word
 * this file uses for the 32-bit "swapw" in nlm_swapw() above, and the
 * trailing comment still says "swapw" -- confirm against the XLP ISA
 * manual that this is really the 64-bit swap encoding.  Also note the
 * location parameter is declared int32_t * even though a 64-bit
 * quantity is exchanged; verify callers pass a 64-bit-aligned object.
 */
static __inline__ uint64_t nlm_swapd(int32_t *loc, uint64_t val)
{
	uint64_t oldval;

	__asm__ __volatile__ (
	".set push\n"
	".set noreorder\n"
	"move $9, %2\n"		/* $9 = target address */
	"move $8, %3\n"		/* $8 = new value */
	".word 0x71280014\n"	/* "swapw $8, $9\n" */
	"move %1, $8\n"		/* previous contents come back in $8 */
	".set pop\n"
	: "+m" (*loc), "=r" (oldval)
	: "r" (loc), "r" (val)
	: "$8", "$9" );

	return oldval;
}
93 #endif
94
95 /*
96 * Atomic increment a unsigned int
97 */
98 static __inline unsigned int
99 nlm_ldaddwu(unsigned int value, unsigned int *addr)
100 {
101 __asm__ __volatile__(
102 ".set push\n"
103 ".set noreorder\n"
104 "move $8, %2\n"
105 "move $9, %3\n"
106 ".word 0x71280011\n" /* ldaddwu $8, $9 */
107 "move %0, $8\n"
108 ".set pop\n"
109 : "=&r"(value), "+m"(*addr)
110 : ""(value), "r" ((unsigned long)addr)
111 : "$8", "$9");
112
113 return (value);
114 }
115 /*
116 * 32 bit read write for c0
117 */
/*
 * Read the 32-bit CP0 register number 'reg', select 'sel'.  Both must
 * be compile-time constants (used as "i" immediates in the mfc0
 * operand list).  Expands to a GCC statement expression that yields
 * the value read.
 */
#define read_c0_register32(reg, sel)				\
({								\
	uint32_t __rv;						\
	__asm__ __volatile__(					\
	    ".set push\n\t"					\
	    ".set mips32\n\t"					\
	    "mfc0 %0, $%1, %2\n\t"				\
	    ".set pop\n"					\
	    : "=r" (__rv) : "i" (reg), "i" (sel) );		\
	__rv;							\
})
129
/*
 * Write 'value' to the 32-bit CP0 register number 'reg', select 'sel'
 * ('reg' and 'sel' must be compile-time constants).
 *
 * NOTE(review): the expansion ends with its own semicolon and is not
 * wrapped in do { } while (0), so it is fragile inside an unbraced
 * if/else -- confirm call sites use it only as a plain statement.
 */
#define write_c0_register32(reg, sel, value)			\
	__asm__ __volatile__(					\
	    ".set push\n\t"					\
	    ".set mips32\n\t"					\
	    "mtc0 %0, $%1, %2\n\t"				\
	    ".set pop\n"					\
	    : : "r" (value), "i" (reg), "i" (sel) );
137
138 #if defined(__mips_n64) || defined(__mips_n32)
139 /*
140 * On 64 bit compilation, the operations are simple
141 */
/*
 * Read the 64-bit CP0 register (reg, sel) on an n64/n32 compilation:
 * a single dmfc0 suffices since GPRs are 64 bits wide.  'reg' and
 * 'sel' must be compile-time constants.  Expands to a statement
 * expression yielding the value read.
 */
#define read_c0_register64(reg, sel)				\
({								\
	uint64_t __rv;						\
	__asm__ __volatile__(					\
	    ".set push\n\t"					\
	    ".set mips64\n\t"					\
	    "dmfc0 %0, $%1, %2\n\t"				\
	    ".set pop\n"					\
	    : "=r" (__rv) : "i" (reg), "i" (sel) );		\
	__rv;							\
})
153
/*
 * Write 'value' to the 64-bit CP0 register (reg, sel) on an n64/n32
 * compilation: a single dmtc0 suffices since GPRs are 64 bits wide.
 * 'reg' and 'sel' must be compile-time constants.
 *
 * NOTE(review): the expansion ends with its own semicolon and is not
 * wrapped in do { } while (0) -- fragile inside an unbraced if/else.
 */
#define write_c0_register64(reg, sel, value)			\
	__asm__ __volatile__(					\
	    ".set push\n\t"					\
	    ".set mips64\n\t"					\
	    "dmtc0 %0, $%1, %2\n\t"				\
	    ".set pop\n"					\
	    : : "r" (value), "i" (reg), "i" (sel) );
161 #else /* ! (defined(__mips_n64) || defined(__mips_n32)) */
162
163 /*
164 * 32 bit compilation, 64 bit values has to split
165 */
/*
 * Read the 64-bit CP0 register (reg, sel) from a 32-bit compilation.
 * dmfc0 fetches the full 64-bit value into $8; dsra32 extracts the
 * high 32 bits and "sll ..., 0" the low 32 bits, and the two halves
 * are recombined in C.  'reg' and 'sel' must be compile-time
 * constants.  Expands to a statement expression yielding the value.
 */
#define read_c0_register64(reg, sel)				\
({								\
	uint32_t __high, __low;					\
	__asm__ __volatile__(					\
	    ".set push\n\t"					\
	    ".set noreorder\n\t"				\
	    ".set mips64\n\t"					\
	    "dmfc0 $8, $%2, %3\n\t"				\
	    "dsra32 %0, $8, 0\n\t"				\
	    "sll %1, $8, 0\n\t"					\
	    ".set pop\n"					\
	    : "=r"(__high), "=r"(__low): "i"(reg), "i"(sel)	\
	    : "$8");						\
	((uint64_t)__high << 32) | __low;			\
})
181
/*
 * Write 'value' (64-bit) to CP0 register (reg, sel) from a 32-bit
 * compilation.  The value is split into 32-bit halves in C, then
 * re-assembled in $8: the low half is shifted up and back down
 * (dsll32/dsrl32) to clear any sign extension, OR-ed with the high
 * half shifted into the upper word, and written with dmtc0.  'reg'
 * and 'sel' must be compile-time constants.
 *
 * Fix: the 'value' macro argument is now parenthesized -- the old
 * expansion "value >> 32" / "value & 0xffffffff" mis-bound when an
 * expression such as (a | b) was passed.
 */
#define write_c0_register64(reg, sel, value)			\
do {								\
	uint32_t __high = (value) >> 32;			\
	uint32_t __low = (value) & 0xffffffff;			\
	__asm__ __volatile__(					\
	    ".set push\n\t"					\
	    ".set noreorder\n\t"				\
	    ".set mips64\n\t"					\
	    "dsll32 $8, %1, 0\n\t"				\
	    "dsll32 $9, %0, 0\n\t"				\
	    "dsrl32 $8, $8, 0\n\t"				\
	    "or $8, $8, $9\n\t"					\
	    "dmtc0 $8, $%2, %3\n\t"				\
	    ".set pop"						\
	    :: "r"(__high), "r"(__low), "i"(reg), "i"(sel)	\
	    :"$8", "$9");					\
} while (0)
199
200 #endif
201 /* functions to write to and read from the extended
202 * cp0 registers.
203 * EIRR : Extended Interrupt Request Register
204 * cp0 register 9 sel 6
205 * bits 0...7 are same as cause register 8...15
206 * EIMR : Extended Interrupt Mask Register
207 * cp0 register 9 sel 7
208 * bits 0...7 are same as status register 8...15
209 */
210 static __inline uint64_t
211 nlm_read_c0_eirr(void)
212 {
213
214 return (read_c0_register64(9, 6));
215 }
216
217 static __inline void
218 nlm_write_c0_eirr(uint64_t val)
219 {
220
221 write_c0_register64(9, 6, val);
222 }
223
224 static __inline uint64_t
225 nlm_read_c0_eimr(void)
226 {
227
228 return (read_c0_register64(9, 7));
229 }
230
231 static __inline void
232 nlm_write_c0_eimr(uint64_t val)
233 {
234
235 write_c0_register64(9, 7, val);
236 }
237
238 static __inline__ uint32_t
239 nlm_read_c0_ebase(void)
240 {
241
242 return (read_c0_register32(15, 1));
243 }
244
245 static __inline__ int
246 nlm_nodeid(void)
247 {
248 return (nlm_read_c0_ebase() >> 5) & 0x3;
249 }
250
251 static __inline__ int
252 nlm_cpuid(void)
253 {
254 return nlm_read_c0_ebase() & 0x1f;
255 }
256
257 static __inline__ int
258 nlm_threadid(void)
259 {
260 return nlm_read_c0_ebase() & 0x3;
261 }
262
263 static __inline__ int
264 nlm_coreid(void)
265 {
266 return (nlm_read_c0_ebase() >> 2) & 0x7;
267 }
268 #endif
269
270 #define XLP_MAX_NODES 4
271 #define XLP_MAX_CORES 8
272 #define XLP_MAX_THREADS 4
273
274 #endif
Cache object: c24c3533ab6a13d5aa10058f41b78b04
|