1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright 2003-2011 Netlogic Microsystems (Netlogic). All rights
5 * reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are
9 * met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY Netlogic Microsystems ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
29 *
30 * NETLOGIC_BSD
31 * $FreeBSD$
32 */
33
34 #ifndef __NLM_MIPS_EXTNS_H__
35 #define __NLM_MIPS_EXTNS_H__
36
37 #if !defined(LOCORE) && !defined(__ASSEMBLY__)
/*
 * Atomically exchange the 32-bit word at *loc with val, returning the
 * previous contents of *loc.  Uses the Netlogic-specific "swapw"
 * instruction, emitted as a raw .word because the assembler does not
 * know the opcode.  The instruction operates on fixed registers:
 * $9 holds the address and $8 holds the new value on entry / the old
 * value on exit, hence the explicit moves and the clobber list.
 */
static __inline__ int32_t nlm_swapw(int32_t *loc, int32_t val)
{
	int32_t oldval = 0;

	__asm__ __volatile__ (
		".set push\n"
		".set noreorder\n"
		"move $9, %2\n"		/* $9 = target address */
		"move $8, %3\n"		/* $8 = value to store */
		".word 0x71280014\n"	/* "swapw $8, $9\n" */
		"move %1, $8\n"		/* $8 now holds the old value */
		".set pop\n"
		: "+m" (*loc), "=r" (oldval)
		: "r" (loc), "r" (val)
		: "$8", "$9" );

	return oldval;
}
56
/*
 * Unsigned variant of nlm_swapw(): atomically exchange the 32-bit word
 * at *loc with val and return the previous contents.  Same fixed
 * register convention ($9 = address, $8 = value in / old value out);
 * the opcode .word differs only in the function field (0x15, "swapwu").
 */
static __inline__ uint32_t nlm_swapwu(int32_t *loc, uint32_t val)
{
	uint32_t oldval;

	__asm__ __volatile__ (
		".set push\n"
		".set noreorder\n"
		"move $9, %2\n"		/* $9 = target address */
		"move $8, %3\n"		/* $8 = value to store */
		".word 0x71280015\n"	/* "swapwu $8, $9\n" */
		"move %1, $8\n"		/* $8 now holds the old value */
		".set pop\n"
		: "+m" (*loc), "=r" (oldval)
		: "r" (loc), "r" (val)
		: "$8", "$9" );

	return oldval;
}
75
76 #if (__mips == 64)
/*
 * 64-bit atomic exchange (compiled only for 64-bit targets).
 *
 * NOTE(review): this emits the same opcode word as the 32-bit swap
 * above (0x71280014, "swapw") and takes an int32_t * location while
 * returning a uint64_t -- this looks like a copy/paste of nlm_swapw
 * rather than a true 64-bit "swapd".  Confirm the intended encoding
 * and pointer type against the XLP programmer's reference before
 * relying on 64-bit exchange semantics.
 */
static __inline__ uint64_t nlm_swapd(int32_t *loc, uint64_t val)
{
	uint64_t oldval;

	__asm__ __volatile__ (
		".set push\n"
		".set noreorder\n"
		"move $9, %2\n"		/* $9 = target address */
		"move $8, %3\n"		/* $8 = value to store */
		".word 0x71280014\n"	/* "swapw $8, $9\n" */
		"move %1, $8\n"		/* $8 now holds the old value */
		".set pop\n"
		: "+m" (*loc), "=r" (oldval)
		: "r" (loc), "r" (val)
		: "$8", "$9" );

	return oldval;
}
95 #endif
96
97 /*
98 * Atomic increment a unsigned int
99 */
/*
 * Atomically add "value" to the unsigned int at *addr and return the
 * previous contents of *addr (fetch-and-add).  Uses the Netlogic
 * "ldaddwu" instruction, emitted as a raw .word because the assembler
 * does not know the opcode.  Fixed register convention: $8 carries the
 * addend on entry and the old memory value on exit, $9 the address.
 *
 * Operand 2 uses the matching constraint "0" so the input copy of
 * "value" shares output operand %0; the previous empty constraint
 * "" did not guarantee %2 a register at all.
 */
static __inline unsigned int
nlm_ldaddwu(unsigned int value, unsigned int *addr)
{
	__asm__ __volatile__(
	    ".set push\n"
	    ".set noreorder\n"
	    "move $8, %2\n"		/* $8 = addend */
	    "move $9, %3\n"		/* $9 = target address */
	    ".word 0x71280011\n"	/* ldaddwu $8, $9 */
	    "move %0, $8\n"		/* $8 now holds the old value */
	    ".set pop\n"
	    : "=&r" (value), "+m" (*addr)
	    : "0" (value), "r" ((unsigned long)addr)
	    : "$8", "$9");

	return (value);
}
117 /*
118 * 32 bit read write for c0
119 */
/*
 * Read the 32-bit CP0 register (reg, sel) with mfc0 and yield its
 * value (statement-expression).  Both reg and sel must be
 * compile-time constants ("i" constraints).
 */
#define read_c0_register32(reg, sel) \
({ \
	uint32_t __rv; \
	__asm__ __volatile__( \
	    ".set push\n\t" \
	    ".set mips32\n\t" \
	    "mfc0 %0, $%1, %2\n\t" \
	    ".set pop\n" \
	    : "=r" (__rv) : "i" (reg), "i" (sel) ); \
	__rv; \
})
131
/*
 * Write a 32-bit value to CP0 register (reg, sel) with mtc0.  reg and
 * sel must be compile-time constants ("i" constraints).
 *
 * Wrapped in do { } while (0) so the macro expands to exactly one
 * statement; the previous definition ended in a bare ';', which made
 * "if (x) write_c0_register32(...); else ..." fail to compile.
 */
#define write_c0_register32(reg, sel, value) \
do { \
	__asm__ __volatile__( \
	    ".set push\n\t" \
	    ".set mips32\n\t" \
	    "mtc0 %0, $%1, %2\n\t" \
	    ".set pop\n" \
	    : : "r" (value), "i" (reg), "i" (sel)); \
} while (0)
139
140 #if defined(__mips_n64) || defined(__mips_n32)
141 /*
142 * On 64 bit compilation, the operations are simple
143 */
/*
 * Read the full 64-bit CP0 register (reg, sel) with dmfc0 and yield
 * its value (statement-expression).  Only valid on 64-bit ABIs, where
 * a single register holds the whole value.  reg and sel must be
 * compile-time constants ("i" constraints).
 */
#define read_c0_register64(reg, sel) \
({ \
	uint64_t __rv; \
	__asm__ __volatile__( \
	    ".set push\n\t" \
	    ".set mips64\n\t" \
	    "dmfc0 %0, $%1, %2\n\t" \
	    ".set pop\n" \
	    : "=r" (__rv) : "i" (reg), "i" (sel) ); \
	__rv; \
})
155
/*
 * Write a 64-bit value to CP0 register (reg, sel) with dmtc0 (64-bit
 * ABIs only).  reg and sel must be compile-time constants.
 *
 * Wrapped in do { } while (0) so the macro expands to exactly one
 * statement; the previous definition ended in a bare ';', which made
 * "if (x) write_c0_register64(...); else ..." fail to compile.
 */
#define write_c0_register64(reg, sel, value) \
do { \
	__asm__ __volatile__( \
	    ".set push\n\t" \
	    ".set mips64\n\t" \
	    "dmtc0 %0, $%1, %2\n\t" \
	    ".set pop\n" \
	    : : "r" (value), "i" (reg), "i" (sel)); \
} while (0)
163 #else /* ! (defined(__mips_n64) || defined(__mips_n32)) */
164
165 /*
166 * 32 bit compilation, 64 bit values has to split
167 */
/*
 * 32-bit ABI version: read the 64-bit CP0 register (reg, sel) with
 * dmfc0 into $8, then split it -- dsra32 extracts the high 32 bits,
 * "sll x, 0" the (sign-extended) low 32 bits -- and reassemble the
 * halves into a uint64_t in C.
 */
#define read_c0_register64(reg, sel) \
({ \
	uint32_t __high, __low; \
	__asm__ __volatile__( \
	    ".set push\n\t" \
	    ".set noreorder\n\t" \
	    ".set mips64\n\t" \
	    "dmfc0 $8, $%2, %3\n\t" \
	    "dsra32 %0, $8, 0\n\t" \
	    "sll %1, $8, 0\n\t" \
	    ".set pop\n" \
	    : "=r"(__high), "=r"(__low): "i"(reg), "i"(sel) \
	    : "$8"); \
	((uint64_t)__high << 32) | __low; \
})
183
/*
 * 32-bit ABI version: write a 64-bit value to CP0 register (reg, sel).
 * The value is split into 32-bit halves in C, the halves are glued
 * back together in $8 (dsll32/dsrl32 clears the sign-extended upper
 * bits of the low half before OR-ing in the high half), then written
 * with dmtc0.
 *
 * The "value" argument is parenthesized at every use; the previous
 * definition expanded it bare ("value >> 32"), which misassociated
 * for expression arguments such as conditional expressions.
 */
#define write_c0_register64(reg, sel, value) \
do { \
	uint32_t __high = (uint64_t)(value) >> 32; \
	uint32_t __low = (value) & 0xffffffff; \
	__asm__ __volatile__( \
	    ".set push\n\t" \
	    ".set noreorder\n\t" \
	    ".set mips64\n\t" \
	    "dsll32 $8, %1, 0\n\t" \
	    "dsll32 $9, %0, 0\n\t" \
	    "dsrl32 $8, $8, 0\n\t" \
	    "or $8, $8, $9\n\t" \
	    "dmtc0 $8, $%2, %3\n\t" \
	    ".set pop" \
	    :: "r"(__high), "r"(__low), "i"(reg), "i"(sel) \
	    : "$8", "$9"); \
} while (0)
201
202 #endif
203 /* functions to write to and read from the extended
204 * cp0 registers.
205 * EIRR : Extended Interrupt Request Register
206 * cp0 register 9 sel 6
207 * bits 0...7 are same as cause register 8...15
208 * EIMR : Extended Interrupt Mask Register
209 * cp0 register 9 sel 7
210 * bits 0...7 are same as status register 8...15
211 */
212 static __inline uint64_t
213 nlm_read_c0_eirr(void)
214 {
215
216 return (read_c0_register64(9, 6));
217 }
218
219 static __inline void
220 nlm_write_c0_eirr(uint64_t val)
221 {
222
223 write_c0_register64(9, 6, val);
224 }
225
226 static __inline uint64_t
227 nlm_read_c0_eimr(void)
228 {
229
230 return (read_c0_register64(9, 7));
231 }
232
233 static __inline void
234 nlm_write_c0_eimr(uint64_t val)
235 {
236
237 write_c0_register64(9, 7, val);
238 }
239
240 static __inline__ uint32_t
241 nlm_read_c0_ebase(void)
242 {
243
244 return (read_c0_register32(15, 1));
245 }
246
247 static __inline__ int
248 nlm_nodeid(void)
249 {
250 return (nlm_read_c0_ebase() >> 5) & 0x3;
251 }
252
253 static __inline__ int
254 nlm_cpuid(void)
255 {
256 return nlm_read_c0_ebase() & 0x1f;
257 }
258
259 static __inline__ int
260 nlm_threadid(void)
261 {
262 return nlm_read_c0_ebase() & 0x3;
263 }
264
265 static __inline__ int
266 nlm_coreid(void)
267 {
268 return (nlm_read_c0_ebase() >> 2) & 0x7;
269 }
270 #endif
271
272 #define XLP_MAX_NODES 4
273 #define XLP_MAX_CORES 8
274 #define XLP_MAX_THREADS 4
275
276 #endif