1 /*-
2 * Copyright 2003-2011 Netlogic Microsystems (Netlogic). All rights
3 * reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met:
8 *
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in
13 * the documentation and/or other materials provided with the
14 * distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY Netlogic Microsystems ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE
20 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26 * THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * NETLOGIC_BSD
29 * $FreeBSD$
30 */
31
32 #ifndef __NLM_HAL_MMIO_H__
33 #define __NLM_HAL_MMIO_H__
34
35 /*
36 * This file contains platform specific memory mapped IO implementation
37 * and will provide a way to read 32/64 bit memory mapped registers in
38 * all ABIs
39 */
40
41 /*
42 * For o32 compilation, we have to disable interrupts and enable KX bit to
43 * access 64 bit addresses or data.
44 *
45 * We need to disable interrupts because we save just the lower 32 bits of
46 * registers in interrupt handling. So if we get hit by an interrupt while
47 * using the upper 32 bits of a register, we lose.
48 */
49 static inline uint32_t nlm_save_flags_kx(void)
50 {
51 uint32_t sr = mips_rd_status();
52
53 mips_wr_status((sr & ~MIPS_SR_INT_IE) | MIPS_SR_KX);
54 return (sr);
55 }
56
57 static inline uint32_t nlm_save_flags_cop2(void)
58 {
59 uint32_t sr = mips_rd_status();
60
61 mips_wr_status((sr & ~MIPS_SR_INT_IE) | MIPS_SR_COP_2_BIT);
62 return (sr);
63 }
64
/*
 * Restore a CP0 status register value previously saved by one of the
 * nlm_save_flags_*() helpers (re-enables interrupts, clears KX/COP2).
 */
static inline void nlm_restore_flags(uint32_t sr)
{
	mips_wr_status(sr);
}
69
/*
 * Read a 32-bit word from a memory-mapped address.  The address is
 * narrowed through (long), so on o32 only addresses representable in
 * 32 bits can be used here (use nlm_load_word_daddr() otherwise).
 */
static inline uint32_t
nlm_load_word(uint64_t addr)
{
	volatile uint32_t *wp;

	wp = (volatile uint32_t *)(long)addr;
	return (*wp);
}
77
/*
 * Write a 32-bit word to a memory-mapped address.  Same (long)
 * narrowing caveat as nlm_load_word().
 */
static inline void
nlm_store_word(uint64_t addr, uint32_t val)
{
	volatile uint32_t *wp;

	wp = (volatile uint32_t *)(long)addr;
	*wp = val;
}
85
86 #if defined(__mips_n64) || defined(__mips_n32)
/*
 * Read a 64-bit word from a memory-mapped address.  On n64/n32 the
 * address fits in a register, so a plain dereference suffices.
 *
 * Fix: dropped the meaningless `volatile` qualifier on the by-value
 * parameter; it had no effect and was inconsistent with the o32
 * variant and every other accessor in this file.
 */
static inline uint64_t
nlm_load_dword(uint64_t addr)
{
	volatile uint64_t *p = (volatile uint64_t *)(long)addr;

	return *p;
}
94
/*
 * Write a 64-bit word to a memory-mapped address (n64/n32 direct
 * dereference).
 *
 * Fix: dropped the meaningless `volatile` qualifier on the by-value
 * parameter for consistency with the o32 variant of this function.
 */
static inline void
nlm_store_dword(uint64_t addr, uint64_t val)
{
	volatile uint64_t *p = (volatile uint64_t *)(long)addr;

	*p = val;
}
102
103 #else /* o32 */
/*
 * o32: read a 64-bit word with a single `ld`.  Runs with interrupts
 * off and KX set (nlm_save_flags_kx) because o32 trap handlers save
 * only the low 32 bits of registers (see comment at top of file).
 * The 64-bit value is split into two 32-bit halves for return.
 */
static inline uint64_t
nlm_load_dword(uint64_t addr)
{
	volatile uint64_t *p = (volatile uint64_t *)(long)addr;
	uint32_t valhi, vallo, sr;

	sr = nlm_save_flags_kx();
	__asm__ __volatile__(
	    ".set push\n\t"
	    ".set mips64\n\t"
	    "ld $8, 0(%2)\n\t"		/* 64-bit load into scratch $8 */
	    "dsra32 %0, $8, 0\n\t"	/* upper 32 bits -> valhi */
	    "sll %1, $8, 0\n\t"		/* lower 32 bits -> vallo */
	    ".set pop\n"
	    : "=r"(valhi), "=r"(vallo)
	    : "r"(p)
	    : "$8");
	nlm_restore_flags(sr);

	return ((uint64_t)valhi << 32) | vallo;
}
125
/*
 * o32: write a 64-bit word with a single `sd`.  The two 32-bit halves
 * of the value are reassembled into one 64-bit register in the asm
 * block; interrupts are off and KX set for the duration (see
 * nlm_load_dword above for why).
 */
static inline void
nlm_store_dword(uint64_t addr, uint64_t val)
{
	volatile uint64_t *p = (volatile uint64_t *)(long)addr;
	uint32_t valhi, vallo, sr;

	valhi = val >> 32;
	vallo = val & 0xffffffff;

	sr = nlm_save_flags_kx();
	__asm__ __volatile__(
	    ".set push\n\t"
	    ".set mips64\n\t"
	    "dsll32 $8, %1, 0\n\t"	/* $8 = valhi << 32 */
	    "dsll32 $9, %2, 0\n\t"	/* get rid of the */
	    "dsrl32 $9, $9, 0\n\t"	/* sign extend */
	    "or $9, $9, $8\n\t"		/* $9 = full 64-bit value */
	    "sd $9, 0(%0)\n\t"
	    ".set pop\n"
	    : : "r"(p), "r"(valhi), "r"(vallo)
	    : "$8", "$9", "memory");
	nlm_restore_flags(sr);
}
149 #endif
150
151 #if defined(__mips_n64)
/*
 * n64: a 64-bit address fits directly in a pointer, so the *_daddr
 * accessors are plain dereferences.  The 32-bit value is returned
 * zero-extended to 64 bits.
 */
static inline uint64_t
nlm_load_word_daddr(uint64_t addr)
{
	volatile uint32_t *wp = (volatile uint32_t *)(long)addr;

	return (*wp);
}
159
/* n64: store a 32-bit word through a full 64-bit address. */
static inline void
nlm_store_word_daddr(uint64_t addr, uint32_t val)
{
	volatile uint32_t *wp = (volatile uint32_t *)(long)addr;

	*wp = val;
}
167
/* n64: load a 64-bit word through a full 64-bit address. */
static inline uint64_t
nlm_load_dword_daddr(uint64_t addr)
{
	volatile uint64_t *dp = (volatile uint64_t *)(long)addr;

	return (*dp);
}
175
/* n64: store a 64-bit word through a full 64-bit address. */
static inline void
nlm_store_dword_daddr(uint64_t addr, uint64_t val)
{
	volatile uint64_t *dp = (volatile uint64_t *)(long)addr;

	*dp = val;
}
183
184 #elif defined(__mips_n32)
185
/*
 * n32: registers are 64 bits wide, so the full 64-bit address can be
 * passed in a register, but the access must be emitted under
 * `.set mips64`.  Returns the loaded 32-bit word widened to 64 bits.
 */
static inline uint64_t
nlm_load_word_daddr(uint64_t addr)
{
	uint32_t val;

	__asm__ __volatile__(
	    ".set push\n\t"
	    ".set mips64\n\t"
	    "lw %0, 0(%1)\n\t"
	    ".set pop\n"
	    : "=r"(val)
	    : "r"(addr));

	return val;
}
201
/* n32: store a 32-bit word through a full 64-bit register address. */
static inline void
nlm_store_word_daddr(uint64_t addr, uint32_t val)
{
	__asm__ __volatile__(
	    ".set push\n\t"
	    ".set mips64\n\t"
	    "sw %0, 0(%1)\n\t"
	    ".set pop\n"
	    : : "r"(val), "r"(addr)
	    : "memory");
}
213
/* n32: load a 64-bit word (`ld`) through a full 64-bit register address. */
static inline uint64_t
nlm_load_dword_daddr(uint64_t addr)
{
	uint64_t val;

	__asm__ __volatile__(
	    ".set push\n\t"
	    ".set mips64\n\t"
	    "ld %0, 0(%1)\n\t"
	    ".set pop\n"
	    : "=r"(val)
	    : "r"(addr));
	return val;
}
228
/* n32: store a 64-bit word (`sd`) through a full 64-bit register address. */
static inline void
nlm_store_dword_daddr(uint64_t addr, uint64_t val)
{
	__asm__ __volatile__(
	    ".set push\n\t"
	    ".set mips64\n\t"
	    "sd %0, 0(%1)\n\t"
	    ".set pop\n"
	    : : "r"(val), "r"(addr)
	    : "memory");
}
240
241 #else /* o32 */
/*
 * o32: read a 32-bit word at a full 64-bit address.  The address is
 * passed as two 32-bit halves and reassembled in a 64-bit register
 * ($9) inside the asm block; interrupts are off and KX set for the
 * duration (see comment at top of file).
 */
static inline uint64_t
nlm_load_word_daddr(uint64_t addr)
{
	uint32_t val, addrhi, addrlo, sr;

	addrhi = addr >> 32;
	addrlo = addr & 0xffffffff;

	sr = nlm_save_flags_kx();
	__asm__ __volatile__(
	    ".set push\n\t"
	    ".set mips64\n\t"
	    "dsll32 $8, %1, 0\n\t"	/* $8 = addrhi << 32 */
	    "dsll32 $9, %2, 0\n\t"	/* strip sign extension */
	    "dsrl32 $9, $9, 0\n\t"	/* from addrlo */
	    "or $9, $9, $8\n\t"		/* $9 = full 64-bit address */
	    "lw %0, 0($9)\n\t"
	    ".set pop\n"
	    : "=r"(val)
	    : "r"(addrhi), "r"(addrlo)
	    : "$8", "$9");
	nlm_restore_flags(sr);

	return val;

}
268
/*
 * o32: write a 32-bit word at a full 64-bit address, reassembled
 * from its two halves in $9 (same scheme as nlm_load_word_daddr).
 */
static inline void
nlm_store_word_daddr(uint64_t addr, uint32_t val)
{
	uint32_t addrhi, addrlo, sr;

	addrhi = addr >> 32;
	addrlo = addr & 0xffffffff;

	sr = nlm_save_flags_kx();
	__asm__ __volatile__(
	    ".set push\n\t"
	    ".set mips64\n\t"
	    "dsll32 $8, %1, 0\n\t"	/* $8 = addrhi << 32 */
	    "dsll32 $9, %2, 0\n\t"	/* strip sign extension */
	    "dsrl32 $9, $9, 0\n\t"	/* from addrlo */
	    "or $9, $9, $8\n\t"		/* $9 = full 64-bit address */
	    "sw %0, 0($9)\n\t"
	    ".set pop\n"
	    : : "r"(val), "r"(addrhi), "r"(addrlo)
	    : "$8", "$9", "memory");
	nlm_restore_flags(sr);
}
291
/*
 * o32: read a 64-bit word at a full 64-bit address.  Both the address
 * and the result are handled as 32-bit halves; the address is built
 * in $9, the loaded value split out of $8 with dsra32/sll.
 */
static inline uint64_t
nlm_load_dword_daddr(uint64_t addr)
{
	uint32_t addrh, addrl, sr;
	uint32_t valh, vall;

	addrh = addr >> 32;
	addrl = addr & 0xffffffff;

	sr = nlm_save_flags_kx();
	__asm__ __volatile__(
	    ".set push\n\t"
	    ".set mips64\n\t"
	    "dsll32 $8, %2, 0\n\t"	/* $8 = addrh << 32 */
	    "dsll32 $9, %3, 0\n\t"	/* strip sign extension */
	    "dsrl32 $9, $9, 0\n\t"	/* from addrl */
	    "or $9, $9, $8\n\t"		/* $9 = full 64-bit address */
	    "ld $8, 0($9)\n\t"		/* 64-bit load */
	    "dsra32 %0, $8, 0\n\t"	/* upper half -> valh */
	    "sll %1, $8, 0\n\t"		/* lower half -> vall */
	    ".set pop\n"
	    : "=r"(valh), "=r"(vall)
	    : "r"(addrh), "r"(addrl)
	    : "$8", "$9");
	nlm_restore_flags(sr);

	return ((uint64_t)valh << 32) | vall;
}
320
/*
 * o32: write a 64-bit word at a full 64-bit address.  Address and
 * value are each reassembled from 32-bit halves ($9 holds the
 * address, $8 the value; $10 is scratch).
 */
static inline void
nlm_store_dword_daddr(uint64_t addr, uint64_t val)
{
	uint32_t addrh, addrl, sr;
	uint32_t valh, vall;

	addrh = addr >> 32;
	addrl = addr & 0xffffffff;
	valh = val >> 32;
	vall = val & 0xffffffff;

	sr = nlm_save_flags_kx();
	__asm__ __volatile__(
	    ".set push\n\t"
	    ".set mips64\n\t"
	    "dsll32 $8, %2, 0\n\t"	/* $8 = addrh << 32 */
	    "dsll32 $9, %3, 0\n\t"	/* strip sign extension */
	    "dsrl32 $9, $9, 0\n\t"	/* from addrl */
	    "or $9, $9, $8\n\t"		/* $9 = full 64-bit address */
	    "dsll32 $8, %0, 0\n\t"	/* $8 = valh << 32 */
	    "dsll32 $10, %1, 0\n\t"	/* strip sign extension */
	    "dsrl32 $10, $10, 0\n\t"	/* from vall */
	    "or $8, $8, $10\n\t"	/* $8 = full 64-bit value */
	    "sd $8, 0($9)\n\t"
	    ".set pop\n"
	    : : "r"(valh), "r"(vall), "r"(addrh), "r"(addrl)
	    : "$8", "$9", "memory");
	nlm_restore_flags(sr);
}
350 #endif /* __mips_n64 */
351
/*
 * Read the 32-bit device register at word index 'reg' from 'base'.
 * Note: 'reg' indexes 32-bit words, it is not a byte offset.
 */
static inline uint32_t
nlm_read_reg(uint64_t base, uint32_t reg)
{
	volatile uint32_t *rp;

	rp = (volatile uint32_t *)(long)base + reg;
	return (*rp);
}
359
/*
 * Write the 32-bit device register at word index 'reg' (not a byte
 * offset) relative to 'base'.
 */
static inline void
nlm_write_reg(uint64_t base, uint32_t reg, uint32_t val)
{
	volatile uint32_t *rp;

	rp = (volatile uint32_t *)(long)base + reg;
	*rp = val;
}
367
/*
 * Read a 64-bit register.  'reg' is still a 32-bit word index, so a
 * pair of word indices maps to one 64-bit register at
 * base + (reg >> 1) * 8.
 */
static inline uint64_t
nlm_read_reg64(uint64_t base, uint32_t reg)
{
	uint64_t daddr = base + (reg >> 1) * sizeof(uint64_t);

	return (nlm_load_dword(daddr));
}
375
/*
 * Write a 64-bit register ('reg' is a 32-bit word index, see
 * nlm_read_reg64).
 *
 * Fix: dropped the `return` before the void call — returning a void
 * expression from a void function violates C11 6.8.6.4p1 and only
 * compiles as a GNU extension.
 */
static inline void
nlm_write_reg64(uint64_t base, uint32_t reg, uint64_t val)
{
	uint64_t addr = base + (reg >> 1) * sizeof(uint64_t);

	nlm_store_dword(addr, val);
}
383
384 /*
385 * Routines to store 32/64 bit values to 64 bit addresses,
386 * used when going thru XKPHYS to access registers
387 */
/*
 * Read a 32-bit register through a full 64-bit (XKPHYS) base address;
 * 'reg' is a 32-bit word index.
 */
static inline uint32_t
nlm_read_reg_xkphys(uint64_t base, uint32_t reg)
{
	uint64_t daddr = base + reg * sizeof(uint32_t);

	return (nlm_load_word_daddr(daddr));
}
395
/*
 * Write a 32-bit register through a full 64-bit (XKPHYS) base
 * address; 'reg' is a 32-bit word index.
 *
 * Fix: dropped the `return` before the void call (invalid in strict
 * C, C11 6.8.6.4p1) and added the conventional blank line after the
 * declaration to match the rest of the file.
 */
static inline void
nlm_write_reg_xkphys(uint64_t base, uint32_t reg, uint32_t val)
{
	uint64_t addr = base + reg * sizeof(uint32_t);

	nlm_store_word_daddr(addr, val);
}
402
/*
 * Read a 64-bit register through a full 64-bit (XKPHYS) base address;
 * 'reg' is a 32-bit word index, so pairs map to one 64-bit register.
 */
static inline uint64_t
nlm_read_reg64_xkphys(uint64_t base, uint32_t reg)
{
	uint64_t daddr = base + (reg >> 1) * sizeof(uint64_t);

	return (nlm_load_dword_daddr(daddr));
}
410
/*
 * Write a 64-bit register through a full 64-bit (XKPHYS) base
 * address; 'reg' is a 32-bit word index.
 *
 * Fix: dropped the `return` before the void call — invalid in strict
 * C (C11 6.8.6.4p1), same defect as nlm_write_reg64.
 */
static inline void
nlm_write_reg64_xkphys(uint64_t base, uint32_t reg, uint64_t val)
{
	uint64_t addr = base + (reg >> 1) * sizeof(uint64_t);

	nlm_store_dword_daddr(addr, val);
}
418
419 /* Location where IO base is mapped */
420 extern uint64_t xlp_io_base;
421
422 static inline uint64_t
423 nlm_pcicfg_base(uint32_t devoffset)
424 {
425 return xlp_io_base + devoffset;
426 }
427
/*
 * Read BAR0 of the PCI function at 'pcibase' (config word index 0x4),
 * mask off the low flag bits, and return the resulting physical
 * address mapped through the XKPHYS window (0x9000000000000000).
 */
static inline uint64_t
nlm_xkphys_map_pcibar0(uint64_t pcibase)
{
	uint64_t bar0;

	bar0 = nlm_read_reg(pcibase, 0x4) & ~0xfu;
	return (0x9000000000000000ULL | bar0);
}
436
437 #endif
Cache object: 16f619ddf7e2c9e9b410e8dd62ce1da8
|