1 /*-
2 * Copyright (c) 2010 Isilon Systems, Inc.
3 * Copyright (c) 2010 iX Systems, Inc.
4 * Copyright (c) 2010 Panasas, Inc.
5 * Copyright (c) 2013-2015 Mellanox Technologies, Ltd.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice unmodified, this list of conditions, and the following
13 * disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 *
29 * $FreeBSD$
30 */
31 #ifndef _LINUXKPI_LINUX_IO_H_
32 #define _LINUXKPI_LINUX_IO_H_
33
34 #include <sys/endian.h>
35 #include <sys/types.h>
36
37 #include <machine/vm.h>
38
39 #include <linux/compiler.h>
40 #include <linux/types.h>
41 #if !defined(__arm__)
42 #include <asm/set_memory.h>
43 #endif
44
45 /*
46 * XXX This is all x86 specific. It should be bus space access.
47 */
48
49 /* rmb and wmb are declared in machine/atomic.h, so should be included first. */
/* Barrier before an MMIO read. */
#ifndef __io_br
#define __io_br() __compiler_membar()
#endif

/* Barrier after an MMIO read; uses rmb() when the platform defines it. */
#ifndef __io_ar
#ifdef rmb
#define __io_ar() rmb()
#else
#define __io_ar() __compiler_membar()
#endif
#endif

/* Barrier before an MMIO write; uses wmb() when the platform defines it. */
#ifndef __io_bw
#ifdef wmb
#define __io_bw() wmb()
#else
#define __io_bw() __compiler_membar()
#endif
#endif

/* Barrier after an MMIO write. */
#ifndef __io_aw
#define __io_aw() __compiler_membar()
#endif
73
74 /* Access MMIO registers atomically without barriers and byte swapping. */
75
/* Read an 8-bit MMIO register: no barriers, no byte swapping. */
static inline uint8_t
__raw_readb(const volatile void *addr)
{
	const volatile uint8_t *p = addr;

	return (*p);
}
#define __raw_readb(addr) __raw_readb(addr)
82
/* Write an 8-bit MMIO register: no barriers, no byte swapping. */
static inline void
__raw_writeb(uint8_t v, volatile void *addr)
{
	volatile uint8_t *p = addr;

	*p = v;
}
#define __raw_writeb(v, addr) __raw_writeb(v, addr)
89
/* Read a 16-bit MMIO register: no barriers, no byte swapping. */
static inline uint16_t
__raw_readw(const volatile void *addr)
{
	const volatile uint16_t *p = addr;

	return (*p);
}
#define __raw_readw(addr) __raw_readw(addr)
96
/* Write a 16-bit MMIO register: no barriers, no byte swapping. */
static inline void
__raw_writew(uint16_t v, volatile void *addr)
{
	volatile uint16_t *p = addr;

	*p = v;
}
#define __raw_writew(v, addr) __raw_writew(v, addr)
103
/* Read a 32-bit MMIO register: no barriers, no byte swapping. */
static inline uint32_t
__raw_readl(const volatile void *addr)
{
	const volatile uint32_t *p = addr;

	return (*p);
}
#define __raw_readl(addr) __raw_readl(addr)
110
/* Write a 32-bit MMIO register: no barriers, no byte swapping. */
static inline void
__raw_writel(uint32_t v, volatile void *addr)
{
	volatile uint32_t *p = addr;

	*p = v;
}
#define __raw_writel(v, addr) __raw_writel(v, addr)
117
118 #ifdef __LP64__
/* Read a 64-bit MMIO register: no barriers, no byte swapping (LP64 only). */
static inline uint64_t
__raw_readq(const volatile void *addr)
{
	const volatile uint64_t *p = addr;

	return (*p);
}
#define __raw_readq(addr) __raw_readq(addr)
125
/* Write a 64-bit MMIO register: no barriers, no byte swapping (LP64 only). */
static inline void
__raw_writeq(uint64_t v, volatile void *addr)
{
	volatile uint64_t *p = addr;

	*p = v;
}
#define __raw_writeq(v, addr) __raw_writeq(v, addr)
132 #endif
133
/* Linux mmiowb(); maps to a plain compiler barrier() here. */
#define mmiowb() barrier()
135
136 /* Access little-endian MMIO registers atomically with memory barriers. */
137
#undef readb
/*
 * Read an 8-bit MMIO register with ordering barriers on both sides.
 * Route the access through __raw_readb() for consistency with
 * readw()/readl()/readq() (no byte swap is needed for a single byte).
 */
static inline uint8_t
readb(const volatile void *addr)
{
	uint8_t v;

	__io_br();
	v = __raw_readb(addr);
	__io_ar();
	return (v);
}
#define readb(addr) readb(addr)
150
#undef writeb
/*
 * Write an 8-bit MMIO register with ordering barriers on both sides.
 * Route the access through __raw_writeb() for consistency with
 * writew()/writel()/writeq() (no byte swap is needed for a single byte).
 */
static inline void
writeb(uint8_t v, volatile void *addr)
{
	__io_bw();
	__raw_writeb(v, addr);
	__io_aw();
}
#define writeb(v, addr) writeb(v, addr)
160
#undef readw
/*
 * Read a little-endian 16-bit MMIO register and return it in host
 * byte order, with ordering barriers before and after the access.
 */
static inline uint16_t
readw(const volatile void *addr)
{
	uint16_t v;

	__io_br();
	v = le16toh(__raw_readw(addr));
	__io_ar();
	return (v);
}
#define readw(addr) readw(addr)
173
#undef writew
/*
 * Write a host-order 16-bit value to a little-endian MMIO register,
 * with ordering barriers before and after the access.
 */
static inline void
writew(uint16_t v, volatile void *addr)
{
	__io_bw();
	__raw_writew(htole16(v), addr);
	__io_aw();
}
#define writew(v, addr) writew(v, addr)
183
#undef readl
/*
 * Read a little-endian 32-bit MMIO register and return it in host
 * byte order, with ordering barriers before and after the access.
 */
static inline uint32_t
readl(const volatile void *addr)
{
	uint32_t v;

	__io_br();
	v = le32toh(__raw_readl(addr));
	__io_ar();
	return (v);
}
#define readl(addr) readl(addr)
196
#undef writel
/*
 * Write a host-order 32-bit value to a little-endian MMIO register,
 * with ordering barriers before and after the access.
 */
static inline void
writel(uint32_t v, volatile void *addr)
{
	__io_bw();
	__raw_writel(htole32(v), addr);
	__io_aw();
}
#define writel(v, addr) writel(v, addr)
206
207 #undef readq
208 #undef writeq
209 #ifdef __LP64__
/*
 * Read a little-endian 64-bit MMIO register and return it in host
 * byte order, with ordering barriers before and after (LP64 only).
 */
static inline uint64_t
readq(const volatile void *addr)
{
	uint64_t v;

	__io_br();
	v = le64toh(__raw_readq(addr));
	__io_ar();
	return (v);
}
#define readq(addr) readq(addr)
221
/*
 * Write a host-order 64-bit value to a little-endian MMIO register,
 * with ordering barriers before and after (LP64 only).
 */
static inline void
writeq(uint64_t v, volatile void *addr)
{
	__io_bw();
	__raw_writeq(htole64(v), addr);
	__io_aw();
}
#define writeq(v, addr) writeq(v, addr)
230 #endif
231
232 /* Access little-endian MMIO registers atomically without memory barriers. */
233
#undef readb_relaxed
/* Read an 8-bit MMIO register with no ordering barriers. */
static inline uint8_t
readb_relaxed(const volatile void *addr)
{
	uint8_t value;

	value = __raw_readb(addr);
	return (value);
}
#define readb_relaxed(addr) readb_relaxed(addr)
241
#undef writeb_relaxed
/* Write an 8-bit MMIO register with no ordering barriers. */
static inline void
writeb_relaxed(uint8_t v, volatile void *addr)
{
	__raw_writeb(v, addr);
}
#define writeb_relaxed(v, addr) writeb_relaxed(v, addr)
249
#undef readw_relaxed
/* Read a little-endian 16-bit MMIO register, no barriers. */
static inline uint16_t
readw_relaxed(const volatile void *addr)
{
	uint16_t raw;

	raw = __raw_readw(addr);
	return (le16toh(raw));
}
#define readw_relaxed(addr) readw_relaxed(addr)
257
#undef writew_relaxed
/* Write a little-endian 16-bit MMIO register, no barriers. */
static inline void
writew_relaxed(uint16_t v, volatile void *addr)
{
	uint16_t le;

	le = htole16(v);
	__raw_writew(le, addr);
}
#define writew_relaxed(v, addr) writew_relaxed(v, addr)
265
#undef readl_relaxed
/* Read a little-endian 32-bit MMIO register, no barriers. */
static inline uint32_t
readl_relaxed(const volatile void *addr)
{
	uint32_t raw;

	raw = __raw_readl(addr);
	return (le32toh(raw));
}
#define readl_relaxed(addr) readl_relaxed(addr)
273
#undef writel_relaxed
/* Write a little-endian 32-bit MMIO register, no barriers. */
static inline void
writel_relaxed(uint32_t v, volatile void *addr)
{
	uint32_t le;

	le = htole32(v);
	__raw_writel(le, addr);
}
#define writel_relaxed(v, addr) writel_relaxed(v, addr)
281
282 #undef readq_relaxed
283 #undef writeq_relaxed
284 #ifdef __LP64__
/* Read a little-endian 64-bit MMIO register, no barriers (LP64 only). */
static inline uint64_t
readq_relaxed(const volatile void *addr)
{
	uint64_t raw;

	raw = __raw_readq(addr);
	return (le64toh(raw));
}
#define readq_relaxed(addr) readq_relaxed(addr)
291
/* Write a little-endian 64-bit MMIO register, no barriers (LP64 only). */
static inline void
writeq_relaxed(uint64_t v, volatile void *addr)
{
	uint64_t le;

	le = htole64(v);
	__raw_writeq(le, addr);
}
#define writeq_relaxed(v, addr) writeq_relaxed(v, addr)
298 #endif
299
300 /* XXX On Linux ioread and iowrite handle both MMIO and port IO. */
301
#undef ioread8
/* Linux ioread8(): 8-bit read with full ordering (MMIO only here). */
static inline uint8_t
ioread8(const volatile void *addr)
{
	uint8_t value;

	value = readb(addr);
	return (value);
}
#define ioread8(addr) ioread8(addr)
309
#undef ioread16
/* Linux ioread16(): little-endian 16-bit read with full ordering. */
static inline uint16_t
ioread16(const volatile void *addr)
{
	uint16_t value;

	value = readw(addr);
	return (value);
}
#define ioread16(addr) ioread16(addr)
317
#undef ioread16be
/*
 * Read a big-endian 16-bit MMIO register and return it in host byte
 * order, with ordering barriers before and after the access.
 */
static inline uint16_t
ioread16be(const volatile void *addr)
{
	uint16_t v;

	__io_br();
	v = (be16toh(__raw_readw(addr)));
	__io_ar();

	return (v);
}
#define ioread16be(addr) ioread16be(addr)
331
#undef ioread32
/* Linux ioread32(): little-endian 32-bit read with full ordering. */
static inline uint32_t
ioread32(const volatile void *addr)
{
	uint32_t value;

	value = readl(addr);
	return (value);
}
#define ioread32(addr) ioread32(addr)
339
#undef ioread32be
/*
 * Read a big-endian 32-bit MMIO register and return it in host byte
 * order, with ordering barriers before and after the access.
 */
static inline uint32_t
ioread32be(const volatile void *addr)
{
	uint32_t v;

	__io_br();
	v = (be32toh(__raw_readl(addr)));
	__io_ar();

	return (v);
}
#define ioread32be(addr) ioread32be(addr)
353
#undef iowrite8
/* Linux iowrite8(): 8-bit write with full ordering (MMIO only here). */
static inline void
iowrite8(uint8_t v, volatile void *addr)
{
	writeb(v, addr);
}
#define iowrite8(v, addr) iowrite8(v, addr)
361
#undef iowrite16
/*
 * Linux iowrite16(): little-endian 16-bit write with full ordering.
 * The self-referential macro is function-like to match the other
 * ioread/iowrite wrappers in this file.
 */
static inline void
iowrite16(uint16_t v, volatile void *addr)
{
	writew(v, addr);
}
#define iowrite16(v, addr) iowrite16(v, addr)
369
#undef iowrite32
/* Linux iowrite32(): little-endian 32-bit write with full ordering. */
static inline void
iowrite32(uint32_t v, volatile void *addr)
{
	writel(v, addr);
}
#define iowrite32(v, addr) iowrite32(v, addr)
377
#undef iowrite32be
/*
 * Write a host-order 32-bit value to a big-endian MMIO register,
 * with ordering barriers before and after the access.
 */
static inline void
iowrite32be(uint32_t v, volatile void *addr)
{
	__io_bw();
	__raw_writel(htobe32(v), addr);
	__io_aw();
}
#define iowrite32be(v, addr) iowrite32be(v, addr)
387
#if defined(__i386__) || defined(__amd64__)
/* Write one byte to an x86 I/O port via the outb instruction. */
static inline void
_outb(u_char data, u_int port)
{
	__asm __volatile("outb %0, %w1" : : "a" (data), "Nd" (port));
}
#endif
395
#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) || defined(__riscv)
/* Map a physical range with the given VM memory attribute; NULL on failure. */
void *_ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr);
#else
/* Stub for platforms without an implementation: mapping always fails. */
static __inline void *
_ioremap_attr(vm_paddr_t _phys_addr, unsigned long _size, int _attr)
{
	return (NULL);
}
#endif
405
#ifdef VM_MEMATTR_DEVICE
/* Platforms with a device memory attribute use it for every variant. */
#define ioremap_nocache(addr, size) \
	_ioremap_attr((addr), (size), VM_MEMATTR_DEVICE)
#define ioremap_wt(addr, size) \
	_ioremap_attr((addr), (size), VM_MEMATTR_DEVICE)
#define ioremap(addr, size) \
	_ioremap_attr((addr), (size), VM_MEMATTR_DEVICE)
#else
/* Otherwise pick the closest matching attribute per variant. */
#define ioremap_nocache(addr, size) \
	_ioremap_attr((addr), (size), VM_MEMATTR_UNCACHEABLE)
#define ioremap_wt(addr, size) \
	_ioremap_attr((addr), (size), VM_MEMATTR_WRITE_THROUGH)
#define ioremap(addr, size) \
	_ioremap_attr((addr), (size), VM_MEMATTR_UNCACHEABLE)
#endif
#ifdef VM_MEMATTR_WRITE_COMBINING
#define ioremap_wc(addr, size) \
	_ioremap_attr((addr), (size), VM_MEMATTR_WRITE_COMBINING)
#else
/* Fall back to an uncached mapping when write-combining is unavailable. */
#define ioremap_wc(addr, size) ioremap_nocache(addr, size)
#endif
#define ioremap_cache(addr, size) \
	_ioremap_attr((addr), (size), VM_MEMATTR_WRITE_BACK)
/* Release a mapping created by the ioremap*() helpers (or memremap()). */
void iounmap(void *addr);
430
/*
 * NOTE(review): these map straight onto regular memset/memcpy; Linux
 * provides MMIO-safe implementations with controlled access widths —
 * confirm no caller depends on that here.
 */
#define memset_io(a, b, c) memset((a), (b), (c))
#define memcpy_fromio(a, b, c) memcpy((a), (b), (c))
#define memcpy_toio(a, b, c) memcpy((a), (b), (c))
434
/*
 * Copy `count` 32-bit words from normal memory to MMIO space using
 * raw (unordered, non-swapping) 32-bit writes.  The index is size_t,
 * matching `count`, to avoid signed/unsigned comparison and overflow.
 */
static inline void
__iowrite32_copy(void *to, const void *from, size_t count)
{
	const uint32_t *src = from;
	uint32_t *dst = to;
	size_t i;

	for (i = 0; i < count; i++, src++, dst++)
		__raw_writel(*src, dst);
}
445
/*
 * Copy `count` 64-bit words from normal memory to MMIO space.  On
 * LP64 this uses raw 64-bit writes (size_t index avoids the previous
 * signed/unsigned mismatch); otherwise each element is emitted as two
 * 32-bit writes.
 */
static inline void
__iowrite64_copy(void *to, const void *from, size_t count)
{
#ifdef __LP64__
	const uint64_t *src = from;
	uint64_t *dst = to;
	size_t i;

	for (i = 0; i < count; i++, src++, dst++)
		__raw_writeq(*src, dst);
#else
	__iowrite32_copy(to, from, count * 2);
#endif
}
460
/*
 * Copy `count` 32-bit words from MMIO space to normal memory using
 * raw (unordered, non-swapping) 32-bit reads.  The index is size_t,
 * matching `count`, to avoid signed/unsigned comparison and overflow.
 */
static inline void
__ioread32_copy(void *to, const void *from, size_t count)
{
	const uint32_t *src = from;
	uint32_t *dst = to;
	size_t i;

	for (i = 0; i < count; i++, src++, dst++)
		*dst = __raw_readl(src);
}
471
/*
 * Copy `count` 64-bit words from MMIO space to normal memory.  On
 * LP64 this uses raw 64-bit reads (size_t index avoids the previous
 * signed/unsigned mismatch); otherwise each element is fetched as two
 * 32-bit reads.
 */
static inline void
__ioread64_copy(void *to, const void *from, size_t count)
{
#ifdef __LP64__
	const uint64_t *src = from;
	uint64_t *dst = to;
	size_t i;

	for (i = 0; i < count; i++, src++, dst++)
		*dst = __raw_readq(src);
#else
	__ioread32_copy(to, from, count * 2);
#endif
}
486
/* memremap() mapping-type flags; memremap() tries them in WB, WT, WC order. */
enum {
	MEMREMAP_WB = 1 << 0,	/* write-back (cacheable) */
	MEMREMAP_WT = 1 << 1,	/* write-through */
	MEMREMAP_WC = 1 << 2,	/* write-combining */
};
492
493 static inline void *
494 memremap(resource_size_t offset, size_t size, unsigned long flags)
495 {
496 void *addr = NULL;
497
498 if ((flags & MEMREMAP_WB) &&
499 (addr = ioremap_cache(offset, size)) != NULL)
500 goto done;
501 if ((flags & MEMREMAP_WT) &&
502 (addr = ioremap_wt(offset, size)) != NULL)
503 goto done;
504 if ((flags & MEMREMAP_WC) &&
505 (addr = ioremap_wc(offset, size)) != NULL)
506 goto done;
507 done:
508 return (addr);
509 }
510
/* Undo a memremap() mapping; forwards to iounmap(). */
static inline void
memunmap(void *addr)
{
	/* XXX May need to check if this is RAM */
	iounmap(addr);
}
517
#define __MTRR_ID_BASE 1
/* Write-combining region helpers, backed by the lkpi_* functions below. */
int lkpi_arch_phys_wc_add(unsigned long, unsigned long);
void lkpi_arch_phys_wc_del(int);
#define arch_phys_wc_add(...) lkpi_arch_phys_wc_add(__VA_ARGS__)
#define arch_phys_wc_del(...) lkpi_arch_phys_wc_del(__VA_ARGS__)
/* Convert an arch_phys_wc_add() cookie to a 0-based index; -1 if below base. */
#define arch_phys_wc_index(x) \
	(((x) < __MTRR_ID_BASE) ? -1 : ((x) - __MTRR_ID_BASE))
525
#if defined(__amd64__) || defined(__i386__) || defined(__aarch64__) || defined(__powerpc__) || defined(__riscv)
/*
 * Set the memory type of an I/O range to write-combining.  The byte
 * size is converted to a page count for set_memory_wc().
 */
static inline int
arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size)
{

	return (set_memory_wc(start, size >> PAGE_SHIFT));
}

/* Restore a range reserved above back to write-back. */
static inline void
arch_io_free_memtype_wc(resource_size_t start, resource_size_t size)
{
	set_memory_wb(start, size >> PAGE_SHIFT);
}
#endif
540
541 #endif /* _LINUXKPI_LINUX_IO_H_ */
Cache object: e97ee26e9fb6270bad458c16ed4e3d79
|