/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 * All Rights Reserved
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */

#ifndef	_SYS_BYTEORDER_H
#define	_SYS_BYTEORDER_H

#include <sys/endian.h>
#include <netinet/in.h>
#include <sys/isa_defs.h>
#include <inttypes.h>

#if defined(__GNUC__) && defined(_ASM_INLINES) && \
	(defined(__i386) || defined(__amd64))
#include <asm/byteorder.h>
#endif

#ifdef	__cplusplus
extern "C" {
#endif

/*
 * macros for conversion between host and (internet) network byte order
 */
#if !defined(_XPG4_2) || defined(__EXTENSIONS__)

#ifdef __COVERITY__
/*
 * Coverity's taint warnings from byteswapping are false positives for us.
 * Suppress them by hiding byteswapping from Coverity.
 */
#define	BSWAP_8(x)	((x) & 0xff)
#define	BSWAP_16(x)	((x) & 0xffff)
#define	BSWAP_32(x)	((x) & 0xffffffff)
#define	BSWAP_64(x)	(x)

#else /* __COVERITY__ */

/*
 * Macros to reverse byte order
 */
#define	BSWAP_8(x)	((x) & 0xff)
#define	BSWAP_16(x)	((BSWAP_8(x) << 8) | BSWAP_8((x) >> 8))
#define	BSWAP_32(x)	((BSWAP_16(x) << 16) | BSWAP_16((x) >> 16))
#define	BSWAP_64(x)	((BSWAP_32(x) << 32) | BSWAP_32((x) >> 32))

#endif /* __COVERITY__ */
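
/*
 * For example, BSWAP_32(0x12345678) evaluates to 0x78563412.  Note that
 * each BSWAP_n macro expands its argument more than once, so arguments
 * must be free of side effects (e.g. avoid BSWAP_16(*p++)).
 */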

#define	BMASK_8(x)	((x) & 0xff)
#define	BMASK_16(x)	((x) & 0xffff)
#define	BMASK_32(x)	((x) & 0xffffffff)
#define	BMASK_64(x)	(x)
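
/*
 * The BMASK_n macros truncate a value to n bits without reordering any
 * bytes; for example, BMASK_16(0x12345678) evaluates to 0x5678.  They
 * act as the identity conversion when the host's native order already
 * matches the requested order (see the BE_n/LE_n macros below).
 */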

/*
 * Macros to convert from a specific byte order to/from native byte order
 */
#ifdef _ZFS_BIG_ENDIAN
#define	BE_8(x)		BMASK_8(x)
#define	BE_16(x)	BMASK_16(x)
#define	BE_32(x)	BMASK_32(x)
#define	BE_64(x)	BMASK_64(x)
#define	LE_8(x)		BSWAP_8(x)
#define	LE_16(x)	BSWAP_16(x)
#define	LE_32(x)	BSWAP_32(x)
#define	LE_64(x)	BSWAP_64(x)
#else
#define	LE_8(x)		BMASK_8(x)
#define	LE_16(x)	BMASK_16(x)
#define	LE_32(x)	BMASK_32(x)
#define	LE_64(x)	BMASK_64(x)
#define	BE_8(x)		BSWAP_8(x)
#define	BE_16(x)	BSWAP_16(x)
#define	BE_32(x)	BSWAP_32(x)
#define	BE_64(x)	BSWAP_64(x)
#endif
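
/*
 * For example, on a little-endian host BE_32(0x12345678) evaluates to
 * 0x78563412 (the value as laid out in big-endian storage), while
 * LE_32(0x12345678) is simply 0x12345678.  The conversions are
 * symmetric, so the same macro converts in both directions.
 */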

#ifdef _ZFS_BIG_ENDIAN
static __inline__ uint64_t
htonll(uint64_t n)
{
	return (n);
}

static __inline__ uint64_t
ntohll(uint64_t n)
{
	return (n);
}
#else
static __inline__ uint64_t
htonll(uint64_t n)
{
	return ((((uint64_t)htonl(n)) << 32) + htonl(n >> 32));
}

static __inline__ uint64_t
ntohll(uint64_t n)
{
	return ((((uint64_t)ntohl(n)) << 32) + ntohl(n >> 32));
}
#endif
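
/*
 * htonll()/ntohll() extend the familiar htonl()/ntohl() pairing to
 * 64 bits: on a little-endian host each 32-bit half is byteswapped and
 * the halves are exchanged, so htonll(0x0102030405060708ULL) evaluates
 * to 0x0807060504030201ULL; on a big-endian host both are the identity.
 */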

/*
 * Macros to read unaligned values from a specific byte order to
 * native byte order
 */

#define	BE_IN8(xa) \
	*((uint8_t *)(xa))

#define	BE_IN16(xa) \
	(((uint16_t)BE_IN8(xa) << 8) | BE_IN8((uint8_t *)(xa)+1))

#define	BE_IN32(xa) \
	(((uint32_t)BE_IN16(xa) << 16) | BE_IN16((uint8_t *)(xa)+2))

#define	BE_IN64(xa) \
	(((uint64_t)BE_IN32(xa) << 32) | BE_IN32((uint8_t *)(xa)+4))

#define	LE_IN8(xa) \
	*((uint8_t *)(xa))

#define	LE_IN16(xa) \
	(((uint16_t)LE_IN8((uint8_t *)(xa) + 1) << 8) | LE_IN8(xa))

#define	LE_IN32(xa) \
	(((uint32_t)LE_IN16((uint8_t *)(xa) + 2) << 16) | LE_IN16(xa))

#define	LE_IN64(xa) \
	(((uint64_t)LE_IN32((uint8_t *)(xa) + 4) << 32) | LE_IN32(xa))
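
/*
 * Because these macros assemble the result one byte at a time, the
 * source pointer needs no particular alignment.  For a buffer
 * uint8_t buf[] = { 0x12, 0x34 }, BE_IN16(buf) evaluates to 0x1234
 * and LE_IN16(buf) to 0x3412.
 */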

/*
 * Macros to write unaligned values from native byte order to a specific byte
 * order.
 */

#define	BE_OUT8(xa, yv)	*((uint8_t *)(xa)) = (uint8_t)(yv);

#define	BE_OUT16(xa, yv) \
	BE_OUT8((uint8_t *)(xa) + 1, yv); \
	BE_OUT8((uint8_t *)(xa), (yv) >> 8);

#define	BE_OUT32(xa, yv) \
	BE_OUT16((uint8_t *)(xa) + 2, yv); \
	BE_OUT16((uint8_t *)(xa), (yv) >> 16);

#define	BE_OUT64(xa, yv) \
	BE_OUT32((uint8_t *)(xa) + 4, yv); \
	BE_OUT32((uint8_t *)(xa), (yv) >> 32);

#define	LE_OUT8(xa, yv)	*((uint8_t *)(xa)) = (uint8_t)(yv);

#define	LE_OUT16(xa, yv) \
	LE_OUT8((uint8_t *)(xa), yv); \
	LE_OUT8((uint8_t *)(xa) + 1, (yv) >> 8);

#define	LE_OUT32(xa, yv) \
	LE_OUT16((uint8_t *)(xa), yv); \
	LE_OUT16((uint8_t *)(xa) + 2, (yv) >> 16);

#define	LE_OUT64(xa, yv) \
	LE_OUT32((uint8_t *)(xa), yv); \
	LE_OUT32((uint8_t *)(xa) + 4, (yv) >> 32);
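
/*
 * The *_OUT* macros expand to multiple statements rather than a single
 * expression, so wrap them in braces when used as the body of an if or
 * a loop.  For example, BE_OUT32(buf, 0x12345678) stores the bytes
 * 0x12, 0x34, 0x56, 0x78 at buf[0] through buf[3].
 */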

#endif	/* !defined(_XPG4_2) || defined(__EXTENSIONS__) */

#ifdef	__cplusplus
}
#endif

#endif /* _SYS_BYTEORDER_H */