/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from tahoe:	in_cksum.c	1.2	86/01/05
 *	from:		@(#)in_cksum.c	1.3 (Berkeley) 1/19/91
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * MPsafe: alfred
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>

#include <machine/in_cksum.h>

/*
 * Checksum routine for Internet Protocol family headers.
 *
 * This routine is very heavily used in the network
 * code and should be modified for each CPU to be as fast as possible.
 *
 * This implementation is the 386 version.
 */
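
/*
 * Background (RFC 1071): the Internet checksum is the 16-bit one's
 * complement of the one's complement sum of the data taken as 16-bit
 * words.  Because one's complement addition is commutative and
 * associative, words may be accumulated in any order and in a wider
 * register, provided the carries are eventually folded back into the
 * low 16 bits.  The routines below exploit this by summing 32 or 64
 * bits at a time and folding afterwards.
 *
 * A minimal reference sketch follows (never compiled; in_cksum_ref is
 * a hypothetical name used only here).  It assumes i386 little-endian
 * byte order and short buffers, and can serve to validate the
 * optimized routines against small test vectors.
 */
#if 0
static u_short
in_cksum_ref(const u_char *p, int len)
{
	u_int32_t sum = 0;

	while (len > 1) {
		sum += *(const u_short *)p;	/* one 16-bit word at a time */
		p += 2;
		len -= 2;
	}
	if (len == 1)
		sum += *p;	/* trailing odd byte: zero-padded; on a
				 * little-endian CPU it sits in the low half */
	sum = (sum & 0xffff) + (sum >> 16);	/* fold carries into the low */
	sum = (sum & 0xffff) + (sum >> 16);	/* word; twice always suffices */
	return (~sum & 0xffff);
}
#endif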

#undef ADDCARRY
#define ADDCARRY(x) if ((x) > 0xffff) (x) -= 0xffff
/*
 * icc needs to be special cased here, as the asm code below results
 * in broken code if compiled with icc.
 */
#if !defined(__GNUCLIKE_ASM) || defined(__INTEL_COMPILER)
/* non gcc parts stolen from sys/alpha/alpha/in_cksum.c */
#define REDUCE32							\
	{								\
	q_util.q = sum;							\
	sum = q_util.s[0] + q_util.s[1] + q_util.s[2] + q_util.s[3];	\
	}
#define REDUCE16							\
	{								\
	q_util.q = sum;							\
	l_util.l = q_util.s[0] + q_util.s[1] + q_util.s[2] + q_util.s[3]; \
	sum = l_util.s[0] + l_util.s[1];				\
	ADDCARRY(sum);							\
	}
#endif
#define REDUCE {sum = (sum & 0xffff) + (sum >> 16); ADDCARRY(sum);}
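
/*
 * Worked example of the folds above: a 32-bit partial sum of
 * 0x0003fffd REDUCEs to (0xfffd + 0x0003) == 0x10000, and ADDCARRY
 * then subtracts 0xffff, leaving 0x0001.  Subtracting 0xffff is the
 * end-around carry of one's complement addition; a single subtraction
 * suffices because the folded value is at most 0xffff + 0xffff.
 */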

#if !defined(__GNUCLIKE_ASM) || defined(__INTEL_COMPILER)
static const u_int32_t in_masks[] = {
	/*0 bytes*/ /*1 byte*/	/*2 bytes*/ /*3 bytes*/
	0x00000000, 0x000000FF, 0x0000FFFF, 0x00FFFFFF,	/* offset 0 */
	0x00000000, 0x0000FF00, 0x00FFFF00, 0xFFFFFF00,	/* offset 1 */
	0x00000000, 0x00FF0000, 0xFFFF0000, 0xFFFF0000,	/* offset 2 */
	0x00000000, 0xFF000000, 0xFF000000, 0xFF000000,	/* offset 3 */
};

union l_util {
	u_int16_t s[2];
	u_int32_t l;
};
union q_util {
	u_int16_t s[4];
	u_int32_t l[2];
	u_int64_t q;
};
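
/*
 * REDUCE32/REDUCE16 use these unions to view the 64-bit accumulator
 * as four 16-bit halves: storing through q_util.q and reading back
 * q_util.s[0..3] splits the sum without any shifting, and re-adding
 * the 16-bit pieces migrates all carries toward the low word.  GCC
 * explicitly permits this kind of type punning through a union.
 */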

static u_int64_t
in_cksumdata(const u_int32_t *lw, int len)
{
	u_int64_t sum = 0;
	u_int64_t prefilled;
	int offset;
	union q_util q_util;

	if ((3 & (long) lw) == 0 && len == 20) {
		sum = (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3] + lw[4];
		REDUCE32;
		return sum;
	}

	if ((offset = 3 & (long) lw) != 0) {
		const u_int32_t *masks = in_masks + (offset << 2);
		lw = (u_int32_t *) (((long) lw) - offset);
		sum = *lw++ & masks[len >= 3 ? 3 : len];
		len -= 4 - offset;
		if (len <= 0) {
			REDUCE32;
			return sum;
		}
	}
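	/*
	 * Example of the mask fixup above: with lw one byte past a
	 * longword boundary (offset 1) and len >= 3, masks[3] is
	 * 0xFFFFFF00, which zeroes the one garbage byte in the aligned
	 * longword and keeps the first three data bytes; len then drops
	 * by 4 - offset == 3.  Reading the whole aligned longword is
	 * safe since an aligned 4-byte load cannot cross a page boundary.
	 */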
#if 0
	/*
	 * Force to cache line boundary.
	 */
	offset = 32 - (0x1f & (long) lw);
	if (offset < 32 && len > offset) {
		len -= offset;
		if (4 & offset) {
			sum += (u_int64_t) lw[0];
			lw += 1;
		}
		if (8 & offset) {
			sum += (u_int64_t) lw[0] + lw[1];
			lw += 2;
		}
		if (16 & offset) {
			sum += (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3];
			lw += 4;
		}
	}
#endif
	/*
	 * Prefetch: touch the next cache line (lw[8]) to start its load,
	 * then sum the current cache line, and carry the prefetched word
	 * over into the next loop iteration.
	 */
	prefilled = lw[0];
	while ((len -= 32) >= 4) {
		u_int64_t prefilling = lw[8];
		sum += prefilled + lw[1] + lw[2] + lw[3]
			+ lw[4] + lw[5] + lw[6] + lw[7];
		lw += 8;
		prefilled = prefilling;
	}
	if (len >= 0) {
		sum += prefilled + lw[1] + lw[2] + lw[3]
			+ lw[4] + lw[5] + lw[6] + lw[7];
		lw += 8;
	} else {
		len += 32;
	}
	while ((len -= 16) >= 0) {
		sum += (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3];
		lw += 4;
	}
	len += 16;
	while ((len -= 4) >= 0) {
		sum += (u_int64_t) *lw++;
	}
	len += 4;
	if (len > 0)
		sum += (u_int64_t) (in_masks[len] & *lw);
	REDUCE32;
	return sum;
}

u_short
in_addword(u_short a, u_short b)
{
	u_int64_t sum = a + b;

	ADDCARRY(sum);
	return (sum);
}
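
/*
 * in_addword() gives callers a primitive suitable for incremental
 * checksum updates in the style of RFC 1624: adding the one's
 * complement of an old 16-bit field value and the new value to an
 * existing checksum re-validates it without rescanning the packet.
 */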

u_short
in_pseudo(u_int32_t a, u_int32_t b, u_int32_t c)
{
	u_int64_t sum;
	union q_util q_util;
	union l_util l_util;

	sum = (u_int64_t) a + b + c;
	REDUCE16;
	return (sum);
}
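
/*
 * Illustrative use of in_pseudo() (names from the netinet headers):
 * a TCP output path can seed th_sum with the pseudo-header sum so
 * that only the remaining data needs to be covered later, e.g.
 *
 *	th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
 *	    htons(sizeof(struct tcphdr) + IPPROTO_TCP));
 *
 * The third argument carries whatever 16-bit pseudo-header fields
 * remain (protocol and length) combined into one 32-bit value.
 */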
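
/*
 * Checksum the mbuf chain m, covering bytes skip through len - 1;
 * that is, the caller passes the total length and the number of
 * leading bytes to ignore.  Zero-length mbufs are skipped.  clen
 * counts the bytes summed so far so that mbufs starting at odd byte
 * parity can be folded in byte-swapped (see below).
 */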
u_short
in_cksum_skip(struct mbuf *m, int len, int skip)
{
	u_int64_t sum = 0;
	int mlen = 0;
	int clen = 0;
	caddr_t addr;
	union q_util q_util;
	union l_util l_util;

	len -= skip;
	for (; skip && m; m = m->m_next) {
		if (m->m_len > skip) {
			mlen = m->m_len - skip;
			addr = mtod(m, caddr_t) + skip;
			goto skip_start;
		} else {
			skip -= m->m_len;
		}
	}

	for (; m && len; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		mlen = m->m_len;
		addr = mtod(m, caddr_t);
skip_start:
		if (len < mlen)
			mlen = len;
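		/*
		 * One's complement sums are insensitive to byte order
		 * only for 16-bit aligned data.  When this mbuf's data
		 * starts at the opposite byte parity from the bytes
		 * summed so far ((clen ^ addr) & 1), its partial sum is
		 * byte-swapped relative to the running total; shifting
		 * the (32-bit reduced) partial sum left by 8 performs
		 * exactly that swap once the final fold is done.
		 */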
		if ((clen ^ (long) addr) & 1)
			sum += in_cksumdata((const u_int32_t *)addr, mlen) << 8;
		else
			sum += in_cksumdata((const u_int32_t *)addr, mlen);

		clen += mlen;
		len -= mlen;
	}
	REDUCE16;
	return (~sum & 0xffff);
}
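
/*
 * Checksum a bare IP header.  The fixed sizeof(struct ip) length
 * means this covers only the 20-byte header proper and must not be
 * used on headers carrying IP options.
 */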
u_int
in_cksum_hdr(const struct ip *ip)
{
	u_int64_t sum = in_cksumdata((const u_int32_t *)ip, sizeof(struct ip));
	union q_util q_util;
	union l_util l_util;

	REDUCE16;
	return (~sum & 0xffff);
}
#else

/*
 * These asm statements require __volatile because they pass information
 * via the condition codes.  GCC does not currently provide a way to specify
 * the condition codes as an input or output operand.
 *
 * The LOAD macro below is effectively a prefetch into cache.  GCC will
 * load the value into a register but will not use it.  Since modern CPUs
 * reorder operations, this will generally take place in parallel with
 * other calculations.
 */
#define ADD(n)	__asm __volatile \
		("addl %1, %0" : "+r" (sum) : \
		 "g" (((const u_int32_t *)w)[n / 4]))
#define ADDC(n)	__asm __volatile \
		("adcl %1, %0" : "+r" (sum) : \
		 "g" (((const u_int32_t *)w)[n / 4]))
#define LOAD(n)	__asm __volatile \
		("" : : "r" (((const u_int32_t *)w)[n / 4]))
#define MOP	__asm __volatile \
		("adcl $0, %0" : "+r" (sum))

u_short
in_cksum_skip(struct mbuf *m, int len, int skip)
{
	register u_short *w;
	register unsigned sum = 0;
	register int mlen = 0;
	int byte_swapped = 0;
	union { char c[2]; u_short s; } su;

	len -= skip;
	for (; skip && m; m = m->m_next) {
		if (m->m_len > skip) {
			mlen = m->m_len - skip;
			w = (u_short *)(mtod(m, u_char *) + skip);
			goto skip_start;
		} else {
			skip -= m->m_len;
		}
	}

	for (; m && len; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		w = mtod(m, u_short *);
		if (mlen == -1) {
			/*
			 * The first byte of this mbuf is the continuation
			 * of a word spanning between this mbuf and the
			 * last mbuf.
			 */

			/*
			 * su.c[0] is already saved when scanning previous
			 * mbuf.  sum was REDUCEd when we found mlen == -1.
			 */
			su.c[1] = *(u_char *)w;
			sum += su.s;
			w = (u_short *)((char *)w + 1);
			mlen = m->m_len - 1;
			len--;
		} else
			mlen = m->m_len;
skip_start:
		if (len < mlen)
			mlen = len;
		len -= mlen;
		/*
		 * Force to long boundary so we do longword aligned
		 * memory operations.
		 */
		if (3 & (int) w) {
			REDUCE;
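			/*
			 * If w is odd, save the first byte and byte-swap
			 * the running sum (the sum <<= 8 before the final
			 * fold) so the rest of the mbuf can be summed as
			 * if it were aligned; the swap is undone once an
			 * even boundary is reached again.
			 */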
			if ((1 & (int) w) && (mlen > 0)) {
				sum <<= 8;
				su.c[0] = *(char *)w;
				w = (u_short *)((char *)w + 1);
				mlen--;
				byte_swapped = 1;
			}
			if ((2 & (int) w) && (mlen >= 2)) {
				sum += *w++;
				mlen -= 2;
			}
		}
		/*
		 * Advance to a 486 cache line boundary.
		 */
		if (4 & (int) w && mlen >= 4) {
			ADD(0);
			MOP;
			w += 2;
			mlen -= 4;
		}
		if (8 & (int) w && mlen >= 8) {
			ADD(0);
			ADDC(4);
			MOP;
			w += 4;
			mlen -= 8;
		}
		/*
		 * Do as much of the checksum as possible 32 bits at a time.
		 * In fact, this loop is unrolled to make overhead from
		 * branches &c small.
		 */
		mlen -= 1;
		while ((mlen -= 32) >= 0) {
			/*
			 * Add with carry 16 words and fold in the last
			 * carry by adding a 0 with carry.
			 *
			 * The early ADD(16) and the LOAD(32) are to load
			 * the next 2 cache lines in advance on 486's.  The
			 * 486 has a penalty of 2 clock cycles for loading
			 * a cache line, plus whatever time the external
			 * memory takes to load the first word(s) addressed.
			 * These penalties are unavoidable.  Subsequent
			 * accesses to a cache line being loaded (and to
			 * other external memory?) are delayed until the
			 * whole load finishes.  These penalties are mostly
			 * avoided by not accessing external memory for
			 * 8 cycles after the ADD(16) and 12 cycles after
			 * the LOAD(32).  The loop terminates when mlen
			 * is initially 33 (not 32) to guarantee that
			 * the LOAD(32) is within bounds.
			 */
			ADD(16);
			ADDC(0);
			ADDC(4);
			ADDC(8);
			ADDC(12);
			LOAD(32);
			ADDC(20);
			ADDC(24);
			ADDC(28);
			MOP;
			w += 16;
		}
		mlen += 32 + 1;
		if (mlen >= 32) {
			ADD(16);
			ADDC(0);
			ADDC(4);
			ADDC(8);
			ADDC(12);
			ADDC(20);
			ADDC(24);
			ADDC(28);
			MOP;
			w += 16;
			mlen -= 32;
		}
		if (mlen >= 16) {
			ADD(0);
			ADDC(4);
			ADDC(8);
			ADDC(12);
			MOP;
			w += 8;
			mlen -= 16;
		}
		if (mlen >= 8) {
			ADD(0);
			ADDC(4);
			MOP;
			w += 4;
			mlen -= 8;
		}
		if (mlen == 0 && byte_swapped == 0)
			continue;	/* worth 1% maybe ?? */
		REDUCE;
		while ((mlen -= 2) >= 0) {
			sum += *w++;
		}
		if (byte_swapped) {
			sum <<= 8;
			byte_swapped = 0;
			if (mlen == -1) {
				su.c[1] = *(char *)w;
				sum += su.s;
				mlen = 0;
			} else
				mlen = -1;
		} else if (mlen == -1)
			/*
			 * This mbuf has an odd number of bytes.
			 * There could be a word split between
			 * this mbuf and the next mbuf.
			 * Save the last byte (to prepend to next mbuf).
			 */
			su.c[0] = *(char *)w;
	}

	if (len)
		printf("%s: out of data by %d\n", __func__, len);
	if (mlen == -1) {
		/*
		 * The last mbuf has an odd number of bytes.  Follow the
		 * standard (the odd byte is shifted left by 8 bits).
		 */
		su.c[1] = 0;
		sum += su.s;
	}
	REDUCE;
	return (~sum & 0xffff);
}
#endif