1 /*-
2 * Copyright (c) 1990 The Regents of the University of California.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 4. Neither the name of the University nor the names of its contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * from tahoe: in_cksum.c 1.2 86/01/05
30 * from: @(#)in_cksum.c 1.3 (Berkeley) 1/19/91
31 */
32
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35
36 /*
37 * MPsafe: alfred
38 */
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/mbuf.h>
42
43 #include <netinet/in.h>
44 #include <netinet/in_systm.h>
45 #include <netinet/ip.h>
46
47 #include <machine/in_cksum.h>
48
49 /*
50 * Checksum routine for Internet Protocol family headers.
51 *
52 * This routine is very heavily used in the network
53 * code and should be modified for each CPU to be as fast as possible.
54 *
55 * This implementation is 386 version.
56 */
57
#undef ADDCARRY
/*
 * Fold the end-around carry of a 16-bit ones-complement sum: a value
 * above 0xffff is reduced by 0xffff.  The argument must be an lvalue
 * and may be evaluated more than once.
 * NOTE(review): the bare "if" body would pair with a following "else"
 * if the macro were used in an unbraced if/else; all uses in this file
 * are plain statements, so it is safe here.
 */
#define ADDCARRY(x) if ((x) > 0xffff) (x) -= 0xffff
/*
 * icc needs to be special cased here, as the asm code below results
 * in broken code if compiled with icc.
 */
#if !defined(__GNUCLIKE_ASM) || defined(__INTEL_COMPILER)
/* non gcc parts stolen from sys/alpha/alpha/in_cksum.c */
/*
 * Partially fold the 64-bit accumulator 'sum' by adding its four
 * 16-bit words; requires a 'union q_util q_util' in scope.  The
 * result fits in 18 bits (at most 4 * 0xffff).
 */
#define REDUCE32 \
{ \
	q_util.q = sum; \
	sum = q_util.s[0] + q_util.s[1] + q_util.s[2] + q_util.s[3]; \
}
/*
 * Fully fold 'sum' to a 16-bit value with the end-around carry
 * applied; requires 'union q_util q_util' and 'union l_util l_util'
 * in scope.
 */
#define REDUCE16 \
{ \
	q_util.q = sum; \
	l_util.l = q_util.s[0] + q_util.s[1] + q_util.s[2] + q_util.s[3]; \
	sum = l_util.s[0] + l_util.s[1]; \
	ADDCARRY(sum); \
}
#endif
/* Fold a 32-bit accumulator to 16 bits and apply the end-around carry. */
#define REDUCE {sum = (sum & 0xffff) + (sum >> 16); ADDCARRY(sum);}
80
81 #if !defined(__GNUCLIKE_ASM) || defined(__INTEL_COMPILER)
/*
 * in_masks[(offset << 2) + n] selects the n valid data bytes of a
 * 32-bit little-endian word (this file is i386-only) whose useful data
 * starts at byte 'offset' within the word.  The rows for offsets 2 and
 * 3 repeat their widest mask because fewer than n bytes remain in the
 * word; this lets callers clamp the column index to 3.
 */
static const u_int32_t in_masks[] = {
	/*0 bytes*/ /*1 byte*/	/*2 bytes*/ /*3 bytes*/
	0x00000000, 0x000000FF, 0x0000FFFF, 0x00FFFFFF,	/* offset 0 */
	0x00000000, 0x0000FF00, 0x00FFFF00, 0xFFFFFF00,	/* offset 1 */
	0x00000000, 0x00FF0000, 0xFFFF0000, 0xFFFF0000,	/* offset 2 */
	0x00000000, 0xFF000000, 0xFF000000, 0xFF000000,	/* offset 3 */
};

/*
 * Scratch unions used by REDUCE16/REDUCE32 to view a 32- or 64-bit
 * accumulator as an array of 16-bit words.
 */
union l_util {
	u_int16_t s[2];
	u_int32_t l;
};
union q_util {
	u_int16_t s[4];
	u_int32_t l[2];
	u_int64_t q;
};
99
/*
 * Sum 'len' bytes starting at 'lw' as a ones-complement partial sum,
 * accumulating 32 bits at a time into a 64-bit accumulator and folding
 * the result with REDUCE32 before returning.  The caller performs the
 * final REDUCE16/complement.  'lw' need not be 4-byte aligned.
 */
static u_int64_t
in_cksumdata(const u_int32_t *lw, int len)
{
	u_int64_t sum = 0;
	u_int64_t prefilled;
	int offset;
	union q_util q_util;

	/*
	 * Fast path: a 4-byte-aligned, 20-byte buffer -- the size of an
	 * IPv4 header without options.
	 */
	if ((3 & (long) lw) == 0 && len == 20) {
		sum = (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3] + lw[4];
		REDUCE32;
		return sum;
	}

	/*
	 * Misaligned start: round 'lw' down to a 4-byte boundary and use
	 * in_masks to zero the bytes that precede the real start of the
	 * buffer.  The row is selected by the offset, the column by the
	 * number of valid bytes; clamping the column to 3 is safe because
	 * rows 2 and 3 repeat their widest mask.
	 */
	if ((offset = 3 & (long) lw) != 0) {
		const u_int32_t *masks = in_masks + (offset << 2);
		lw = (u_int32_t *) (((long) lw) - offset);
		sum = *lw++ & masks[len >= 3 ? 3 : len];
		len -= 4 - offset;
		if (len <= 0) {
			/* The whole buffer fit inside that first word. */
			REDUCE32;
			return sum;
		}
	}
#if 0
	/*
	 * Force to cache line boundary.
	 */
	offset = 32 - (0x1f & (long) lw);
	if (offset < 32 && len > offset) {
		len -= offset;
		if (4 & offset) {
			sum += (u_int64_t) lw[0];
			lw += 1;
		}
		if (8 & offset) {
			sum += (u_int64_t) lw[0] + lw[1];
			lw += 2;
		}
		if (16 & offset) {
			sum += (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3];
			lw += 4;
		}
	}
#endif
	/*
	 * access prefilling to start load of next cache line.
	 * then add current cache line
	 * save result of prefilling for loop iteration.
	 */
	prefilled = lw[0];
	/*
	 * Main loop: 32 bytes per pass.  The ">= 4" bound (rather than
	 * ">= 0") keeps the lw[8] prefetch -- the first word of the NEXT
	 * 32-byte block -- inside the buffer.
	 */
	while ((len -= 32) >= 4) {
		u_int64_t prefilling = lw[8];
		sum += prefilled + lw[1] + lw[2] + lw[3]
			+ lw[4] + lw[5] + lw[6] + lw[7];
		lw += 8;
		prefilled = prefilling;
	}
	if (len >= 0) {
		/*
		 * Exactly one full 32-byte block remains; its first word
		 * is already in 'prefilled'.
		 */
		sum += prefilled + lw[1] + lw[2] + lw[3]
			+ lw[4] + lw[5] + lw[6] + lw[7];
		lw += 8;
	} else {
		len += 32;	/* undo the final loop decrement */
	}
	/* 16-byte, then 4-byte tail loops. */
	while ((len -= 16) >= 0) {
		sum += (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3];
		lw += 4;
	}
	len += 16;
	while ((len -= 4) >= 0) {
		sum += (u_int64_t) *lw++;
	}
	len += 4;
	/* Final 1-3 bytes: mask the trailing word (offset-0 mask row). */
	if (len > 0)
		sum += (u_int64_t) (in_masks[len] & *lw);
	REDUCE32;
	return sum;
}
179
/*
 * Add two 16-bit quantities in ones-complement arithmetic: the sum is
 * formed in a wide type and any carry out of bit 15 is folded back in
 * (values above 0xffff are reduced by 0xffff).
 */
u_short
in_addword(u_short a, u_short b)
{
	u_int64_t partial = (u_int64_t)a + b;

	if (partial > 0xffff)
		partial -= 0xffff;	/* end-around carry */
	return (partial);
}
188
/*
 * Sum the three 32-bit pieces of a pseudo-header and fold the 64-bit
 * result down to a 16-bit ones-complement value.  The fold adds the
 * four 16-bit chunks of the wide sum, then folds once more and applies
 * the end-around carry -- arithmetically identical to REDUCE16, without
 * the union type-punning.
 */
u_short
in_pseudo(u_int32_t a, u_int32_t b, u_int32_t c)
{
	u_int64_t acc;
	u_int32_t folded;

	acc = (u_int64_t)a + b + c;
	folded = (u_int32_t)(acc & 0xffff) +
	    (u_int32_t)((acc >> 16) & 0xffff) +
	    (u_int32_t)((acc >> 32) & 0xffff) +
	    (u_int32_t)(acc >> 48);
	folded = (folded & 0xffff) + (folded >> 16);
	if (folded > 0xffff)
		folded -= 0xffff;	/* end-around carry */
	return (folded);
}
200
/*
 * Checksum 'len' bytes of the mbuf chain 'm', ignoring the first
 * 'skip' bytes.  Returns the complemented 16-bit ones-complement sum,
 * ready to be stored into a protocol header.  Portable (non-asm)
 * version.
 */
u_short
in_cksum_skip(struct mbuf *m, int len, int skip)
{
	u_int64_t sum = 0;
	int mlen = 0;
	int clen = 0;	/* bytes checksummed so far */
	caddr_t addr;
	union q_util q_util;
	union l_util l_util;

	len -= skip;
	/* Advance past the first 'skip' bytes of the chain. */
	for (; skip && m; m = m->m_next) {
		if (m->m_len > skip) {
			mlen = m->m_len - skip;
			addr = mtod(m, caddr_t) + skip;
			goto skip_start;
		} else {
			skip -= m->m_len;
		}
	}

	for (; m && len; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		mlen = m->m_len;
		addr = mtod(m, caddr_t);
skip_start:
		if (len < mlen)
			mlen = len;
		/*
		 * in_cksumdata() sums data a 32-bit word at a time, so its
		 * partial sum is byte-swapped relative to our accumulator
		 * whenever the parity of the byte count so far (clen)
		 * differs from the parity of this chunk's start address.
		 * Shifting the partial sum left by 8 swaps it back into
		 * place (ones-complement sums commute with byte swaps; the
		 * REDUCE16 below folds the resulting carries).
		 */
		if ((clen ^ (long) addr) & 1)
			sum += in_cksumdata((const u_int32_t *)addr, mlen) << 8;
		else
			sum += in_cksumdata((const u_int32_t *)addr, mlen);

		clen += mlen;
		len -= mlen;
	}
	REDUCE16;
	return (~sum & 0xffff);
}
241
/*
 * Checksum the fixed-size IPv4 header at 'ip'.  Only sizeof(struct ip)
 * bytes are summed, so IP options (if present) are NOT included.
 * Returns the complemented 16-bit checksum.
 */
u_int in_cksum_hdr(const struct ip *ip)
{
	u_int64_t sum = in_cksumdata((const u_int32_t *)ip, sizeof(struct ip));
	union q_util q_util;
	union l_util l_util;

	REDUCE16;
	return (~sum & 0xffff);
}
251 #else
252
253 /*
254 * These asm statements require __volatile because they pass information
255 * via the condition codes. GCC does not currently provide a way to specify
256 * the condition codes as an input or output operand.
257 *
258 * The LOAD macro below is effectively a prefetch into cache. GCC will
259 * load the value into a register but will not use it. Since modern CPUs
260 * reorder operations, this will generally take place in parallel with
261 * other calculations.
262 */
263 u_short
264 in_cksum_skip(m, len, skip)
265 struct mbuf *m;
266 int len;
267 int skip;
268 {
269 register u_short *w;
270 register unsigned sum = 0;
271 register int mlen = 0;
272 int byte_swapped = 0;
273 union { char c[2]; u_short s; } su;
274
275 len -= skip;
276 for (; skip && m; m = m->m_next) {
277 if (m->m_len > skip) {
278 mlen = m->m_len - skip;
279 w = (u_short *)(mtod(m, u_char *) + skip);
280 goto skip_start;
281 } else {
282 skip -= m->m_len;
283 }
284 }
285
286 for (;m && len; m = m->m_next) {
287 if (m->m_len == 0)
288 continue;
289 w = mtod(m, u_short *);
290 if (mlen == -1) {
291 /*
292 * The first byte of this mbuf is the continuation
293 * of a word spanning between this mbuf and the
294 * last mbuf.
295 */
296
297 /* su.c[0] is already saved when scanning previous
298 * mbuf. sum was REDUCEd when we found mlen == -1
299 */
300 su.c[1] = *(u_char *)w;
301 sum += su.s;
302 w = (u_short *)((char *)w + 1);
303 mlen = m->m_len - 1;
304 len--;
305 } else
306 mlen = m->m_len;
307 skip_start:
308 if (len < mlen)
309 mlen = len;
310 len -= mlen;
311 /*
312 * Force to long boundary so we do longword aligned
313 * memory operations
314 */
315 if (3 & (int) w) {
316 REDUCE;
317 if ((1 & (int) w) && (mlen > 0)) {
318 sum <<= 8;
319 su.c[0] = *(char *)w;
320 w = (u_short *)((char *)w + 1);
321 mlen--;
322 byte_swapped = 1;
323 }
324 if ((2 & (int) w) && (mlen >= 2)) {
325 sum += *w++;
326 mlen -= 2;
327 }
328 }
329 /*
330 * Advance to a 486 cache line boundary.
331 */
332 if (4 & (int) w && mlen >= 4) {
333 __asm __volatile (
334 "addl %1, %0\n"
335 "adcl $0, %0"
336 : "+r" (sum)
337 : "g" (((const u_int32_t *)w)[0])
338 );
339 w += 2;
340 mlen -= 4;
341 }
342 if (8 & (int) w && mlen >= 8) {
343 __asm __volatile (
344 "addl %1, %0\n"
345 "adcl %2, %0\n"
346 "adcl $0, %0"
347 : "+r" (sum)
348 : "g" (((const u_int32_t *)w)[0]),
349 "g" (((const u_int32_t *)w)[1])
350 );
351 w += 4;
352 mlen -= 8;
353 }
354 /*
355 * Do as much of the checksum as possible 32 bits at at time.
356 * In fact, this loop is unrolled to make overhead from
357 * branches &c small.
358 */
359 mlen -= 1;
360 while ((mlen -= 32) >= 0) {
361 /*
362 * Add with carry 16 words and fold in the last
363 * carry by adding a 0 with carry.
364 *
365 * The early ADD(16) and the LOAD(32) are to load
366 * the next 2 cache lines in advance on 486's. The
367 * 486 has a penalty of 2 clock cycles for loading
368 * a cache line, plus whatever time the external
369 * memory takes to load the first word(s) addressed.
370 * These penalties are unavoidable. Subsequent
371 * accesses to a cache line being loaded (and to
372 * other external memory?) are delayed until the
373 * whole load finishes. These penalties are mostly
374 * avoided by not accessing external memory for
375 * 8 cycles after the ADD(16) and 12 cycles after
376 * the LOAD(32). The loop terminates when mlen
377 * is initially 33 (not 32) to guaranteed that
378 * the LOAD(32) is within bounds.
379 */
380 __asm __volatile (
381 "addl %1, %0\n"
382 "adcl %2, %0\n"
383 "adcl %3, %0\n"
384 "adcl %4, %0\n"
385 "adcl %5, %0\n"
386 "mov %6, %%eax\n"
387 "adcl %7, %0\n"
388 "adcl %8, %0\n"
389 "adcl %9, %0\n"
390 "adcl $0, %0"
391 : "+r" (sum)
392 : "g" (((const u_int32_t *)w)[4]),
393 "g" (((const u_int32_t *)w)[0]),
394 "g" (((const u_int32_t *)w)[1]),
395 "g" (((const u_int32_t *)w)[2]),
396 "g" (((const u_int32_t *)w)[3]),
397 "g" (((const u_int32_t *)w)[8]),
398 "g" (((const u_int32_t *)w)[5]),
399 "g" (((const u_int32_t *)w)[6]),
400 "g" (((const u_int32_t *)w)[7])
401 : "eax"
402 );
403 w += 16;
404 }
405 mlen += 32 + 1;
406 if (mlen >= 32) {
407 __asm __volatile (
408 "addl %1, %0\n"
409 "adcl %2, %0\n"
410 "adcl %3, %0\n"
411 "adcl %4, %0\n"
412 "adcl %5, %0\n"
413 "adcl %6, %0\n"
414 "adcl %7, %0\n"
415 "adcl %8, %0\n"
416 "adcl $0, %0"
417 : "+r" (sum)
418 : "g" (((const u_int32_t *)w)[4]),
419 "g" (((const u_int32_t *)w)[0]),
420 "g" (((const u_int32_t *)w)[1]),
421 "g" (((const u_int32_t *)w)[2]),
422 "g" (((const u_int32_t *)w)[3]),
423 "g" (((const u_int32_t *)w)[5]),
424 "g" (((const u_int32_t *)w)[6]),
425 "g" (((const u_int32_t *)w)[7])
426 );
427 w += 16;
428 mlen -= 32;
429 }
430 if (mlen >= 16) {
431 __asm __volatile (
432 "addl %1, %0\n"
433 "adcl %2, %0\n"
434 "adcl %3, %0\n"
435 "adcl %4, %0\n"
436 "adcl $0, %0"
437 : "+r" (sum)
438 : "g" (((const u_int32_t *)w)[0]),
439 "g" (((const u_int32_t *)w)[1]),
440 "g" (((const u_int32_t *)w)[2]),
441 "g" (((const u_int32_t *)w)[3])
442 );
443 w += 8;
444 mlen -= 16;
445 }
446 if (mlen >= 8) {
447 __asm __volatile (
448 "addl %1, %0\n"
449 "adcl %2, %0\n"
450 "adcl $0, %0"
451 : "+r" (sum)
452 : "g" (((const u_int32_t *)w)[0]),
453 "g" (((const u_int32_t *)w)[1])
454 );
455 w += 4;
456 mlen -= 8;
457 }
458 if (mlen == 0 && byte_swapped == 0)
459 continue; /* worth 1% maybe ?? */
460 REDUCE;
461 while ((mlen -= 2) >= 0) {
462 sum += *w++;
463 }
464 if (byte_swapped) {
465 sum <<= 8;
466 byte_swapped = 0;
467 if (mlen == -1) {
468 su.c[1] = *(char *)w;
469 sum += su.s;
470 mlen = 0;
471 } else
472 mlen = -1;
473 } else if (mlen == -1)
474 /*
475 * This mbuf has odd number of bytes.
476 * There could be a word split betwen
477 * this mbuf and the next mbuf.
478 * Save the last byte (to prepend to next mbuf).
479 */
480 su.c[0] = *(char *)w;
481 }
482
483 if (len)
484 printf("%s: out of data by %d\n", __func__, len);
485 if (mlen == -1) {
486 /* The last mbuf has odd # of bytes. Follow the
487 standard (the odd byte is shifted left by 8 bits) */
488 su.c[1] = 0;
489 sum += su.s;
490 }
491 REDUCE;
492 return (~sum & 0xffff);
493 }
494 #endif
/* Cache object: 7cfe086cb096dc00c63116e9997e2f99 */