/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from tahoe: in_cksum.c 1.2 86/01/05
 * from: @(#)in_cksum.c 1.3 (Berkeley) 1/19/91
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>

#include <machine/in_cksum.h>

/*
 * Checksum routine for Internet Protocol family headers.
 *
 * This routine is very heavily used in the network
 * code and should be modified for each CPU to be as fast as possible.
 *
 * This implementation is the 386 version.
 */

#undef ADDCARRY
#define ADDCARRY(x) if ((x) > 0xffff) (x) -= 0xffff
/*
 * icc needs to be special-cased here, as the asm code below results
 * in broken code if compiled with icc.
 */
#if !defined(__GNUCLIKE_ASM)
/* non-gcc parts stolen from sys/alpha/alpha/in_cksum.c */
#define REDUCE32                                                        \
        {                                                               \
                q_util.q = sum;                                         \
                sum = q_util.s[0] + q_util.s[1] + q_util.s[2] + q_util.s[3]; \
        }
#define REDUCE16                                                        \
        {                                                               \
                q_util.q = sum;                                         \
                l_util.l = q_util.s[0] + q_util.s[1] + q_util.s[2] + q_util.s[3]; \
                sum = l_util.s[0] + l_util.s[1];                        \
                ADDCARRY(sum);                                          \
        }
#endif
#define REDUCE {sum = (sum & 0xffff) + (sum >> 16); ADDCARRY(sum);}
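
/*
 * Worked example of the folding above: with sum == 0x0001fffe, REDUCE
 * computes 0xfffe + 0x0001 == 0xffff and ADDCARRY leaves it alone; with
 * sum == 0x00023456 it yields 0x3456 + 0x0002 == 0x3458.  Folding the
 * high half back into the low half is equivalent to the end-around carry
 * of ones-complement addition because 2^16 == 1 (mod 0xffff).
 */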

#if !defined(__GNUCLIKE_ASM)
static const u_int32_t in_masks[] = {
        /*0 bytes*/ /*1 byte*/  /*2 bytes*/ /*3 bytes*/
        0x00000000, 0x000000FF, 0x0000FFFF, 0x00FFFFFF, /* offset 0 */
        0x00000000, 0x0000FF00, 0x00FFFF00, 0xFFFFFF00, /* offset 1 */
        0x00000000, 0x00FF0000, 0xFFFF0000, 0xFFFF0000, /* offset 2 */
        0x00000000, 0xFF000000, 0xFF000000, 0xFF000000, /* offset 3 */
};
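
/*
 * Example of how the table above is indexed by in_cksumdata() below:
 * masks = in_masks + (offset << 2) selects the row for the pointer's
 * misalignment within a 32-bit word, and the column is the number of
 * data bytes in that word (capped at 3).  For data starting one byte
 * into a word with two bytes to include, in_masks[1 * 4 + 2] ==
 * 0x00FFFF00 keeps exactly those two (little-endian) byte lanes.
 */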

union l_util {
        u_int16_t s[2];
        u_int32_t l;
};
union q_util {
        u_int16_t s[4];
        u_int32_t l[2];
        u_int64_t q;
};
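
/*
 * The unions above let REDUCE32/REDUCE16 reinterpret the 64-bit
 * accumulator as four (then two) 16-bit words, so the carries collected
 * in the upper bits can be folded back into a 16-bit result.
 */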

static u_int64_t
in_cksumdata(const u_int32_t *lw, int len)
{
        u_int64_t sum = 0;
        u_int64_t prefilled;
        int offset;
        union q_util q_util;

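        /*
         * Fast path: an aligned, option-less IPv4 header is exactly
         * 20 bytes (five 32-bit words); in_cksum_hdr() below always
         * passes sizeof(struct ip) == 20.
         */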
        if ((3 & (long) lw) == 0 && len == 20) {
                sum = (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3] + lw[4];
                REDUCE32;
                return sum;
        }

        if ((offset = 3 & (long) lw) != 0) {
                const u_int32_t *masks = in_masks + (offset << 2);
                lw = (u_int32_t *) (((long) lw) - offset);
                sum = *lw++ & masks[len >= 3 ? 3 : len];
                len -= 4 - offset;
                if (len <= 0) {
                        REDUCE32;
                        return sum;
                }
        }
#if 0
        /*
         * Force to cache line boundary.
         */
        offset = 32 - (0x1f & (long) lw);
        if (offset < 32 && len > offset) {
                len -= offset;
                if (4 & offset) {
                        sum += (u_int64_t) lw[0];
                        lw += 1;
                }
                if (8 & offset) {
                        sum += (u_int64_t) lw[0] + lw[1];
                        lw += 2;
                }
                if (16 & offset) {
                        sum += (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3];
                        lw += 4;
                }
        }
#endif
        /*
         * Load the first word of the next cache line ("prefilling") early
         * to start that line's fill, then add the current cache line, and
         * carry the prefetched word over to the next loop iteration.
         */
        prefilled = lw[0];
        while ((len -= 32) >= 4) {
                u_int64_t prefilling = lw[8];
                sum += prefilled + lw[1] + lw[2] + lw[3]
                        + lw[4] + lw[5] + lw[6] + lw[7];
                lw += 8;
                prefilled = prefilling;
        }
        if (len >= 0) {
                sum += prefilled + lw[1] + lw[2] + lw[3]
                        + lw[4] + lw[5] + lw[6] + lw[7];
                lw += 8;
        } else {
                len += 32;
        }
        while ((len -= 16) >= 0) {
                sum += (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3];
                lw += 4;
        }
        len += 16;
        while ((len -= 4) >= 0) {
                sum += (u_int64_t) *lw++;
        }
        len += 4;
        if (len > 0)
                sum += (u_int64_t) (in_masks[len] & *lw);
        REDUCE32;
        return sum;
}

u_short
in_addword(u_short a, u_short b)
{
        u_int64_t sum = a + b;

        ADDCARRY(sum);
        return (sum);
}
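
/*
 * in_addword() above is a single ones-complement addition with
 * end-around carry; for example, in_addword(0xfffe, 0x0003) first gives
 * 0x10001, which ADDCARRY folds back to 0x0002.
 */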

u_short
in_pseudo(u_int32_t a, u_int32_t b, u_int32_t c)
{
        u_int64_t sum;
        union q_util q_util;
        union l_util l_util;

        sum = (u_int64_t) a + b + c;
        REDUCE16;
        return (sum);
}
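
/*
 * in_pseudo() above folds three 32-bit inputs into a 16-bit partial sum.
 * It is typically used to seed a TCP/UDP checksum with the pseudo-header
 * (source address, destination address, and the protocol/length word);
 * the actual call sites live in the transport protocols, not here.
 */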

u_short
in_cksum_skip(struct mbuf *m, int len, int skip)
{
        u_int64_t sum = 0;
        int mlen = 0;
        int clen = 0;
        caddr_t addr;
        union q_util q_util;
        union l_util l_util;

        len -= skip;
        for (; skip && m; m = m->m_next) {
                if (m->m_len > skip) {
                        mlen = m->m_len - skip;
                        addr = mtod(m, caddr_t) + skip;
                        goto skip_start;
                } else {
                        skip -= m->m_len;
                }
        }

        for (; m && len; m = m->m_next) {
                if (m->m_len == 0)
                        continue;
                mlen = m->m_len;
                addr = mtod(m, caddr_t);
skip_start:
                if (len < mlen)
                        mlen = len;
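                /*
                 * If this chunk starts at the opposite byte parity from
                 * the data already summed (clen), the partial sum computed
                 * by in_cksumdata() has its byte lanes swapped relative to
                 * the running sum, so shift it left by 8 (a byte swap once
                 * folded modulo 0xffff) before adding it in.
                 */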
                if ((clen ^ (long) addr) & 1)
                        sum += in_cksumdata((const u_int32_t *)addr, mlen) << 8;
                else
                        sum += in_cksumdata((const u_int32_t *)addr, mlen);

                clen += mlen;
                len -= mlen;
        }
        REDUCE16;
        return (~sum & 0xffff);
}

u_int
in_cksum_hdr(const struct ip *ip)
{
        u_int64_t sum = in_cksumdata((const u_int32_t *)ip, sizeof(struct ip));
        union q_util q_util;
        union l_util l_util;

        REDUCE16;
        return (~sum & 0xffff);
}
#else

/*
 * These asm statements require __volatile because they pass information
 * via the condition codes.  GCC does not currently provide a way to specify
 * the condition codes as an input or output operand.
 *
 * The extra "mov" into %eax in the main loop below is effectively a
 * prefetch into cache: the value is loaded into a register but never
 * used.  Since modern CPUs reorder operations, this will generally take
 * place in parallel with other calculations.
 */
u_short
in_cksum_skip(struct mbuf *m, int len, int skip)
{
        u_short *w;
        unsigned sum = 0;
        int mlen = 0;
        int byte_swapped = 0;
        union { char c[2]; u_short s; } su;
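        /*
         * su carries a 16-bit word that straddles an mbuf boundary:
         * c[0] holds the last (odd) byte of one mbuf and c[1] the first
         * byte of the next, so the pair can be summed as one word.
         */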

        len -= skip;
        for (; skip && m; m = m->m_next) {
                if (m->m_len > skip) {
                        mlen = m->m_len - skip;
                        w = (u_short *)(mtod(m, u_char *) + skip);
                        goto skip_start;
                } else {
                        skip -= m->m_len;
                }
        }

        for (; m && len; m = m->m_next) {
                if (m->m_len == 0)
                        continue;
                w = mtod(m, u_short *);
                if (mlen == -1) {
                        /*
                         * The first byte of this mbuf is the continuation
                         * of a word spanning between this mbuf and the
                         * last mbuf.
                         *
                         * su.c[0] was already saved when scanning the
                         * previous mbuf; sum was REDUCEd when we found
                         * mlen == -1.
                         */
                        su.c[1] = *(u_char *)w;
                        sum += su.s;
                        w = (u_short *)((char *)w + 1);
                        mlen = m->m_len - 1;
                        len--;
                } else
                        mlen = m->m_len;
skip_start:
                if (len < mlen)
                        mlen = len;
                len -= mlen;
                /*
                 * Force to long boundary so we do longword aligned
                 * memory operations.
                 */
                if (3 & (int) w) {
                        REDUCE;
                        if ((1 & (int) w) && (mlen > 0)) {
                                sum <<= 8;
                                su.c[0] = *(char *)w;
                                w = (u_short *)((char *)w + 1);
                                mlen--;
                                byte_swapped = 1;
                        }
                        if ((2 & (int) w) && (mlen >= 2)) {
                                sum += *w++;
                                mlen -= 2;
                        }
                }
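                /*
                 * The asm blocks below are chains of "addl"/"adcl":
                 * "addl" adds a 32-bit word, each "adcl" adds the next
                 * word plus the carry from the previous add, and the
                 * final "adcl $0" folds the last carry back in, giving
                 * the end-around carry the ones-complement sum needs.
                 */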
                /*
                 * Advance to a 486 cache line boundary.
                 */
                if (4 & (int) w && mlen >= 4) {
                        __asm __volatile (
                                "addl %1, %0\n"
                                "adcl $0, %0"
                                : "+r" (sum)
                                : "g" (((const u_int32_t *)w)[0])
                        );
                        w += 2;
                        mlen -= 4;
                }
                if (8 & (int) w && mlen >= 8) {
                        __asm __volatile (
                                "addl %1, %0\n"
                                "adcl %2, %0\n"
                                "adcl $0, %0"
                                : "+r" (sum)
                                : "g" (((const u_int32_t *)w)[0]),
                                  "g" (((const u_int32_t *)w)[1])
                        );
                        w += 4;
                        mlen -= 8;
                }
                /*
                 * Do as much of the checksum as possible 32 bits at a time.
                 * In fact, this loop is unrolled to make overhead from
                 * branches &c small.
                 */
                mlen -= 1;
                while ((mlen -= 32) >= 0) {
                        /*
                         * Add with carry 16 words and fold in the last
                         * carry by adding a 0 with carry.
                         *
                         * The early ADD(16) and the LOAD(32) are to load
                         * the next 2 cache lines in advance on 486's.  The
                         * 486 has a penalty of 2 clock cycles for loading
                         * a cache line, plus whatever time the external
                         * memory takes to load the first word(s) addressed.
                         * These penalties are unavoidable.  Subsequent
                         * accesses to a cache line being loaded (and to
                         * other external memory?) are delayed until the
                         * whole load finishes.  These penalties are mostly
                         * avoided by not accessing external memory for
                         * 8 cycles after the ADD(16) and 12 cycles after
                         * the LOAD(32).  The loop body runs only while mlen
                         * is at least 33 (not 32), to guarantee that the
                         * LOAD(32) is within bounds.
                         */
                        __asm __volatile (
                                "addl %1, %0\n"
                                "adcl %2, %0\n"
                                "adcl %3, %0\n"
                                "adcl %4, %0\n"
                                "adcl %5, %0\n"
                                "mov %6, %%eax\n"
                                "adcl %7, %0\n"
                                "adcl %8, %0\n"
                                "adcl %9, %0\n"
                                "adcl $0, %0"
                                : "+r" (sum)
                                : "g" (((const u_int32_t *)w)[4]),
                                  "g" (((const u_int32_t *)w)[0]),
                                  "g" (((const u_int32_t *)w)[1]),
                                  "g" (((const u_int32_t *)w)[2]),
                                  "g" (((const u_int32_t *)w)[3]),
                                  "g" (((const u_int32_t *)w)[8]),
                                  "g" (((const u_int32_t *)w)[5]),
                                  "g" (((const u_int32_t *)w)[6]),
                                  "g" (((const u_int32_t *)w)[7])
                                : "eax"
                        );
                        w += 16;
                }
                mlen += 32 + 1;
                if (mlen >= 32) {
                        __asm __volatile (
                                "addl %1, %0\n"
                                "adcl %2, %0\n"
                                "adcl %3, %0\n"
                                "adcl %4, %0\n"
                                "adcl %5, %0\n"
                                "adcl %6, %0\n"
                                "adcl %7, %0\n"
                                "adcl %8, %0\n"
                                "adcl $0, %0"
                                : "+r" (sum)
                                : "g" (((const u_int32_t *)w)[4]),
                                  "g" (((const u_int32_t *)w)[0]),
                                  "g" (((const u_int32_t *)w)[1]),
                                  "g" (((const u_int32_t *)w)[2]),
                                  "g" (((const u_int32_t *)w)[3]),
                                  "g" (((const u_int32_t *)w)[5]),
                                  "g" (((const u_int32_t *)w)[6]),
                                  "g" (((const u_int32_t *)w)[7])
                        );
                        w += 16;
                        mlen -= 32;
                }
                if (mlen >= 16) {
                        __asm __volatile (
                                "addl %1, %0\n"
                                "adcl %2, %0\n"
                                "adcl %3, %0\n"
                                "adcl %4, %0\n"
                                "adcl $0, %0"
                                : "+r" (sum)
                                : "g" (((const u_int32_t *)w)[0]),
                                  "g" (((const u_int32_t *)w)[1]),
                                  "g" (((const u_int32_t *)w)[2]),
                                  "g" (((const u_int32_t *)w)[3])
                        );
                        w += 8;
                        mlen -= 16;
                }
                if (mlen >= 8) {
                        __asm __volatile (
                                "addl %1, %0\n"
                                "adcl %2, %0\n"
                                "adcl $0, %0"
                                : "+r" (sum)
                                : "g" (((const u_int32_t *)w)[0]),
                                  "g" (((const u_int32_t *)w)[1])
                        );
                        w += 4;
                        mlen -= 8;
                }
                if (mlen == 0 && byte_swapped == 0)
                        continue;       /* worth 1% maybe ?? */
                REDUCE;
                while ((mlen -= 2) >= 0) {
                        sum += *w++;
                }
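                /*
                 * A left shift by 8 of the folded 16-bit sum is a byte
                 * rotation modulo 0xffff.  The first shift (done above when
                 * an odd start address was found) moved the sum into
                 * swapped byte lanes; the second shift below rotates
                 * everything back so the bytes summed in between land in
                 * the correct lanes.
                 */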
                if (byte_swapped) {
                        sum <<= 8;
                        byte_swapped = 0;
                        if (mlen == -1) {
                                su.c[1] = *(char *)w;
                                sum += su.s;
                                mlen = 0;
                        } else
                                mlen = -1;
                } else if (mlen == -1)
                        /*
                         * This mbuf has an odd number of bytes.
                         * There could be a word split between
                         * this mbuf and the next mbuf.
                         * Save the last byte (to prepend to the next mbuf).
                         */
                        su.c[0] = *(char *)w;
        }

        if (len)
                printf("%s: out of data by %d\n", __func__, len);
        if (mlen == -1) {
                /*
                 * The last mbuf has an odd number of bytes.  Follow the
                 * standard (the odd byte is shifted left by 8 bits).
                 */
                su.c[1] = 0;
                sum += su.s;
        }
        REDUCE;
        return (~sum & 0xffff);
}
#endif