The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/netinet/in_cksum.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*
    2  * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
    3  *
    4  * This code is derived from software contributed to The DragonFly Project
    5  * by Matthew Dillon <dillon@backplane.com>
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  *
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in
   15  *    the documentation and/or other materials provided with the
   16  *    distribution.
   17  * 3. Neither the name of The DragonFly Project nor the names of its
   18  *    contributors may be used to endorse or promote products derived
   19  *    from this software without specific, prior written permission.
   20  *
   21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
   24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
   25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
   26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
   27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
   28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
   29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
   30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
   31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   32  * SUCH DAMAGE.
   33  *
   34  * $DragonFly: src/sys/netinet/in_cksum.c,v 1.9 2005/01/06 09:14:13 hsu Exp $
   35  */
   36 
   37 #include <sys/param.h>
   38 #include <sys/systm.h>
   39 #include <sys/mbuf.h>
   40 #include <sys/in_cksum.h>
   41 
   42 #include <netinet/in.h>
   43 #include <netinet/in_systm.h>
   44 #include <netinet/ip.h>
   45 #include <netinet/ip_var.h>
   46 
   47 #include <machine/endian.h>
   48 
   49 /*
   50  * Return the 16 bit 1's complement checksum in network byte order.  Devolve
   51  * the mbuf into 32 bit aligned segments that we can pass to assembly and
   52  * do the rest manually.  Even though we return a 16 bit unsigned value,
   53  * we declare it as a 32 bit unsigned value to reduce unnecessary assembly
   54  * conversions.
   55  *
   56  * Byte ordering issues.  Note two things.  First, no secondary carry occurs,
    57  * and second, a one's complement checksum is endian-independent.  If we are
   58  * given a data buffer in network byte order, our checksum will be in network
   59  * byte order.
   60  *
   61  * 0xffff + 0xffff = 0xfffe + C = 0xffff (so no second carry occurs).
   62  *
   63  * 0x8142 + 0x8243 = 0x0385 + C = 0x0386 (checksum is in same byte order
   64  * 0x4281 + 0x4382              = 0x8603  as the data regardless of arch)
   65  *
   66  * This works with 16, 32, 64, etc... bits as long as we deal with the
   67  * carry when collapsing it back down to 16 bits.
   68  */
   69 
    70 __uint32_t
    71 in_cksum_range(struct mbuf *m, int nxt, int offset, int bytes)
    72 {
    73     __uint8_t *ptr;
    74     __uint32_t sum0;    /* accumulates data seen at even packet parity */
    75     __uint32_t sum1;    /* accumulates odd-parity data, merged (<< 8) at end */
    76     int n;
    77     int flip;           /* non-zero while at an odd byte offset in the packet */
    78 
    79     sum0 = 0;
    80     sum1 = 0;
    81     flip = 0;
    82 
    83     if (nxt != 0) {
    84         uint32_t sum32;
    85         struct ipovly ipov;
    86 
    87         /* pseudo header (nxt != 0 selects TCP/UDP): src/dst addrs, protocol, length */
    88         if (offset < sizeof(struct ipovly))
    89                 panic("in_cksum_range: offset too short");
    90         if (m->m_len < sizeof(struct ip))
    91                 panic("in_cksum_range: bad mbuf chain");
    92         bzero(&ipov, sizeof ipov);
    93         ipov.ih_len = htons(bytes);    /* length of data being checksummed */
    94         ipov.ih_pr = nxt;              /* protocol number */
    95         ipov.ih_src = mtod(m, struct ip *)->ip_src;
    96         ipov.ih_dst = mtod(m, struct ip *)->ip_dst;
    97         ptr = (uint8_t *)&ipov;
    98 
    99         sum32 = asm_ones32(ptr, sizeof(ipov) / 4);    /* arch helper: 1's comp sum of 32-bit words */
   100         sum32 = (sum32 >> 16) + (sum32 & 0xffff);     /* fold; no second carry can occur (see above) */
   101         if (flip)                                     /* flip is still 0 here; kept for symmetry */
   102             sum1 += sum32;
   103         else
   104             sum0 += sum32;
   105     }
   106 
   107     /*
   108      * Skip fully engulfed mbufs.  Branch predict optimal.
   109      */
   110     while (m && offset >= m->m_len) {
   111         offset -= m->m_len;
   112         m = m->m_next;
   113     }
   114 
   115     /*
   116      * Process the checksum for each segment.  Note that the code below is
   117      * branch-predict optimal, so it's faster than you might otherwise
   118      * believe.  When we are buffer-aligned but also odd-byte-aligned from
   119      * the point of view of the IP packet, we accumulate to sum1 instead of
   120      * sum0.
   121      *
   122      * Initial offsets do not pre-set flip (assert that offset is even?)
   123      */
   124     while (bytes > 0 && m) {
   125         /*
   126          * Calculate pointer base and number of bytes to snarf, account
   127          * for snarfed bytes.
   128          */
   129         ptr = mtod(m, __uint8_t *) + offset;
   130         if ((n = m->m_len - offset) > bytes)
   131             n = bytes;
   132         bytes -= n;
   133 
   134         /*
   135          * First 16-bit-align our buffer by eating a byte if necessary,
   136          * then 32-bit-align our buffer by eating a word if necessary.
   137          *
   138          * We are endian-sensitive when chomping a byte.  WARNING!  Be
   139          * careful optimizing this!  16 and 32 bit words must be aligned
   140          * for this to be generic code.
   141          */
   142         if (((intptr_t)ptr & 1) && n) {
   143 #if BYTE_ORDER == LITTLE_ENDIAN
   144             if (flip)
   145                 sum1 += ptr[0];
   146             else
   147                 sum0 += ptr[0];
   148 #else
   149             if (flip)
   150                 sum0 += ptr[0];
   151             else
   152                 sum1 += ptr[0];
   153 #endif
   154             ++ptr;
   155             --n;
   156             flip = 1 - flip;    /* consumed a single byte: packet parity changes */
   157         }
   158         if (((intptr_t)ptr & 2) && n > 1) {    /* eat one 16-bit word to reach 32-bit alignment */
   159             if (flip)
   160                 sum1 += *(__uint16_t *)ptr;
   161             else
   162                 sum0 += *(__uint16_t *)ptr;
   163             ptr += 2;
   164             n -= 2;
   165         }
   166 
   167         /*
   168          * Process a 32-bit aligned data buffer and accumulate the result
   169          * in sum0 or sum1.  Allow only one 16 bit overflow carry.
   170          */
   171         if (n >= 4) {
   172             __uint32_t sum32;
   173 
   174             sum32 = asm_ones32((void *)ptr, n >> 2);
   175             sum32 = (sum32 >> 16) + (sum32 & 0xffff);
   176             if (flip)
   177                 sum1 += sum32;
   178             else
   179                 sum0 += sum32;
   180             ptr += n & ~3;
   181             /* n &= 3; dontcare */
   182         }
   183 
   184         /*
   185          * Handle oddly-sized buffers.  Handle word issues first while
   186          * ptr is still aligned.
   187          */
   188         if (n & 2) {
   189             if (flip)
   190                 sum1 += *(__uint16_t *)ptr;
   191             else
   192                 sum0 += *(__uint16_t *)ptr;
   193             ptr += 2;
   194             /* n -= 2; dontcare */
   195         }
   196         if (n & 1) {
   197 #if BYTE_ORDER == LITTLE_ENDIAN
   198             if (flip)
   199                 sum1 += ptr[0];
   200             else
   201                 sum0 += ptr[0];
   202 #else
   203             if (flip)
   204                 sum0 += ptr[0];
   205             else
   206                 sum1 += ptr[0];
   207 #endif
   208             /* ++ptr; dontcare */
   209             /* --n; dontcare */
   210             flip = 1 - flip;    /* trailing odd byte: parity flips for the next mbuf */
   211         }
   212         m = m->m_next;
   213         offset = 0;    /* offset applies only to the first processed segment */
   214     }
   215 
   216     /*
   217      * Due to byte aligned or oddly-sized buffers we may have a checksum
   218      * in sum1 which needs to be shifted and added to our main sum.  There
   219      * is a presumption here that no more than 255 overflows occurred which
   220      * is 255/3 byte aligned mbufs in the worst case.
   221      */
   222     sum0 += sum1 << 8;    /* odd-parity data was accumulated byte-shifted */
   223     sum0 = (sum0 >> 16) + (sum0 & 0xffff);    /* fold back down to 16 bits + carry */
   224     if (sum0 > 0xffff)
   225         ++sum0;
   226     return(~sum0 & 0xffff);    /* 1's complement result, network byte order (see header) */
   227 }
  228 

Cache object: 96be7d496b0fea03b82c5559b7eef4a9


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.