FreeBSD/Linux Kernel Cross Reference
sys/netinet6/frag6.c


    1 /*-
    2  * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  * 3. Neither the name of the project nor the names of its contributors
   14  *    may be used to endorse or promote products derived from this software
   15  *    without specific prior written permission.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
   18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
   21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   27  * SUCH DAMAGE.
   28  *
   29  *      $KAME: frag6.c,v 1.33 2002/01/07 11:34:48 kjc Exp $
   30  */
   31 
   32 #include <sys/cdefs.h>
   33 __FBSDID("$FreeBSD$");
   34 
   35 #include <sys/param.h>
   36 #include <sys/systm.h>
   37 #include <sys/malloc.h>
   38 #include <sys/mbuf.h>
   39 #include <sys/domain.h>
   40 #include <sys/protosw.h>
   41 #include <sys/socket.h>
   42 #include <sys/errno.h>
   43 #include <sys/time.h>
   44 #include <sys/kernel.h>
   45 #include <sys/syslog.h>
   46 
   47 #include <net/if.h>
   48 #include <net/route.h>
   49 
   50 #include <netinet/in.h>
   51 #include <netinet/in_var.h>
   52 #include <netinet/ip6.h>
   53 #include <netinet6/ip6_var.h>
   54 #include <netinet/icmp6.h>
   55 #include <netinet/in_systm.h>   /* for ECN definitions */
   56 #include <netinet/ip.h>         /* for ECN definitions */
   57 
    58 /*
    59  * Define this to get correct per-interface statistics.
    60  * It requires an extra routing table lookup per fragment,
    61  * which may or may not be a performance hit.
    62  */
   63 #define IN6_IFSTAT_STRICT
   64 
   65 static void frag6_enq(struct ip6asfrag *, struct ip6asfrag *);
   66 static void frag6_deq(struct ip6asfrag *);
   67 static void frag6_insque(struct ip6q *, struct ip6q *);
   68 static void frag6_remque(struct ip6q *);
   69 static void frag6_freef(struct ip6q *);
   70 
   71 static struct mtx ip6qlock;
    72 /*
    73  * These fields are all protected by ip6qlock.
    74  */
   75 static u_int frag6_nfragpackets;
   76 static u_int frag6_nfrags;
   77 static struct   ip6q ip6q;      /* ip6 reassemble queue */
   78 
   79 #define IP6Q_LOCK_INIT()        mtx_init(&ip6qlock, "ip6qlock", NULL, MTX_DEF);
   80 #define IP6Q_LOCK()             mtx_lock(&ip6qlock)
   81 #define IP6Q_TRYLOCK()          mtx_trylock(&ip6qlock)
   82 #define IP6Q_LOCK_ASSERT()      mtx_assert(&ip6qlock, MA_OWNED)
   83 #define IP6Q_UNLOCK()           mtx_unlock(&ip6qlock)
   84 
   85 static MALLOC_DEFINE(M_FTABLE, "fragment", "fragment reassembly header");
   86 
   87 /*
   88  * Initialise reassembly queue and fragment identifier.
   89  */
   90 static void
   91 frag6_change(void *tag)
   92 {
   93 
   94         ip6_maxfragpackets = nmbclusters / 4;
   95         ip6_maxfrags = nmbclusters / 4;
   96 }
   97 
   98 void
   99 frag6_init(void)
  100 {
  101 
  102         ip6_maxfragpackets = nmbclusters / 4;
  103         ip6_maxfrags = nmbclusters / 4;
  104         EVENTHANDLER_REGISTER(nmbclusters_change,
  105             frag6_change, NULL, EVENTHANDLER_PRI_ANY);
  106 
  107         IP6Q_LOCK_INIT();
  108 
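               /* The reassembly queue list is circular, with ip6q itself as the sentinel. */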
  109         ip6q.ip6q_next = ip6q.ip6q_prev = &ip6q;
  110 }
  111 
   112 /*
   113  * In RFC 2460, the fragmentation and reassembly rules do not agree with
   114  * each other in terms of next header field handling in the fragment header.
   115  * While the sender will use the same value for all of the fragmented packets,
   116  * the receiver is advised not to check them for consistency.
   117  *
  118  * fragment rule (p20):
  119  *      (2) A Fragment header containing:
  120  *      The Next Header value that identifies the first header of
  121  *      the Fragmentable Part of the original packet.
  122  *              -> next header field is same for all fragments
  123  *
  124  * reassembly rule (p21):
  125  *      The Next Header field of the last header of the Unfragmentable
  126  *      Part is obtained from the Next Header field of the first
  127  *      fragment's Fragment header.
  128  *              -> should grab it from the first fragment only
  129  *
   130  * The following note also contradicts the fragment rule - no one is going to
   131  * send different fragments with different next header fields.
  132  *
  133  * additional note (p22):
  134  *      The Next Header values in the Fragment headers of different
  135  *      fragments of the same original packet may differ.  Only the value
  136  *      from the Offset zero fragment packet is used for reassembly.
  137  *              -> should grab it from the first fragment only
  138  *
  139  * There is no explicit reason given in the RFC.  Historical reason maybe?
  140  */
  141 /*
  142  * Fragment input
  143  */
  144 int
  145 frag6_input(struct mbuf **mp, int *offp, int proto)
  146 {
  147         struct mbuf *m = *mp, *t;
  148         struct ip6_hdr *ip6;
  149         struct ip6_frag *ip6f;
  150         struct ip6q *q6;
  151         struct ip6asfrag *af6, *ip6af, *af6dwn;
  152 #ifdef IN6_IFSTAT_STRICT
  153         struct in6_ifaddr *ia;
  154 #endif
  155         int offset = *offp, nxt, i, next;
  156         int first_frag = 0;
  157         int fragoff, frgpartlen;        /* must be larger than u_int16_t */
  158         struct ifnet *dstifp;
  159         u_int8_t ecn, ecn0;
  160 #if 0
  161         char ip6buf[INET6_ADDRSTRLEN];
  162 #endif
  163 
  164         ip6 = mtod(m, struct ip6_hdr *);
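               /*
                * Get a pointer to the fragment header, making sure it can be
                * dereferenced as a contiguous structure before it is used.
                */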
  165 #ifndef PULLDOWN_TEST
  166         IP6_EXTHDR_CHECK(m, offset, sizeof(struct ip6_frag), IPPROTO_DONE);
  167         ip6f = (struct ip6_frag *)((caddr_t)ip6 + offset);
  168 #else
  169         IP6_EXTHDR_GET(ip6f, struct ip6_frag *, m, offset, sizeof(*ip6f));
  170         if (ip6f == NULL)
  171                 return (IPPROTO_DONE);
  172 #endif
  173 
  174         dstifp = NULL;
  175 #ifdef IN6_IFSTAT_STRICT
  176         /* find the destination interface of the packet. */
  177         if ((ia = ip6_getdstifaddr(m)) != NULL)
  178                 dstifp = ia->ia_ifp;
  179 #else
   180         /* This is not the destination interface; we are violating the spec. */
  181         if ((m->m_flags & M_PKTHDR) != 0)
  182                 dstifp = m->m_pkthdr.rcvif;
  183 #endif
  184 
  185         /* jumbo payload can't contain a fragment header */
  186         if (ip6->ip6_plen == 0) {
  187                 icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, offset);
  188                 in6_ifstat_inc(dstifp, ifs6_reass_fail);
  189                 return IPPROTO_DONE;
  190         }
  191 
   192         /*
   193          * Check that the fragment data length is a multiple of 8 octets,
   194          * as required for any fragment with more fragments to follow.
   195          * The check on (ip6_plen - offset) works because
   196          * sizeof(struct ip6_frag) == 8 and sizeof(struct ip6_hdr) == 40.
   197          */
  198         if ((ip6f->ip6f_offlg & IP6F_MORE_FRAG) &&
  199             (((ntohs(ip6->ip6_plen) - offset) & 0x7) != 0)) {
  200                 icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
  201                     offsetof(struct ip6_hdr, ip6_plen));
  202                 in6_ifstat_inc(dstifp, ifs6_reass_fail);
  203                 return IPPROTO_DONE;
  204         }
  205 
  206         ip6stat.ip6s_fragments++;
  207         in6_ifstat_inc(dstifp, ifs6_reass_reqd);
  208 
  209         /* offset now points to data portion */
  210         offset += sizeof(struct ip6_frag);
  211 
  212         /*
  213          * XXX-BZ RFC XXXX (draft-gont-6man-ipv6-atomic-fragments)
  214          * Handle "atomic" fragments (offset and m bit set to 0) upfront,
  215          * unrelated to any reassembly.  Just skip the fragment header.
  216          */
  217         if ((ip6f->ip6f_offlg & ~IP6F_RESERVED_MASK) == 0) {
  218                 /* XXX-BZ we want dedicated counters for this. */
  219                 ip6stat.ip6s_reassembled++;
  220                 in6_ifstat_inc(dstifp, ifs6_reass_ok);
  221                 *offp = offset;
  222                 return (ip6f->ip6f_nxt);
  223         }
  224 
  225         IP6Q_LOCK();
  226 
  227         /*
  228          * Enforce upper bound on number of fragments.
  229          * If maxfrag is 0, never accept fragments.
  230          * If maxfrag is -1, accept all fragments without limitation.
  231          */
  232         if (ip6_maxfrags < 0)
  233                 ;
  234         else if (frag6_nfrags >= (u_int)ip6_maxfrags)
  235                 goto dropfrag;
  236 
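               /*
                * Look for an existing reassembly queue matching this fragment's
                * (source address, destination address, identification) tuple.
                */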
  237         for (q6 = ip6q.ip6q_next; q6 != &ip6q; q6 = q6->ip6q_next)
  238                 if (ip6f->ip6f_ident == q6->ip6q_ident &&
  239                     IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, &q6->ip6q_src) &&
  240                     IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &q6->ip6q_dst))
  241                         break;
  242 
  243         if (q6 == &ip6q) {
  244                 /*
   245                  * This is the first fragment to arrive; create a reassembly queue.
  246                  */
  247                 first_frag = 1;
  248 
  249                 /*
  250                  * Enforce upper bound on number of fragmented packets
  251                  * for which we attempt reassembly;
  252                  * If maxfragpackets is 0, never accept fragments.
  253                  * If maxfragpackets is -1, accept all fragments without
  254                  * limitation.
  255                  */
  256                 if (ip6_maxfragpackets < 0)
  257                         ;
  258                 else if (frag6_nfragpackets >= (u_int)ip6_maxfragpackets)
  259                         goto dropfrag;
  260                 frag6_nfragpackets++;
  261                 q6 = (struct ip6q *)malloc(sizeof(struct ip6q), M_FTABLE,
  262                     M_NOWAIT);
  263                 if (q6 == NULL)
  264                         goto dropfrag;
  265                 bzero(q6, sizeof(*q6));
  266 
  267                 frag6_insque(q6, &ip6q);
  268 
  269                 /* ip6q_nxt will be filled afterwards, from 1st fragment */
  270                 q6->ip6q_down   = q6->ip6q_up = (struct ip6asfrag *)q6;
  271 #ifdef notyet
  272                 q6->ip6q_nxtp   = (u_char *)nxtp;
  273 #endif
  274                 q6->ip6q_ident  = ip6f->ip6f_ident;
  275                 q6->ip6q_ttl    = IPV6_FRAGTTL;
  276                 q6->ip6q_src    = ip6->ip6_src;
  277                 q6->ip6q_dst    = ip6->ip6_dst;
  278                 q6->ip6q_ecn    =
  279                     (ntohl(ip6->ip6_flow) >> 20) & IPTOS_ECN_MASK;
  280                 q6->ip6q_unfrglen = -1; /* The 1st fragment has not arrived. */
  281 
  282                 q6->ip6q_nfrag = 0;
  283         }
  284 
  285         /*
  286          * If it's the 1st fragment, record the length of the
  287          * unfragmentable part and the next header of the fragment header.
  288          */
  289         fragoff = ntohs(ip6f->ip6f_offlg & IP6F_OFF_MASK);
  290         if (fragoff == 0) {
  291                 q6->ip6q_unfrglen = offset - sizeof(struct ip6_hdr) -
  292                     sizeof(struct ip6_frag);
  293                 q6->ip6q_nxt = ip6f->ip6f_nxt;
  294         }
  295 
  296         /*
  297          * Check that the reassembled packet would not exceed 65535 bytes
  298          * in size.
  299          * If it would exceed, discard the fragment and return an ICMP error.
  300          */
  301         frgpartlen = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - offset;
  302         if (q6->ip6q_unfrglen >= 0) {
  303                 /* The 1st fragment has already arrived. */
  304                 if (q6->ip6q_unfrglen + fragoff + frgpartlen > IPV6_MAXPACKET) {
  305                         icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
  306                             offset - sizeof(struct ip6_frag) +
  307                             offsetof(struct ip6_frag, ip6f_offlg));
  308                         IP6Q_UNLOCK();
  309                         return (IPPROTO_DONE);
  310                 }
  311         } else if (fragoff + frgpartlen > IPV6_MAXPACKET) {
  312                 icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
  313                     offset - sizeof(struct ip6_frag) +
  314                     offsetof(struct ip6_frag, ip6f_offlg));
  315                 IP6Q_UNLOCK();
  316                 return (IPPROTO_DONE);
  317         }
  318         /*
  319          * If it's the first fragment, do the above check for each
  320          * fragment already stored in the reassembly queue.
  321          */
  322         if (fragoff == 0) {
  323                 for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
  324                      af6 = af6dwn) {
  325                         af6dwn = af6->ip6af_down;
  326 
  327                         if (q6->ip6q_unfrglen + af6->ip6af_off + af6->ip6af_frglen >
  328                             IPV6_MAXPACKET) {
  329                                 struct mbuf *merr = IP6_REASS_MBUF(af6);
  330                                 struct ip6_hdr *ip6err;
  331                                 int erroff = af6->ip6af_offset;
  332 
  333                                 /* dequeue the fragment. */
  334                                 frag6_deq(af6);
  335                                 free(af6, M_FTABLE);
  336 
  337                                 /* adjust pointer. */
  338                                 ip6err = mtod(merr, struct ip6_hdr *);
  339 
  340                                 /*
  341                                  * Restore source and destination addresses
  342                                  * in the erroneous IPv6 header.
  343                                  */
  344                                 ip6err->ip6_src = q6->ip6q_src;
  345                                 ip6err->ip6_dst = q6->ip6q_dst;
  346 
  347                                 icmp6_error(merr, ICMP6_PARAM_PROB,
  348                                     ICMP6_PARAMPROB_HEADER,
  349                                     erroff - sizeof(struct ip6_frag) +
  350                                     offsetof(struct ip6_frag, ip6f_offlg));
  351                         }
  352                 }
  353         }
  354 
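               /*
                * Allocate the per-fragment bookkeeping structure, record the
                * fragment's offset, length and flags, and remember the mbuf
                * that carries its data.
                */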
  355         ip6af = (struct ip6asfrag *)malloc(sizeof(struct ip6asfrag), M_FTABLE,
  356             M_NOWAIT);
  357         if (ip6af == NULL)
  358                 goto dropfrag;
  359         bzero(ip6af, sizeof(*ip6af));
  360         ip6af->ip6af_mff = ip6f->ip6f_offlg & IP6F_MORE_FRAG;
  361         ip6af->ip6af_off = fragoff;
  362         ip6af->ip6af_frglen = frgpartlen;
  363         ip6af->ip6af_offset = offset;
  364         IP6_REASS_MBUF(ip6af) = m;
  365 
  366         if (first_frag) {
  367                 af6 = (struct ip6asfrag *)q6;
  368                 goto insert;
  369         }
  370 
  371         /*
  372          * Handle ECN by comparing this segment with the first one;
  373          * if CE is set, do not lose CE.
   374          * Drop if CE and not-ECT are mixed for the same packet.
  375          */
  376         ecn = (ntohl(ip6->ip6_flow) >> 20) & IPTOS_ECN_MASK;
  377         ecn0 = q6->ip6q_ecn;
  378         if (ecn == IPTOS_ECN_CE) {
  379                 if (ecn0 == IPTOS_ECN_NOTECT) {
  380                         free(ip6af, M_FTABLE);
  381                         goto dropfrag;
  382                 }
  383                 if (ecn0 != IPTOS_ECN_CE)
  384                         q6->ip6q_ecn = IPTOS_ECN_CE;
  385         }
  386         if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT) {
  387                 free(ip6af, M_FTABLE);
  388                 goto dropfrag;
  389         }
  390 
  391         /*
  392          * Find a segment which begins after this one does.
  393          */
  394         for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
  395              af6 = af6->ip6af_down)
  396                 if (af6->ip6af_off > ip6af->ip6af_off)
  397                         break;
  398 
  399 #if 0
  400         /*
  401          * If there is a preceding segment, it may provide some of
  402          * our data already.  If so, drop the data from the incoming
  403          * segment.  If it provides all of our data, drop us.
  404          */
  405         if (af6->ip6af_up != (struct ip6asfrag *)q6) {
  406                 i = af6->ip6af_up->ip6af_off + af6->ip6af_up->ip6af_frglen
  407                         - ip6af->ip6af_off;
  408                 if (i > 0) {
  409                         if (i >= ip6af->ip6af_frglen)
  410                                 goto dropfrag;
  411                         m_adj(IP6_REASS_MBUF(ip6af), i);
  412                         ip6af->ip6af_off += i;
  413                         ip6af->ip6af_frglen -= i;
  414                 }
  415         }
  416 
  417         /*
  418          * While we overlap succeeding segments trim them or,
  419          * if they are completely covered, dequeue them.
  420          */
  421         while (af6 != (struct ip6asfrag *)q6 &&
  422                ip6af->ip6af_off + ip6af->ip6af_frglen > af6->ip6af_off) {
  423                 i = (ip6af->ip6af_off + ip6af->ip6af_frglen) - af6->ip6af_off;
  424                 if (i < af6->ip6af_frglen) {
  425                         af6->ip6af_frglen -= i;
  426                         af6->ip6af_off += i;
  427                         m_adj(IP6_REASS_MBUF(af6), i);
  428                         break;
  429                 }
  430                 af6 = af6->ip6af_down;
  431                 m_freem(IP6_REASS_MBUF(af6->ip6af_up));
  432                 frag6_deq(af6->ip6af_up);
  433         }
  434 #else
   435         /*
   436          * If the incoming fragment overlaps some existing fragments in
   437          * the reassembly queue, drop it, since it is dangerous to overwrite
   438          * existing fragments from a security point of view.
   439          * We don't know which fragment is the bad one - here we simply
   440          * trust the fragment that came in earlier, with no real justification.
   441          *
   442          * Note: due to changes after disabling this part, the mbuf passed to
   443          * m_adj() below no longer meets the requirement.
   444          */
  445         if (af6->ip6af_up != (struct ip6asfrag *)q6) {
  446                 i = af6->ip6af_up->ip6af_off + af6->ip6af_up->ip6af_frglen
  447                         - ip6af->ip6af_off;
  448                 if (i > 0) {
  449 #if 0                           /* suppress the noisy log */
  450                         log(LOG_ERR, "%d bytes of a fragment from %s "
  451                             "overlaps the previous fragment\n",
  452                             i, ip6_sprintf(ip6buf, &q6->ip6q_src));
  453 #endif
  454                         free(ip6af, M_FTABLE);
  455                         goto dropfrag;
  456                 }
  457         }
  458         if (af6 != (struct ip6asfrag *)q6) {
  459                 i = (ip6af->ip6af_off + ip6af->ip6af_frglen) - af6->ip6af_off;
  460                 if (i > 0) {
  461 #if 0                           /* suppress the noisy log */
  462                         log(LOG_ERR, "%d bytes of a fragment from %s "
   463                             "overlaps the succeeding fragment\n",
  464                             i, ip6_sprintf(ip6buf, &q6->ip6q_src));
  465 #endif
  466                         free(ip6af, M_FTABLE);
  467                         goto dropfrag;
  468                 }
  469         }
  470 #endif
  471 
  472 insert:
  473 
  474         /*
  475          * Stick new segment in its place;
  476          * check for complete reassembly.
  477          * Move to front of packet queue, as we are
  478          * the most recently active fragmented packet.
  479          */
  480         frag6_enq(ip6af, af6->ip6af_up);
  481         frag6_nfrags++;
  482         q6->ip6q_nfrag++;
  483 #if 0 /* xxx */
  484         if (q6 != ip6q.ip6q_next) {
  485                 frag6_remque(q6);
  486                 frag6_insque(q6, &ip6q);
  487         }
  488 #endif
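               /*
                * Check for complete reassembly: every fragment from offset 0
                * onward must be present and contiguous, and the last fragment
                * queued must have had its "more fragments" flag clear.
                */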
  489         next = 0;
  490         for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
  491              af6 = af6->ip6af_down) {
  492                 if (af6->ip6af_off != next) {
  493                         IP6Q_UNLOCK();
  494                         return IPPROTO_DONE;
  495                 }
  496                 next += af6->ip6af_frglen;
  497         }
  498         if (af6->ip6af_up->ip6af_mff) {
  499                 IP6Q_UNLOCK();
  500                 return IPPROTO_DONE;
  501         }
  502 
  503         /*
  504          * Reassembly is complete; concatenate fragments.
  505          */
  506         ip6af = q6->ip6q_down;
  507         t = m = IP6_REASS_MBUF(ip6af);
  508         af6 = ip6af->ip6af_down;
  509         frag6_deq(ip6af);
  510         while (af6 != (struct ip6asfrag *)q6) {
  511                 af6dwn = af6->ip6af_down;
  512                 frag6_deq(af6);
  513                 while (t->m_next)
  514                         t = t->m_next;
  515                 t->m_next = IP6_REASS_MBUF(af6);
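                       /*
                        * Trim this fragment's IPv6 header, unfragmentable part and
                        * fragment header; only its data is appended to the chain.
                        */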
  516                 m_adj(t->m_next, af6->ip6af_offset);
  517                 free(af6, M_FTABLE);
  518                 af6 = af6dwn;
  519         }
  520 
  521         /* adjust offset to point where the original next header starts */
  522         offset = ip6af->ip6af_offset - sizeof(struct ip6_frag);
  523         free(ip6af, M_FTABLE);
  524         ip6 = mtod(m, struct ip6_hdr *);
  525         ip6->ip6_plen = htons((u_short)next + offset - sizeof(struct ip6_hdr));
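               /*
                * If any fragment arrived with the ECN CE mark, preserve CE on
                * the reassembled packet.
                */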
  526         if (q6->ip6q_ecn == IPTOS_ECN_CE)
  527                 ip6->ip6_flow |= htonl(IPTOS_ECN_CE << 20);
  528         nxt = q6->ip6q_nxt;
  529 #ifdef notyet
  530         *q6->ip6q_nxtp = (u_char)(nxt & 0xff);
  531 #endif
  532 
  533         /* Delete frag6 header */
  534         if (m->m_len >= offset + sizeof(struct ip6_frag)) {
  535                 /* This is the only possible case with !PULLDOWN_TEST */
  536                 ovbcopy((caddr_t)ip6, (caddr_t)ip6 + sizeof(struct ip6_frag),
  537                     offset);
  538                 m->m_data += sizeof(struct ip6_frag);
  539                 m->m_len -= sizeof(struct ip6_frag);
  540         } else {
   541                 /* this involves no copy if the boundary falls within a cluster */
  542                 if ((t = m_split(m, offset, M_DONTWAIT)) == NULL) {
  543                         frag6_remque(q6);
  544                         frag6_nfrags -= q6->ip6q_nfrag;
  545                         free(q6, M_FTABLE);
  546                         frag6_nfragpackets--;
  547                         goto dropfrag;
  548                 }
  549                 m_adj(t, sizeof(struct ip6_frag));
  550                 m_cat(m, t);
  551         }
  552 
  553         /*
   554          * Store NXT back into the next header field of the preceding header.
  555          */
  556         {
  557                 char *prvnxtp = ip6_get_prevhdr(m, offset); /* XXX */
  558                 *prvnxtp = nxt;
  559         }
  560 
  561         frag6_remque(q6);
  562         frag6_nfrags -= q6->ip6q_nfrag;
  563         free(q6, M_FTABLE);
  564         frag6_nfragpackets--;
  565 
  566         if (m->m_flags & M_PKTHDR) { /* Isn't it always true? */
  567                 int plen = 0;
  568                 for (t = m; t; t = t->m_next)
  569                         plen += t->m_len;
  570                 m->m_pkthdr.len = plen;
  571         }
  572 
  573         ip6stat.ip6s_reassembled++;
  574         in6_ifstat_inc(dstifp, ifs6_reass_ok);
  575 
  576         /*
  577          * Tell launch routine the next header
  578          */
  579 
  580         *mp = m;
  581         *offp = offset;
  582 
  583         IP6Q_UNLOCK();
  584         return nxt;
  585 
  586  dropfrag:
  587         IP6Q_UNLOCK();
  588         in6_ifstat_inc(dstifp, ifs6_reass_fail);
  589         ip6stat.ip6s_fragdropped++;
  590         m_freem(m);
  591         return IPPROTO_DONE;
  592 }
  593 
  594 /*
  595  * Free a fragment reassembly header and all
  596  * associated datagrams.
  597  */
  598 void
  599 frag6_freef(struct ip6q *q6)
  600 {
  601         struct ip6asfrag *af6, *down6;
  602 
  603         IP6Q_LOCK_ASSERT();
  604 
  605         for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
  606              af6 = down6) {
  607                 struct mbuf *m = IP6_REASS_MBUF(af6);
  608 
  609                 down6 = af6->ip6af_down;
  610                 frag6_deq(af6);
  611 
  612                 /*
  613                  * Return ICMP time exceeded error for the 1st fragment.
  614                  * Just free other fragments.
  615                  */
  616                 if (af6->ip6af_off == 0) {
  617                         struct ip6_hdr *ip6;
  618 
  619                         /* adjust pointer */
  620                         ip6 = mtod(m, struct ip6_hdr *);
  621 
  622                         /* restore source and destination addresses */
  623                         ip6->ip6_src = q6->ip6q_src;
  624                         ip6->ip6_dst = q6->ip6q_dst;
  625 
  626                         icmp6_error(m, ICMP6_TIME_EXCEEDED,
  627                                     ICMP6_TIME_EXCEED_REASSEMBLY, 0);
  628                 } else
  629                         m_freem(m);
  630                 free(af6, M_FTABLE);
  631         }
  632         frag6_remque(q6);
  633         frag6_nfrags -= q6->ip6q_nfrag;
  634         free(q6, M_FTABLE);
  635         frag6_nfragpackets--;
  636 }
  637 
  638 /*
  639  * Put an ip fragment on a reassembly chain.
  640  * Like insque, but pointers in middle of structure.
  641  */
  642 void
  643 frag6_enq(struct ip6asfrag *af6, struct ip6asfrag *up6)
  644 {
  645 
  646         IP6Q_LOCK_ASSERT();
  647 
  648         af6->ip6af_up = up6;
  649         af6->ip6af_down = up6->ip6af_down;
  650         up6->ip6af_down->ip6af_up = af6;
  651         up6->ip6af_down = af6;
  652 }
  653 
  654 /*
  655  * To frag6_enq as remque is to insque.
  656  */
  657 void
  658 frag6_deq(struct ip6asfrag *af6)
  659 {
  660 
  661         IP6Q_LOCK_ASSERT();
  662 
  663         af6->ip6af_up->ip6af_down = af6->ip6af_down;
  664         af6->ip6af_down->ip6af_up = af6->ip6af_up;
  665 }
  666 
  667 void
  668 frag6_insque(struct ip6q *new, struct ip6q *old)
  669 {
  670 
  671         IP6Q_LOCK_ASSERT();
  672 
  673         new->ip6q_prev = old;
  674         new->ip6q_next = old->ip6q_next;
  675         old->ip6q_next->ip6q_prev= new;
  676         old->ip6q_next = new;
  677 }
  678 
  679 void
  680 frag6_remque(struct ip6q *p6)
  681 {
  682 
  683         IP6Q_LOCK_ASSERT();
  684 
  685         p6->ip6q_prev->ip6q_next = p6->ip6q_next;
  686         p6->ip6q_next->ip6q_prev = p6->ip6q_prev;
  687 }
  688 
  689 /*
   690  * IPv6 reassembly timer processing;
  691  * if a timer expires on a reassembly
  692  * queue, discard it.
  693  */
  694 void
  695 frag6_slowtimo(void)
  696 {
  697         struct ip6q *q6;
  698 
  699 #if 0
  700         GIANT_REQUIRED; /* XXX bz: ip6_forward_rt */
  701 #endif
  702 
  703         IP6Q_LOCK();
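               /*
                * Age each reassembly queue by one slowtimo tick and free any
                * queue whose lifetime has expired.
                */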
  704         q6 = ip6q.ip6q_next;
  705         if (q6)
  706                 while (q6 != &ip6q) {
  707                         --q6->ip6q_ttl;
  708                         q6 = q6->ip6q_next;
  709                         if (q6->ip6q_prev->ip6q_ttl == 0) {
  710                                 ip6stat.ip6s_fragtimeout++;
  711                                 /* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
  712                                 frag6_freef(q6->ip6q_prev);
  713                         }
  714                 }
  715         /*
  716          * If we are over the maximum number of fragments
  717          * (due to the limit being lowered), drain off
  718          * enough to get down to the new limit.
  719          */
  720         while (frag6_nfragpackets > (u_int)ip6_maxfragpackets &&
  721             ip6q.ip6q_prev) {
  722                 ip6stat.ip6s_fragoverflow++;
  723                 /* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
  724                 frag6_freef(ip6q.ip6q_prev);
  725         }
  726         IP6Q_UNLOCK();
  727 
  728 #if 0
  729         /*
  730          * Routing changes might produce a better route than we last used;
  731          * make sure we notice eventually, even if forwarding only for one
  732          * destination and the cache is never replaced.
  733          */
  734         if (ip6_forward_rt.ro_rt) {
  735                 RTFREE(ip6_forward_rt.ro_rt);
  736                 ip6_forward_rt.ro_rt = 0;
  737         }
  738         if (ipsrcchk_rt.ro_rt) {
  739                 RTFREE(ipsrcchk_rt.ro_rt);
  740                 ipsrcchk_rt.ro_rt = 0;
  741         }
  742 #endif
  743 }
  744 
  745 /*
  746  * Drain off all datagram fragments.
  747  */
  748 void
  749 frag6_drain(void)
  750 {
  751 
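               /*
                * Avoid blocking in the drain path: if the reassembly lock is
                * already held, skip draining this time.
                */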
  752         if (IP6Q_TRYLOCK() == 0)
  753                 return;
  754         while (ip6q.ip6q_next != &ip6q) {
  755                 ip6stat.ip6s_fragdropped++;
  756                 /* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
  757                 frag6_freef(ip6q.ip6q_next);
  758         }
  759         IP6Q_UNLOCK();
  760 }

This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.