sys/netinet6/frag6.c

    1 /*      $OpenBSD: frag6.c,v 1.87 2022/02/22 01:15:02 guenther Exp $     */
    2 /*      $KAME: frag6.c,v 1.40 2002/05/27 21:40:31 itojun Exp $  */
    3 
    4 /*
    5  * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
    6  * All rights reserved.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  * 3. Neither the name of the project nor the names of its contributors
   17  *    may be used to endorse or promote products derived from this software
   18  *    without specific prior written permission.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
   21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
   24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   30  * SUCH DAMAGE.
   31  */
   32 
   33 #include <sys/param.h>
   34 #include <sys/systm.h>
   35 #include <sys/mbuf.h>
   36 #include <sys/socket.h>
   37 #include <sys/errno.h>
   38 #include <sys/time.h>
   39 #include <sys/kernel.h>
   40 #include <sys/pool.h>
   41 #include <sys/mutex.h>
   42 
   43 #include <net/if.h>
   44 #include <net/if_var.h>
   45 #include <net/route.h>
   46 
   47 #include <netinet/in.h>
   48 #include <netinet6/in6_var.h>
   49 #include <netinet/ip6.h>
   50 #include <netinet6/ip6_var.h>
   51 #include <netinet/icmp6.h>
   52 #include <netinet/ip.h>         /* for ECN definitions */
   53 
   54 /* Protects `frag6_queue', `frag6_nfragpackets' and `frag6_nfrags'. */
   55 struct mutex frag6_mutex = MUTEX_INITIALIZER(IPL_SOFTNET);
   56 
   57 u_int frag6_nfragpackets;
   58 u_int frag6_nfrags;
   59 TAILQ_HEAD(ip6q_head, ip6q) frag6_queue;        /* ip6 reassemble queue */
   60 
   61 void frag6_freef(struct ip6q *);
   62 void frag6_unlink(struct ip6q *, struct ip6q_head *);
   63 
   64 struct pool ip6af_pool;
   65 struct pool ip6q_pool;
   66 
   67 /*
   68  * Initialise reassembly queue and pools.
   69  */
   70 void
   71 frag6_init(void)
   72 {
   73         pool_init(&ip6af_pool, sizeof(struct ip6asfrag),
   74             0, IPL_SOFTNET, 0, "ip6af", NULL);
   75         pool_init(&ip6q_pool, sizeof(struct ip6q),
   76             0, IPL_SOFTNET, 0, "ip6q", NULL);
   77 
   78         TAILQ_INIT(&frag6_queue);
   79 }
   80 
   81 /*
   82  * In RFC 2460, the fragment and reassembly rules do not agree with each
   83  * other in terms of next header field handling in the fragment header.
   84  * While the sender will use the same value for all of the fragmented
   85  * packets, the receiver is advised not to check for consistency.
   86  *
   87  * fragment rule (p20):
   88  *      (2) A Fragment header containing:
   89  *      The Next Header value that identifies the first header of
   90  *      the Fragmentable Part of the original packet.
   91  *              -> next header field is same for all fragments
   92  *
   93  * reassembly rule (p21):
   94  *      The Next Header field of the last header of the Unfragmentable
   95  *      Part is obtained from the Next Header field of the first
   96  *      fragment's Fragment header.
   97  *              -> should grab it from the first fragment only
   98  *
   99  * The following note also contradicts the fragment rule - no one is going
  100  * to send different fragments with different next header fields.
  101  *
  102  * additional note (p22):
  103  *      The Next Header values in the Fragment headers of different
  104  *      fragments of the same original packet may differ.  Only the value
  105  *      from the Offset zero fragment packet is used for reassembly.
  106  *              -> should grab it from the first fragment only
  107  *
  108  * There is no explicit reason given in the RFC.  Historical reason maybe?
  109  */
  110 /*
  111  * Fragment input
  112  */
  113 int
  114 frag6_input(struct mbuf **mp, int *offp, int proto, int af)
  115 {
  116         struct mbuf *m = *mp, *t;
  117         struct ip6_hdr *ip6;
  118         struct ip6_frag *ip6f;
  119         struct ip6q *q6;
  120         struct ip6asfrag *af6, *ip6af, *naf6, *paf6;
  121         int offset = *offp, nxt, i, next;
  122         int first_frag = 0;
  123         int fragoff, frgpartlen;        /* must be larger than u_int16_t */
  124         u_int8_t ecn, ecn0;
  125 
  126         ip6 = mtod(m, struct ip6_hdr *);
  127         IP6_EXTHDR_GET(ip6f, struct ip6_frag *, m, offset, sizeof(*ip6f));
  128         if (ip6f == NULL)
  129                 return IPPROTO_DONE;
  130 
  131         /* jumbo payload can't contain a fragment header */
  132         if (ip6->ip6_plen == 0) {
  133                 icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, offset);
  134                 return IPPROTO_DONE;
  135         }
  136 
  137         /*
  138          * Check whether the fragment's data length is a multiple of
  139          * 8 octets, as required for every fragment that has the
  140          * MORE_FRAG bit set.
  141          * sizeof(struct ip6_frag) == 8, sizeof(struct ip6_hdr) == 40
  142          */
  143         if ((ip6f->ip6f_offlg & IP6F_MORE_FRAG) &&
  144             (((ntohs(ip6->ip6_plen) - offset) & 0x7) != 0)) {
  145                 icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
  146                     offsetof(struct ip6_hdr, ip6_plen));
  147                 return IPPROTO_DONE;
  148         }
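              /*
               * Example: with no extension headers before the fragment
               * header, offset is 40 here and ip6_plen covers the 8-byte
               * fragment header plus the fragment data.  Since 40 and 8 are
               * both multiples of 8, (ntohs(ip6->ip6_plen) - offset) is
               * congruent to the fragment data length modulo 8, so the test
               * above rejects exactly those non-final fragments whose data
               * is not a multiple of 8 octets.
               */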
  149 
  150         ip6stat_inc(ip6s_fragments);
  151 
  152         /* offset now points to data portion */
  153         offset += sizeof(struct ip6_frag);
  154 
  155         /*
  156          * RFC6946:  A host that receives an IPv6 packet which includes
  157          * a Fragment Header with the "Fragment Offset" equal to 0 and
  158          * the "M" bit equal to 0 MUST process such packet in isolation
  159          * from any other packets/fragments.
  160          */
  161         fragoff = ntohs(ip6f->ip6f_offlg & IP6F_OFF_MASK);
  162         if (fragoff == 0 && !(ip6f->ip6f_offlg & IP6F_MORE_FRAG)) {
  163                 ip6stat_inc(ip6s_reassembled);
  164                 *offp = offset;
  165                 return ip6f->ip6f_nxt;
  166         }
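              /*
               * An atomic fragment (offset 0, M bit clear) handled above is
               * passed straight to the next protocol via ip6f_nxt; it never
               * enters the shared reassembly queue, so it cannot interact
               * with other fragments carrying the same (src, dst, ident).
               */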
  167 
  168         /* Ignore an empty non-atomic fragment; do not classify it as overlapping. */
  169         if (sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) <= offset) {
  170                 m_freem(m);
  171                 return IPPROTO_DONE;
  172         }
  173 
  174         mtx_enter(&frag6_mutex);
  175 
  176         /*
  177          * Enforce upper bound on number of fragments.
  178          * If maxfrag is 0, never accept fragments.
  179          * If maxfrag is -1, accept all fragments without limitation.
  180          */
  181         if (ip6_maxfrags >= 0 && frag6_nfrags >= (u_int)ip6_maxfrags) {
  182                 mtx_leave(&frag6_mutex);
  183                 goto dropfrag;
  184         }
  185 
  186         TAILQ_FOREACH(q6, &frag6_queue, ip6q_queue)
  187                 if (ip6f->ip6f_ident == q6->ip6q_ident &&
  188                     IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, &q6->ip6q_src) &&
  189                     IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &q6->ip6q_dst))
  190                         break;
  191 
  192         if (q6 == NULL) {
  193                 /*
  194                  * This is the first fragment to arrive; create a reassembly queue.
  195                  */
  196                 first_frag = 1;
  197 
  198                 /*
  199                  * Enforce upper bound on number of fragmented packets
  200                  * for which we attempt reassembly;
  201                  * If maxfragpackets is 0, never accept fragments.
  202                  * If maxfragpackets is -1, accept all fragments without
  203                  * limitation.
  204                  */
  205                 if (ip6_maxfragpackets >= 0 &&
  206                     frag6_nfragpackets >= (u_int)ip6_maxfragpackets) {
  207                         mtx_leave(&frag6_mutex);
  208                         goto dropfrag;
  209                 }
  210                 frag6_nfragpackets++;
  211                 q6 = pool_get(&ip6q_pool, PR_NOWAIT | PR_ZERO);
  212                 if (q6 == NULL) {
  213                         mtx_leave(&frag6_mutex);
  214                         goto dropfrag;
  215                 }
  216 
  217                 TAILQ_INSERT_HEAD(&frag6_queue, q6, ip6q_queue);
  218 
  219                 /* ip6q_nxt will be filled afterwards, from 1st fragment */
  220                 LIST_INIT(&q6->ip6q_asfrag);
  221                 q6->ip6q_ident  = ip6f->ip6f_ident;
  222                 q6->ip6q_ttl    = IPV6_FRAGTTL;
  223                 q6->ip6q_src    = ip6->ip6_src;
  224                 q6->ip6q_dst    = ip6->ip6_dst;
  225                 q6->ip6q_ecn    = (ntohl(ip6->ip6_flow) >> 20) & IPTOS_ECN_MASK;
  226                 q6->ip6q_unfrglen = -1; /* The 1st fragment has not arrived. */
  227                 q6->ip6q_nfrag = 0;
  228         }
  229 
  230         /*
  231          * If it's the 1st fragment, record the length of the
  232          * unfragmentable part and the next header of the fragment header.
  233          */
  234         if (fragoff == 0) {
  235                 q6->ip6q_unfrglen = offset - sizeof(struct ip6_hdr) -
  236                     sizeof(struct ip6_frag);
  237                 q6->ip6q_nxt = ip6f->ip6f_nxt;
  238         }
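              /*
               * Since offset has already been advanced past the fragment
               * header, ip6q_unfrglen is the number of extension header
               * bytes between the fixed IPv6 header and the fragment
               * header: 0 if there were none, 8 for a minimal destination
               * options header, and so on.
               */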
  239 
  240         /*
  241          * Check that the reassembled packet would not exceed 65535 bytes
  242          * in size.
  243          * If it would exceed, discard the fragment and return an ICMP error.
  244          */
  245         frgpartlen = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - offset;
  246         if (q6->ip6q_unfrglen >= 0) {
  247                 /* The 1st fragment has already arrived. */
  248                 if (q6->ip6q_unfrglen + fragoff + frgpartlen > IPV6_MAXPACKET) {
  249                         mtx_leave(&frag6_mutex);
  250                         icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
  251                             offset - sizeof(struct ip6_frag) +
  252                             offsetof(struct ip6_frag, ip6f_offlg));
  253                         return (IPPROTO_DONE);
  254                 }
  255         } else if (fragoff + frgpartlen > IPV6_MAXPACKET) {
  256                 mtx_leave(&frag6_mutex);
  257                 icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
  258                             offset - sizeof(struct ip6_frag) +
  259                                 offsetof(struct ip6_frag, ip6f_offlg));
  260                 return (IPPROTO_DONE);
  261         }
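              /*
               * Example: the 13-bit offset field can encode at most
               * 8191 * 8 = 65528, so with an empty unfragmentable part a
               * fragment starting there may carry at most 7 bytes before
               * the reassembled payload would exceed IPV6_MAXPACKET (65535)
               * and the fragment is rejected with a parameter problem
               * pointing at its offset field.
               */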
  262         /*
  263          * If it's the first fragment, do the above check for each
  264          * fragment already stored in the reassembly queue.
  265          */
  266         if (fragoff == 0) {
  267                 LIST_FOREACH_SAFE(af6, &q6->ip6q_asfrag, ip6af_list, naf6) {
  268                         if (q6->ip6q_unfrglen + af6->ip6af_off +
  269                             af6->ip6af_frglen > IPV6_MAXPACKET) {
  270                                 struct mbuf *merr = af6->ip6af_m;
  271                                 struct ip6_hdr *ip6err;
  272                                 int erroff = af6->ip6af_offset;
  273 
  274                                 /* dequeue the fragment. */
  275                                 LIST_REMOVE(af6, ip6af_list);
  276                                 pool_put(&ip6af_pool, af6);
  277 
  278                                 /* adjust pointer. */
  279                                 ip6err = mtod(merr, struct ip6_hdr *);
  280 
  281                                 /*
  282                                  * Restore source and destination addresses
  283                                  * in the erroneous IPv6 header.
  284                                  */
  285                                 ip6err->ip6_src = q6->ip6q_src;
  286                                 ip6err->ip6_dst = q6->ip6q_dst;
  287 
  288                                 icmp6_error(merr, ICMP6_PARAM_PROB,
  289                                     ICMP6_PARAMPROB_HEADER,
  290                                     erroff - sizeof(struct ip6_frag) +
  291                                     offsetof(struct ip6_frag, ip6f_offlg));
  292                         }
  293                 }
  294         }
  295 
  296         ip6af = pool_get(&ip6af_pool, PR_NOWAIT | PR_ZERO);
  297         if (ip6af == NULL) {
  298                 mtx_leave(&frag6_mutex);
  299                 goto dropfrag;
  300         }
  301         ip6af->ip6af_mff = ip6f->ip6f_offlg & IP6F_MORE_FRAG;
  302         ip6af->ip6af_off = fragoff;
  303         ip6af->ip6af_frglen = frgpartlen;
  304         ip6af->ip6af_offset = offset;
  305         ip6af->ip6af_m = m;
  306 
  307         if (first_frag) {
  308                 paf6 = NULL;
  309                 goto insert;
  310         }
  311 
  312         /*
  313          * Handle ECN by comparing this segment with the first one;
  314          * if CE is set, do not lose CE.
  315          * Drop this fragment if CE and not-ECT are mixed for the same packet.
  316          */
  317         ecn = (ntohl(ip6->ip6_flow) >> 20) & IPTOS_ECN_MASK;
  318         ecn0 = q6->ip6q_ecn;
  319         if (ecn == IPTOS_ECN_CE) {
  320                 if (ecn0 == IPTOS_ECN_NOTECT) {
  321                         mtx_leave(&frag6_mutex);
  322                         pool_put(&ip6af_pool, ip6af);
  323                         goto dropfrag;
  324                 }
  325                 if (ecn0 != IPTOS_ECN_CE)
  326                         q6->ip6q_ecn = IPTOS_ECN_CE;
  327         }
  328         if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT) {
  329                 mtx_leave(&frag6_mutex);
  330                 pool_put(&ip6af_pool, ip6af);
  331                 goto dropfrag;
  332         }
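              /*
               * Net effect of the ECN checks above: a CE fragment marks the
               * queued packet CE unless the first fragment was not-ECT, in
               * which case this fragment is dropped; a not-ECT fragment is
               * dropped when the first fragment was ECN-capable; ECT
               * fragments are always accepted and leave ip6q_ecn unchanged.
               */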
  333 
  334         /*
  335          * Find a segment which begins after this one does.
  336          */
  337         for (paf6 = NULL, af6 = LIST_FIRST(&q6->ip6q_asfrag);
  338             af6 != NULL;
  339             paf6 = af6, af6 = LIST_NEXT(af6, ip6af_list))
  340                 if (af6->ip6af_off > ip6af->ip6af_off)
  341                         break;
  342 
  343         /*
  344          * RFC 5722, Errata 3089:  When reassembling an IPv6 datagram, if one
  345          * or more of its constituent fragments is determined to be an overlapping
  346          * fragment, the entire datagram (and any constituent fragments) MUST
  347          * be silently discarded.
  348          */
  349         if (paf6 != NULL) {
  350                 i = (paf6->ip6af_off + paf6->ip6af_frglen) - ip6af->ip6af_off;
  351                 if (i > 0)
  352                         goto flushfrags;
  353         }
  354         if (af6 != NULL) {
  355                 i = (ip6af->ip6af_off + ip6af->ip6af_frglen) - af6->ip6af_off;
  356                 if (i > 0)
  357                         goto flushfrags;
  358         }
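              /*
               * Example: if a queued fragment covers offsets [0, 1232) and
               * this one claims to start at 1224, the first check yields
               * i = 1232 - 1224 = 8 > 0, so the whole queue is flushed
               * instead of trimming the overlap.
               */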
  359 
  360  insert:
  361         /*
  362          * Stick the new fragment in its place and
  363          * check for complete reassembly: walk the list in offset
  364          * order and verify that there are no gaps and that the
  365          * final fragment has the MORE_FRAG bit clear.
  366          */
  367         if (paf6 != NULL)
  368                 LIST_INSERT_AFTER(paf6, ip6af, ip6af_list);
  369         else
  370                 LIST_INSERT_HEAD(&q6->ip6q_asfrag, ip6af, ip6af_list);
  371         frag6_nfrags++;
  372         q6->ip6q_nfrag++;
  373         next = 0;
  374         for (paf6 = NULL, af6 = LIST_FIRST(&q6->ip6q_asfrag);
  375             af6 != NULL;
  376             paf6 = af6, af6 = LIST_NEXT(af6, ip6af_list)) {
  377                 if (af6->ip6af_off != next) {
  378                         mtx_leave(&frag6_mutex);
  379                         return IPPROTO_DONE;
  380                 }
  381                 next += af6->ip6af_frglen;
  382         }
  383         if (paf6->ip6af_mff) {
  384                 mtx_leave(&frag6_mutex);
  385                 return IPPROTO_DONE;
  386         }
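              /*
               * At this point the fragments are contiguous from offset 0
               * and the final fragment had no MORE_FRAG bit, so `next' is
               * the total length of the fragmentable part of the original
               * packet.
               */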
  387 
  388         /*
  389          * Reassembly is complete; concatenate fragments.
  390          */
  391         ip6af = LIST_FIRST(&q6->ip6q_asfrag);
  392         LIST_REMOVE(ip6af, ip6af_list);
  393         t = m = ip6af->ip6af_m;
  394         while ((af6 = LIST_FIRST(&q6->ip6q_asfrag)) != NULL) {
  395                 LIST_REMOVE(af6, ip6af_list);
  396                 while (t->m_next)
  397                         t = t->m_next;
  398                 t->m_next = af6->ip6af_m;
  399                 m_adj(t->m_next, af6->ip6af_offset);
  400                 m_removehdr(t->m_next);
  401                 pool_put(&ip6af_pool, af6);
  402         }
  403 
  404         /* adjust offset to point where the original next header starts */
  405         offset = ip6af->ip6af_offset - sizeof(struct ip6_frag);
  406         pool_put(&ip6af_pool, ip6af);
  407         ip6 = mtod(m, struct ip6_hdr *);
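              /*
               * offset is now sizeof(struct ip6_hdr) plus the length of the
               * unfragmentable extension headers, so the payload length set
               * below is already the value the packet will have once the
               * 8-byte fragment header is deleted.
               */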
  408         ip6->ip6_plen = htons((u_short)next + offset - sizeof(struct ip6_hdr));
  409         ip6->ip6_src = q6->ip6q_src;
  410         ip6->ip6_dst = q6->ip6q_dst;
  411         if (q6->ip6q_ecn == IPTOS_ECN_CE)
  412                 ip6->ip6_flow |= htonl(IPTOS_ECN_CE << 20);
  413         nxt = q6->ip6q_nxt;
  414 
  415         /* Delete frag6 header */
  416         if (frag6_deletefraghdr(m, offset) != 0) {
  417                 TAILQ_REMOVE(&frag6_queue, q6, ip6q_queue);
  418                 frag6_nfrags -= q6->ip6q_nfrag;
  419                 frag6_nfragpackets--;
  420                 mtx_leave(&frag6_mutex);
  421                 pool_put(&ip6q_pool, q6);
  422                 goto dropfrag;
  423         }
  424 
  425         TAILQ_REMOVE(&frag6_queue, q6, ip6q_queue);
  426         frag6_nfrags -= q6->ip6q_nfrag;
  427         frag6_nfragpackets--;
  428 
  429         mtx_leave(&frag6_mutex);
  430 
  431         pool_put(&ip6q_pool, q6);
  432 
  433         m_calchdrlen(m);
  434 
  435         /*
  436          * Restore NXT to the original.
  437          */
  438         {
  439                 int prvnxt = ip6_get_prevhdr(m, offset);
  440                 uint8_t *prvnxtp;
  441 
  442                 IP6_EXTHDR_GET(prvnxtp, uint8_t *, m, prvnxt,
  443                     sizeof(*prvnxtp));
  444                 if (prvnxtp == NULL)
  445                         goto dropfrag;
  446                 *prvnxtp = nxt;
  447         }
  448 
  449         ip6stat_inc(ip6s_reassembled);
  450 
  451         /*
  452          * Tell the caller which header to process next.
  453          */
  454 
  455         *mp = m;
  456         *offp = offset;
  457 
  458         return nxt;
  459 
  460  flushfrags:
  461         TAILQ_REMOVE(&frag6_queue, q6, ip6q_queue);
  462         frag6_nfrags -= q6->ip6q_nfrag;
  463         frag6_nfragpackets--;
  464 
  465         mtx_leave(&frag6_mutex);
  466 
  467         pool_put(&ip6af_pool, ip6af);
  468 
  469         while ((af6 = LIST_FIRST(&q6->ip6q_asfrag)) != NULL) {
  470                 LIST_REMOVE(af6, ip6af_list);
  471                 m_freem(af6->ip6af_m);
  472                 pool_put(&ip6af_pool, af6);
  473         }
  474         ip6stat_add(ip6s_fragdropped, q6->ip6q_nfrag + 1);
  475         pool_put(&ip6q_pool, q6);
  476         m_freem(m);
  477         return IPPROTO_DONE;
  478 
  479  dropfrag:
  480         ip6stat_inc(ip6s_fragdropped);
  481         m_freem(m);
  482         return IPPROTO_DONE;
  483 }
  484 
  485 /*
  486  * Delete the fragment header that follows the unfragmentable part.
  487  */
  488 int
  489 frag6_deletefraghdr(struct mbuf *m, int offset)
  490 {
  491         struct mbuf *t;
  492 
  493         if (m->m_len >= offset + sizeof(struct ip6_frag)) {
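                      /*
                       * Fast path: the whole unfragmentable part and the
                       * fragment header sit in one mbuf, so slide the first
                       * `offset' bytes forward by 8, overwriting the
                       * fragment header in place, and advance m_data to the
                       * new start of the packet.
                       */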
  494                 memmove(mtod(m, caddr_t) + sizeof(struct ip6_frag),
  495                     mtod(m, caddr_t), offset);
  496                 m->m_data += sizeof(struct ip6_frag);
  497                 m->m_len -= sizeof(struct ip6_frag);
  498         } else {
  499                 /* No data copy is needed if the split point falls inside a cluster. */
  500                 if ((t = m_split(m, offset, M_DONTWAIT)) == NULL)
  501                         return (ENOBUFS);
  502                 m_adj(t, sizeof(struct ip6_frag));
  503                 m_cat(m, t);
  504         }
  505 
  506         return (0);
  507 }
  508 
  509 /*
  510  * Free a fragment reassembly header and all
  511  * associated datagrams.
  512  * The header must not be in any queue.
  513  */
  514 void
  515 frag6_freef(struct ip6q *q6)
  516 {
  517         struct ip6asfrag *af6;
  518 
  519         while ((af6 = LIST_FIRST(&q6->ip6q_asfrag)) != NULL) {
  520                 struct mbuf *m = af6->ip6af_m;
  521 
  522                 LIST_REMOVE(af6, ip6af_list);
  523 
  524                 /*
  525                  * Return ICMP time exceeded error for the 1st fragment.
  526                  * Just free other fragments.
  527                  */
  528                 if (af6->ip6af_off == 0) {
  529                         struct ip6_hdr *ip6;
  530 
  531                         /* adjust pointer */
  532                         ip6 = mtod(m, struct ip6_hdr *);
  533 
  534                         /* restore source and destination addresses */
  535                         ip6->ip6_src = q6->ip6q_src;
  536                         ip6->ip6_dst = q6->ip6q_dst;
  537 
  538                         NET_LOCK();
  539                         icmp6_error(m, ICMP6_TIME_EXCEEDED,
  540                                     ICMP6_TIME_EXCEED_REASSEMBLY, 0);
  541                         NET_UNLOCK();
  542                 } else
  543                         m_freem(m);
  544                 pool_put(&ip6af_pool, af6);
  545         }
  546         pool_put(&ip6q_pool, q6);
  547 }
  548 
  549 /*
  550  * Unlinks a fragment reassembly header from the reassembly queue
  551  * and inserts it into a given remove queue.
  552  */
  553 void
  554 frag6_unlink(struct ip6q *q6, struct ip6q_head *rmq6)
  555 {
  556         MUTEX_ASSERT_LOCKED(&frag6_mutex);
  557 
  558         TAILQ_REMOVE(&frag6_queue, q6, ip6q_queue);
  559         TAILQ_INSERT_HEAD(rmq6, q6, ip6q_queue);
  560         frag6_nfrags -= q6->ip6q_nfrag;
  561         frag6_nfragpackets--;
  562 }
  563 
  564 /*
  565  * IPv6 reassembly timer processing;
  566  * if a timer expires on a reassembly
  567  * queue, discard it.
  568  */
  569 void
  570 frag6_slowtimo(void)
  571 {
  572         struct ip6q_head rmq6;
  573         struct ip6q *q6, *nq6;
  574 
  575         TAILQ_INIT(&rmq6);
  576 
  577         mtx_enter(&frag6_mutex);
  578 
  579         TAILQ_FOREACH_SAFE(q6, &frag6_queue, ip6q_queue, nq6) {
  580                 if (--q6->ip6q_ttl == 0) {
  581                         ip6stat_inc(ip6s_fragtimeout);
  582                         frag6_unlink(q6, &rmq6);
  583                 }
  584         }
  585 
  586         /*
  587          * If we are over the maximum number of fragmented packets
  588          * (due to the limit being lowered), drain off
  589          * enough to get down to the new limit.
  590          */
  591         while (frag6_nfragpackets > (u_int)ip6_maxfragpackets &&
  592             !TAILQ_EMPTY(&frag6_queue)) {
  593                 ip6stat_inc(ip6s_fragoverflow);
  594                 frag6_unlink(TAILQ_LAST(&frag6_queue, ip6q_head), &rmq6);
  595         }
  596 
  597         mtx_leave(&frag6_mutex);
  598 
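              /*
               * Free the expired queues only after frag6_mutex has been
               * released: frag6_freef() may generate an ICMP time exceeded
               * error under NET_LOCK(), which must not be taken while
               * holding the mutex.
               */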
  599         while ((q6 = TAILQ_FIRST(&rmq6)) != NULL) {
  600                 TAILQ_REMOVE(&rmq6, q6, ip6q_queue);
  601                 frag6_freef(q6);
  602         }
  603 }
