FreeBSD/Linux Kernel Cross Reference
sys/netinet/tcp_subr.c

    1 /*-
    2  * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
    3  *      The Regents of the University of California.  All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  * 4. Neither the name of the University nor the names of its contributors
   14  *    may be used to endorse or promote products derived from this software
   15  *    without specific prior written permission.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   27  * SUCH DAMAGE.
   28  *
   29  *      @(#)tcp_subr.c  8.2 (Berkeley) 5/24/95
   30  */
   31 
   32 #include <sys/cdefs.h>
   33 __FBSDID("$FreeBSD: releng/9.0/sys/netinet/tcp_subr.c 224010 2011-07-14 13:44:48Z bz $");
   34 
   35 #include "opt_compat.h"
   36 #include "opt_inet.h"
   37 #include "opt_inet6.h"
   38 #include "opt_ipsec.h"
   39 #include "opt_tcpdebug.h"
   40 
   41 #include <sys/param.h>
   42 #include <sys/systm.h>
   43 #include <sys/callout.h>
   44 #include <sys/hhook.h>
   45 #include <sys/kernel.h>
   46 #include <sys/khelp.h>
   47 #include <sys/sysctl.h>
   48 #include <sys/jail.h>
   49 #include <sys/malloc.h>
   50 #include <sys/mbuf.h>
   51 #ifdef INET6
   52 #include <sys/domain.h>
   53 #endif
   54 #include <sys/priv.h>
   55 #include <sys/proc.h>
   56 #include <sys/socket.h>
   57 #include <sys/socketvar.h>
   58 #include <sys/protosw.h>
   59 #include <sys/random.h>
   60 
   61 #include <vm/uma.h>
   62 
   63 #include <net/route.h>
   64 #include <net/if.h>
   65 #include <net/vnet.h>
   66 
   67 #include <netinet/cc.h>
   68 #include <netinet/in.h>
   69 #include <netinet/in_pcb.h>
   70 #include <netinet/in_systm.h>
   71 #include <netinet/in_var.h>
   72 #include <netinet/ip.h>
   73 #include <netinet/ip_icmp.h>
   74 #include <netinet/ip_var.h>
   75 #ifdef INET6
   76 #include <netinet/ip6.h>
   77 #include <netinet6/in6_pcb.h>
   78 #include <netinet6/ip6_var.h>
   79 #include <netinet6/scope6_var.h>
   80 #include <netinet6/nd6.h>
   81 #endif
   82 
   83 #include <netinet/tcp_fsm.h>
   84 #include <netinet/tcp_seq.h>
   85 #include <netinet/tcp_timer.h>
   86 #include <netinet/tcp_var.h>
   87 #include <netinet/tcp_syncache.h>
   88 #include <netinet/tcp_offload.h>
   89 #ifdef INET6
   90 #include <netinet6/tcp6_var.h>
   91 #endif
   92 #include <netinet/tcpip.h>
   93 #ifdef TCPDEBUG
   94 #include <netinet/tcp_debug.h>
   95 #endif
   96 #ifdef INET6
   97 #include <netinet6/ip6protosw.h>
   98 #endif
   99 
  100 #ifdef IPSEC
  101 #include <netipsec/ipsec.h>
  102 #include <netipsec/xform.h>
  103 #ifdef INET6
  104 #include <netipsec/ipsec6.h>
  105 #endif
  106 #include <netipsec/key.h>
  107 #include <sys/syslog.h>
  108 #endif /*IPSEC*/
  109 
  110 #include <machine/in_cksum.h>
  111 #include <sys/md5.h>
  112 
  113 #include <security/mac/mac_framework.h>
  114 
  115 VNET_DEFINE(int, tcp_mssdflt) = TCP_MSS;
  116 #ifdef INET6
  117 VNET_DEFINE(int, tcp_v6mssdflt) = TCP6_MSS;
  118 #endif
  119 
  120 static int
  121 sysctl_net_inet_tcp_mss_check(SYSCTL_HANDLER_ARGS)
  122 {
  123         int error, new;
  124 
  125         new = V_tcp_mssdflt;
  126         error = sysctl_handle_int(oidp, &new, 0, req);
  127         if (error == 0 && req->newptr) {
  128                 if (new < TCP_MINMSS)
  129                         error = EINVAL;
  130                 else
  131                         V_tcp_mssdflt = new;
  132         }
  133         return (error);
  134 }
  135 
  136 SYSCTL_VNET_PROC(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt,
  137     CTLTYPE_INT|CTLFLAG_RW, &VNET_NAME(tcp_mssdflt), 0,
  138     &sysctl_net_inet_tcp_mss_check, "I",
  139     "Default TCP Maximum Segment Size");
  140 
  141 #ifdef INET6
  142 static int
  143 sysctl_net_inet_tcp_mss_v6_check(SYSCTL_HANDLER_ARGS)
  144 {
  145         int error, new;
  146 
  147         new = V_tcp_v6mssdflt;
  148         error = sysctl_handle_int(oidp, &new, 0, req);
  149         if (error == 0 && req->newptr) {
  150                 if (new < TCP_MINMSS)
  151                         error = EINVAL;
  152                 else
  153                         V_tcp_v6mssdflt = new;
  154         }
  155         return (error);
  156 }
  157 
  158 SYSCTL_VNET_PROC(_net_inet_tcp, TCPCTL_V6MSSDFLT, v6mssdflt,
  159     CTLTYPE_INT|CTLFLAG_RW, &VNET_NAME(tcp_v6mssdflt), 0,
  160     &sysctl_net_inet_tcp_mss_v6_check, "I",
  161    "Default TCP Maximum Segment Size for IPv6");
  162 #endif /* INET6 */
  163 
  164 /*
  165  * Minimum MSS we accept and use. This prevents DoS attacks where
   166  * we are forced to a ridiculously low MSS like 20 and send hundreds
  167  * of packets instead of one. The effect scales with the available
  168  * bandwidth and quickly saturates the CPU and network interface
  169  * with packet generation and sending. Set to zero to disable MINMSS
   170  * checking. This setting prevents us from sending too-small packets.
  171  */
  172 VNET_DEFINE(int, tcp_minmss) = TCP_MINMSS;
  173 SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, minmss, CTLFLAG_RW,
  174      &VNET_NAME(tcp_minmss), 0,
  175     "Minmum TCP Maximum Segment Size");
  176 
  177 VNET_DEFINE(int, tcp_do_rfc1323) = 1;
  178 SYSCTL_VNET_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_RW,
  179     &VNET_NAME(tcp_do_rfc1323), 0,
  180     "Enable rfc1323 (high performance TCP) extensions");
  181 
  182 static int      tcp_log_debug = 0;
  183 SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_debug, CTLFLAG_RW,
  184     &tcp_log_debug, 0, "Log errors caused by incoming TCP segments");
  185 
  186 static int      tcp_tcbhashsize = 0;
  187 SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RDTUN,
  188     &tcp_tcbhashsize, 0, "Size of TCP control-block hashtable");
  189 
  190 static int      do_tcpdrain = 1;
  191 SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_tcpdrain, CTLFLAG_RW, &do_tcpdrain, 0,
  192     "Enable tcp_drain routine for extra help when low on mbufs");
  193 
  194 SYSCTL_VNET_UINT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_RD,
  195     &VNET_NAME(tcbinfo.ipi_count), 0, "Number of active PCBs");
  196 
  197 static VNET_DEFINE(int, icmp_may_rst) = 1;
  198 #define V_icmp_may_rst                  VNET(icmp_may_rst)
  199 SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_RW,
  200     &VNET_NAME(icmp_may_rst), 0,
  201     "Certain ICMP unreachable messages may abort connections in SYN_SENT");
  202 
  203 static VNET_DEFINE(int, tcp_isn_reseed_interval) = 0;
  204 #define V_tcp_isn_reseed_interval       VNET(tcp_isn_reseed_interval)
  205 SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval, CTLFLAG_RW,
  206     &VNET_NAME(tcp_isn_reseed_interval), 0,
  207     "Seconds between reseeding of ISN secret");
  208 
  209 static int      tcp_soreceive_stream = 0;
  210 SYSCTL_INT(_net_inet_tcp, OID_AUTO, soreceive_stream, CTLFLAG_RDTUN,
  211     &tcp_soreceive_stream, 0, "Using soreceive_stream for TCP sockets");
  212 
  213 #ifdef TCP_SIGNATURE
  214 static int      tcp_sig_checksigs = 1;
  215 SYSCTL_INT(_net_inet_tcp, OID_AUTO, signature_verify_input, CTLFLAG_RW,
  216     &tcp_sig_checksigs, 0, "Verify RFC2385 digests on inbound traffic");
  217 #endif
  218 
  219 VNET_DEFINE(uma_zone_t, sack_hole_zone);
  220 #define V_sack_hole_zone                VNET(sack_hole_zone)
  221 
  222 VNET_DEFINE(struct hhook_head *, tcp_hhh[HHOOK_TCP_LAST+1]);
  223 
  224 static struct inpcb *tcp_notify(struct inpcb *, int);
  225 static char *   tcp_log_addr(struct in_conninfo *inc, struct tcphdr *th,
  226                     void *ip4hdr, const void *ip6hdr);
  227 
  228 /*
  229  * Target size of TCP PCB hash tables. Must be a power of two.
  230  *
  231  * Note that this can be overridden by the kernel environment
  232  * variable net.inet.tcp.tcbhashsize
  233  */
  234 #ifndef TCBHASHSIZE
  235 #define TCBHASHSIZE     512
  236 #endif
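
/*
 * Editorial note: net.inet.tcp.tcbhashsize is a loader tunable (see the
 * CTLFLAG_RDTUN sysctl above), so it is normally overridden from
 * /boot/loader.conf, for example:
 *
 *	net.inet.tcp.tcbhashsize="4096"
 *
 * tcp_init() below falls back to 512 if the configured value is not a
 * power of two.
 */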
  237 
  238 /*
  239  * XXX
  240  * Callouts should be moved into struct tcp directly.  They are currently
   241  * separate because the tcpcb structure is exported to userland for sysctl
   242  * parsing, and userland knows nothing about callouts.
  243  */
  244 struct tcpcb_mem {
  245         struct  tcpcb           tcb;
  246         struct  tcp_timer       tt;
  247         struct  cc_var          ccv;
  248         struct  osd             osd;
  249 };
  250 
  251 static VNET_DEFINE(uma_zone_t, tcpcb_zone);
  252 #define V_tcpcb_zone                    VNET(tcpcb_zone)
  253 
  254 MALLOC_DEFINE(M_TCPLOG, "tcplog", "TCP address and flags print buffers");
  255 static struct mtx isn_mtx;
  256 
  257 #define ISN_LOCK_INIT() mtx_init(&isn_mtx, "isn_mtx", NULL, MTX_DEF)
  258 #define ISN_LOCK()      mtx_lock(&isn_mtx)
  259 #define ISN_UNLOCK()    mtx_unlock(&isn_mtx)
  260 
  261 /*
  262  * TCP initialization.
  263  */
  264 static void
  265 tcp_zone_change(void *tag)
  266 {
  267 
  268         uma_zone_set_max(V_tcbinfo.ipi_zone, maxsockets);
  269         uma_zone_set_max(V_tcpcb_zone, maxsockets);
  270         tcp_tw_zone_change();
  271 }
  272 
  273 static int
  274 tcp_inpcb_init(void *mem, int size, int flags)
  275 {
  276         struct inpcb *inp = mem;
  277 
  278         INP_LOCK_INIT(inp, "inp", "tcpinp");
  279         return (0);
  280 }
  281 
  282 void
  283 tcp_init(void)
  284 {
  285         int hashsize;
  286 
  287         if (hhook_head_register(HHOOK_TYPE_TCP, HHOOK_TCP_EST_IN,
  288             &V_tcp_hhh[HHOOK_TCP_EST_IN], HHOOK_NOWAIT|HHOOK_HEADISINVNET) != 0)
  289                 printf("%s: WARNING: unable to register helper hook\n", __func__);
  290         if (hhook_head_register(HHOOK_TYPE_TCP, HHOOK_TCP_EST_OUT,
  291             &V_tcp_hhh[HHOOK_TCP_EST_OUT], HHOOK_NOWAIT|HHOOK_HEADISINVNET) != 0)
  292                 printf("%s: WARNING: unable to register helper hook\n", __func__);
  293 
  294         hashsize = TCBHASHSIZE;
  295         TUNABLE_INT_FETCH("net.inet.tcp.tcbhashsize", &hashsize);
  296         if (!powerof2(hashsize)) {
  297                 printf("WARNING: TCB hash size not a power of 2\n");
  298                 hashsize = 512; /* safe default */
  299         }
  300         in_pcbinfo_init(&V_tcbinfo, "tcp", &V_tcb, hashsize, hashsize,
  301             "tcp_inpcb", tcp_inpcb_init, NULL, UMA_ZONE_NOFREE,
  302             IPI_HASHFIELDS_4TUPLE);
  303 
  304         /*
  305          * These have to be type stable for the benefit of the timers.
  306          */
  307         V_tcpcb_zone = uma_zcreate("tcpcb", sizeof(struct tcpcb_mem),
  308             NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
  309         uma_zone_set_max(V_tcpcb_zone, maxsockets);
  310 
  311         tcp_tw_init();
  312         syncache_init();
  313         tcp_hc_init();
  314         tcp_reass_init();
  315 
  316         TUNABLE_INT_FETCH("net.inet.tcp.sack.enable", &V_tcp_do_sack);
  317         V_sack_hole_zone = uma_zcreate("sackhole", sizeof(struct sackhole),
  318             NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
  319 
  320         /* Skip initialization of globals for non-default instances. */
  321         if (!IS_DEFAULT_VNET(curvnet))
  322                 return;
  323 
   324         /* XXX virtualize those below? */
  325         tcp_delacktime = TCPTV_DELACK;
  326         tcp_keepinit = TCPTV_KEEP_INIT;
  327         tcp_keepidle = TCPTV_KEEP_IDLE;
  328         tcp_keepintvl = TCPTV_KEEPINTVL;
  329         tcp_maxpersistidle = TCPTV_KEEP_IDLE;
  330         tcp_msl = TCPTV_MSL;
  331         tcp_rexmit_min = TCPTV_MIN;
  332         if (tcp_rexmit_min < 1)
  333                 tcp_rexmit_min = 1;
  334         tcp_rexmit_slop = TCPTV_CPU_VAR;
  335         tcp_finwait2_timeout = TCPTV_FINWAIT2_TIMEOUT;
  336         tcp_tcbhashsize = hashsize;
  337 
  338         TUNABLE_INT_FETCH("net.inet.tcp.soreceive_stream", &tcp_soreceive_stream);
  339         if (tcp_soreceive_stream) {
  340 #ifdef INET
  341                 tcp_usrreqs.pru_soreceive = soreceive_stream;
  342 #endif
  343 #ifdef INET6
  344                 tcp6_usrreqs.pru_soreceive = soreceive_stream;
  345 #endif /* INET6 */
  346         }
  347 
  348 #ifdef INET6
  349 #define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
  350 #else /* INET6 */
  351 #define TCP_MINPROTOHDR (sizeof(struct tcpiphdr))
  352 #endif /* INET6 */
  353         if (max_protohdr < TCP_MINPROTOHDR)
  354                 max_protohdr = TCP_MINPROTOHDR;
  355         if (max_linkhdr + TCP_MINPROTOHDR > MHLEN)
  356                 panic("tcp_init");
  357 #undef TCP_MINPROTOHDR
  358 
  359         ISN_LOCK_INIT();
  360         EVENTHANDLER_REGISTER(shutdown_pre_sync, tcp_fini, NULL,
  361                 SHUTDOWN_PRI_DEFAULT);
  362         EVENTHANDLER_REGISTER(maxsockets_change, tcp_zone_change, NULL,
  363                 EVENTHANDLER_PRI_ANY);
  364 }
  365 
  366 #ifdef VIMAGE
  367 void
  368 tcp_destroy(void)
  369 {
  370 
  371         tcp_reass_destroy();
  372         tcp_hc_destroy();
  373         syncache_destroy();
  374         tcp_tw_destroy();
  375         in_pcbinfo_destroy(&V_tcbinfo);
  376         uma_zdestroy(V_sack_hole_zone);
  377         uma_zdestroy(V_tcpcb_zone);
  378 }
  379 #endif
  380 
  381 void
  382 tcp_fini(void *xtp)
  383 {
  384 
  385 }
  386 
  387 /*
  388  * Fill in the IP and TCP headers for an outgoing packet, given the tcpcb.
  389  * tcp_template used to store this data in mbufs, but we now recopy it out
  390  * of the tcpcb each time to conserve mbufs.
  391  */
  392 void
  393 tcpip_fillheaders(struct inpcb *inp, void *ip_ptr, void *tcp_ptr)
  394 {
  395         struct tcphdr *th = (struct tcphdr *)tcp_ptr;
  396 
  397         INP_WLOCK_ASSERT(inp);
  398 
  399 #ifdef INET6
  400         if ((inp->inp_vflag & INP_IPV6) != 0) {
  401                 struct ip6_hdr *ip6;
  402 
  403                 ip6 = (struct ip6_hdr *)ip_ptr;
  404                 ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
  405                         (inp->inp_flow & IPV6_FLOWINFO_MASK);
  406                 ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) |
  407                         (IPV6_VERSION & IPV6_VERSION_MASK);
  408                 ip6->ip6_nxt = IPPROTO_TCP;
  409                 ip6->ip6_plen = htons(sizeof(struct tcphdr));
  410                 ip6->ip6_src = inp->in6p_laddr;
  411                 ip6->ip6_dst = inp->in6p_faddr;
  412         }
  413 #endif /* INET6 */
  414 #if defined(INET6) && defined(INET)
  415         else
  416 #endif
  417 #ifdef INET
  418         {
  419                 struct ip *ip;
  420 
  421                 ip = (struct ip *)ip_ptr;
  422                 ip->ip_v = IPVERSION;
  423                 ip->ip_hl = 5;
  424                 ip->ip_tos = inp->inp_ip_tos;
  425                 ip->ip_len = 0;
  426                 ip->ip_id = 0;
  427                 ip->ip_off = 0;
  428                 ip->ip_ttl = inp->inp_ip_ttl;
  429                 ip->ip_sum = 0;
  430                 ip->ip_p = IPPROTO_TCP;
  431                 ip->ip_src = inp->inp_laddr;
  432                 ip->ip_dst = inp->inp_faddr;
  433         }
  434 #endif /* INET */
  435         th->th_sport = inp->inp_lport;
  436         th->th_dport = inp->inp_fport;
  437         th->th_seq = 0;
  438         th->th_ack = 0;
  439         th->th_x2 = 0;
  440         th->th_off = 5;
  441         th->th_flags = 0;
  442         th->th_win = 0;
  443         th->th_urp = 0;
  444         th->th_sum = 0;         /* in_pseudo() is called later for ipv4 */
  445 }
  446 
  447 /*
  448  * Create template to be used to send tcp packets on a connection.
  449  * Allocates an mbuf and fills in a skeletal tcp/ip header.  The only
  450  * use for this function is in keepalives, which use tcp_respond.
  451  */
  452 struct tcptemp *
  453 tcpip_maketemplate(struct inpcb *inp)
  454 {
  455         struct tcptemp *t;
  456 
  457         t = malloc(sizeof(*t), M_TEMP, M_NOWAIT);
  458         if (t == NULL)
  459                 return (NULL);
  460         tcpip_fillheaders(inp, (void *)&t->tt_ipgen, (void *)&t->tt_t);
  461         return (t);
  462 }
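
/*
 * Editorial sketch: the keepalive use mentioned in the comment above, in
 * the shape the keepalive timer in tcp_timer.c uses these helpers
 * (paraphrased, not verbatim kernel code).  With m == NULL and flags == 0,
 * tcp_respond() below emits an ACK-only probe; carrying snd_una - 1 as the
 * sequence number provokes an ACK from a live peer.
 */
static void
example_send_keepalive_probe(struct tcpcb *tp, struct inpcb *inp)
{
	struct tcptemp *t_template;

	t_template = tcpip_maketemplate(inp);
	if (t_template != NULL) {
		tcp_respond(tp, t_template->tt_ipgen, &t_template->tt_t,
		    NULL, tp->rcv_nxt, tp->snd_una - 1, 0);
		free(t_template, M_TEMP);
	}
}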
  463 
  464 /*
  465  * Send a single message to the TCP at address specified by
  466  * the given TCP/IP header.  If m == NULL, then we make a copy
  467  * of the tcpiphdr at ti and send directly to the addressed host.
  468  * This is used to force keep alive messages out using the TCP
  469  * template for a connection.  If flags are given then we send
   470  * a message back to the TCP which originated the segment ti,
  471  * and discard the mbuf containing it and any other attached mbufs.
  472  *
  473  * In any case the ack and sequence number of the transmitted
  474  * segment are as specified by the parameters.
  475  *
  476  * NOTE: If m != NULL, then ti must point to *inside* the mbuf.
  477  */
  478 void
  479 tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m,
  480     tcp_seq ack, tcp_seq seq, int flags)
  481 {
  482         int tlen;
  483         int win = 0;
  484         struct ip *ip;
  485         struct tcphdr *nth;
  486 #ifdef INET6
  487         struct ip6_hdr *ip6;
  488         int isipv6;
  489 #endif /* INET6 */
  490         int ipflags = 0;
  491         struct inpcb *inp;
  492 
  493         KASSERT(tp != NULL || m != NULL, ("tcp_respond: tp and m both NULL"));
  494 
  495 #ifdef INET6
  496         isipv6 = ((struct ip *)ipgen)->ip_v == (IPV6_VERSION >> 4);
  497         ip6 = ipgen;
  498 #endif /* INET6 */
  499         ip = ipgen;
  500 
  501         if (tp != NULL) {
  502                 inp = tp->t_inpcb;
  503                 KASSERT(inp != NULL, ("tcp control block w/o inpcb"));
  504                 INP_WLOCK_ASSERT(inp);
  505         } else
  506                 inp = NULL;
  507 
  508         if (tp != NULL) {
  509                 if (!(flags & TH_RST)) {
  510                         win = sbspace(&inp->inp_socket->so_rcv);
  511                         if (win > (long)TCP_MAXWIN << tp->rcv_scale)
  512                                 win = (long)TCP_MAXWIN << tp->rcv_scale;
  513                 }
  514         }
  515         if (m == NULL) {
  516                 m = m_gethdr(M_DONTWAIT, MT_DATA);
  517                 if (m == NULL)
  518                         return;
  519                 tlen = 0;
  520                 m->m_data += max_linkhdr;
  521 #ifdef INET6
  522                 if (isipv6) {
  523                         bcopy((caddr_t)ip6, mtod(m, caddr_t),
  524                               sizeof(struct ip6_hdr));
  525                         ip6 = mtod(m, struct ip6_hdr *);
  526                         nth = (struct tcphdr *)(ip6 + 1);
  527                 } else
  528 #endif /* INET6 */
  529               {
  530                 bcopy((caddr_t)ip, mtod(m, caddr_t), sizeof(struct ip));
  531                 ip = mtod(m, struct ip *);
  532                 nth = (struct tcphdr *)(ip + 1);
  533               }
  534                 bcopy((caddr_t)th, (caddr_t)nth, sizeof(struct tcphdr));
  535                 flags = TH_ACK;
  536         } else {
  537                 /*
   538                  * Reuse the mbuf.
   539                  * XXX MRT We inherit the FIB, which is lucky.
  540                  */
  541                 m_freem(m->m_next);
  542                 m->m_next = NULL;
  543                 m->m_data = (caddr_t)ipgen;
  544                 m_addr_changed(m);
  545                 /* m_len is set later */
  546                 tlen = 0;
  547 #define xchg(a,b,type) { type t; t=a; a=b; b=t; }
  548 #ifdef INET6
  549                 if (isipv6) {
  550                         xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
  551                         nth = (struct tcphdr *)(ip6 + 1);
  552                 } else
  553 #endif /* INET6 */
  554               {
  555                 xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, uint32_t);
  556                 nth = (struct tcphdr *)(ip + 1);
  557               }
  558                 if (th != nth) {
  559                         /*
  560                          * this is usually a case when an extension header
  561                          * exists between the IPv6 header and the
  562                          * TCP header.
  563                          */
  564                         nth->th_sport = th->th_sport;
  565                         nth->th_dport = th->th_dport;
  566                 }
  567                 xchg(nth->th_dport, nth->th_sport, uint16_t);
  568 #undef xchg
  569         }
  570 #ifdef INET6
  571         if (isipv6) {
  572                 ip6->ip6_flow = 0;
  573                 ip6->ip6_vfc = IPV6_VERSION;
  574                 ip6->ip6_nxt = IPPROTO_TCP;
  575                 ip6->ip6_plen = htons((u_short)(sizeof (struct tcphdr) +
  576                                                 tlen));
  577                 tlen += sizeof (struct ip6_hdr) + sizeof (struct tcphdr);
  578         }
  579 #endif
  580 #if defined(INET) && defined(INET6)
  581         else
  582 #endif
  583 #ifdef INET
  584         {
  585                 tlen += sizeof (struct tcpiphdr);
  586                 ip->ip_len = tlen;
  587                 ip->ip_ttl = V_ip_defttl;
  588                 if (V_path_mtu_discovery)
  589                         ip->ip_off |= IP_DF;
  590         }
  591 #endif
  592         m->m_len = tlen;
  593         m->m_pkthdr.len = tlen;
  594         m->m_pkthdr.rcvif = NULL;
  595 #ifdef MAC
  596         if (inp != NULL) {
  597                 /*
  598                  * Packet is associated with a socket, so allow the
  599                  * label of the response to reflect the socket label.
  600                  */
  601                 INP_WLOCK_ASSERT(inp);
  602                 mac_inpcb_create_mbuf(inp, m);
  603         } else {
  604                 /*
  605                  * Packet is not associated with a socket, so possibly
  606                  * update the label in place.
  607                  */
  608                 mac_netinet_tcp_reply(m);
  609         }
  610 #endif
  611         nth->th_seq = htonl(seq);
  612         nth->th_ack = htonl(ack);
  613         nth->th_x2 = 0;
  614         nth->th_off = sizeof (struct tcphdr) >> 2;
  615         nth->th_flags = flags;
  616         if (tp != NULL)
  617                 nth->th_win = htons((u_short) (win >> tp->rcv_scale));
  618         else
  619                 nth->th_win = htons((u_short)win);
  620         nth->th_urp = 0;
  621 #ifdef INET6
  622         if (isipv6) {
  623                 nth->th_sum = 0;
  624                 nth->th_sum = in6_cksum(m, IPPROTO_TCP,
  625                                         sizeof(struct ip6_hdr),
  626                                         tlen - sizeof(struct ip6_hdr));
  627                 ip6->ip6_hlim = in6_selecthlim(tp != NULL ? tp->t_inpcb :
  628                     NULL, NULL);
  629         }
  630 #endif /* INET6 */
  631 #if defined(INET6) && defined(INET)
  632         else
  633 #endif
  634 #ifdef INET
  635         {
  636                 nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
  637                     htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
  638                 m->m_pkthdr.csum_flags = CSUM_TCP;
  639                 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
  640         }
  641 #endif /* INET */
  642 #ifdef TCPDEBUG
  643         if (tp == NULL || (inp->inp_socket->so_options & SO_DEBUG))
  644                 tcp_trace(TA_OUTPUT, 0, tp, mtod(m, void *), th, 0);
  645 #endif
  646 #ifdef INET6
  647         if (isipv6)
  648                 (void) ip6_output(m, NULL, NULL, ipflags, NULL, NULL, inp);
  649 #endif /* INET6 */
  650 #if defined(INET) && defined(INET6)
  651         else
  652 #endif
  653 #ifdef INET
  654                 (void) ip_output(m, NULL, NULL, ipflags, NULL, inp);
  655 #endif
  656 }
  657 
  658 /*
  659  * Create a new TCP control block, making an
  660  * empty reassembly queue and hooking it to the argument
  661  * protocol control block.  The `inp' parameter must have
  662  * come from the zone allocator set up in tcp_init().
  663  */
  664 struct tcpcb *
  665 tcp_newtcpcb(struct inpcb *inp)
  666 {
  667         struct tcpcb_mem *tm;
  668         struct tcpcb *tp;
  669 #ifdef INET6
  670         int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
  671 #endif /* INET6 */
  672 
  673         tm = uma_zalloc(V_tcpcb_zone, M_NOWAIT | M_ZERO);
  674         if (tm == NULL)
  675                 return (NULL);
  676         tp = &tm->tcb;
  677 
  678         /* Initialise cc_var struct for this tcpcb. */
  679         tp->ccv = &tm->ccv;
  680         tp->ccv->type = IPPROTO_TCP;
  681         tp->ccv->ccvc.tcp = tp;
  682 
  683         /*
  684          * Use the current system default CC algorithm.
  685          */
  686         CC_LIST_RLOCK();
  687         KASSERT(!STAILQ_EMPTY(&cc_list), ("cc_list is empty!"));
  688         CC_ALGO(tp) = CC_DEFAULT();
  689         CC_LIST_RUNLOCK();
  690 
  691         if (CC_ALGO(tp)->cb_init != NULL)
  692                 if (CC_ALGO(tp)->cb_init(tp->ccv) > 0) {
  693                         uma_zfree(V_tcpcb_zone, tm);
  694                         return (NULL);
  695                 }
  696 
  697         tp->osd = &tm->osd;
  698         if (khelp_init_osd(HELPER_CLASS_TCP, tp->osd)) {
  699                 uma_zfree(V_tcpcb_zone, tm);
  700                 return (NULL);
  701         }
  702 
  703 #ifdef VIMAGE
  704         tp->t_vnet = inp->inp_vnet;
  705 #endif
  706         tp->t_timers = &tm->tt;
  707         /*      LIST_INIT(&tp->t_segq); */      /* XXX covered by M_ZERO */
  708         tp->t_maxseg = tp->t_maxopd =
  709 #ifdef INET6
  710                 isipv6 ? V_tcp_v6mssdflt :
  711 #endif /* INET6 */
  712                 V_tcp_mssdflt;
  713 
  714         /* Set up our timeouts. */
  715         callout_init(&tp->t_timers->tt_rexmt, CALLOUT_MPSAFE);
  716         callout_init(&tp->t_timers->tt_persist, CALLOUT_MPSAFE);
  717         callout_init(&tp->t_timers->tt_keep, CALLOUT_MPSAFE);
  718         callout_init(&tp->t_timers->tt_2msl, CALLOUT_MPSAFE);
  719         callout_init(&tp->t_timers->tt_delack, CALLOUT_MPSAFE);
  720 
  721         if (V_tcp_do_rfc1323)
  722                 tp->t_flags = (TF_REQ_SCALE|TF_REQ_TSTMP);
  723         if (V_tcp_do_sack)
  724                 tp->t_flags |= TF_SACK_PERMIT;
  725         TAILQ_INIT(&tp->snd_holes);
  726         tp->t_inpcb = inp;      /* XXX */
  727         /*
  728          * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
  729          * rtt estimate.  Set rttvar so that srtt + 4 * rttvar gives
  730          * reasonable initial retransmit time.
  731          */
  732         tp->t_srtt = TCPTV_SRTTBASE;
  733         tp->t_rttvar = ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
  734         tp->t_rttmin = tcp_rexmit_min;
  735         tp->t_rxtcur = TCPTV_RTOBASE;
  736         tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
  737         tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
  738         tp->t_rcvtime = ticks;
  739         /*
  740          * IPv4 TTL initialization is necessary for an IPv6 socket as well,
  741          * because the socket may be bound to an IPv6 wildcard address,
  742          * which may match an IPv4-mapped IPv6 address.
  743          */
  744         inp->inp_ip_ttl = V_ip_defttl;
  745         inp->inp_ppcb = tp;
  746         return (tp);            /* XXX */
  747 }
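
/*
 * Editorial note on the rttvar initialisation above, using the constants
 * from tcp_timer.h and tcp_var.h for illustration: with TCPTV_SRTTBASE = 0,
 * TCPTV_RTOBASE = 3 * hz and TCP_RTTVAR_SHIFT = 4,
 *
 *	t_rttvar = ((3 * hz - 0) << 4) / 4 = 12 * hz
 *
 * which is 0.75 s once the 1/16 fixed-point scaling is removed, so
 * srtt + 4 * rttvar = 0 + 4 * 0.75 s = 3 s, matching the initial t_rxtcur
 * of TCPTV_RTOBASE set just below it.
 */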
  748 
  749 /*
  750  * Switch the congestion control algorithm back to NewReno for any active
  751  * control blocks using an algorithm which is about to go away.
  752  * This ensures the CC framework can allow the unload to proceed without leaving
  753  * any dangling pointers which would trigger a panic.
  754  * Returning non-zero would inform the CC framework that something went wrong
  755  * and it would be unsafe to allow the unload to proceed. However, there is no
  756  * way for this to occur with this implementation so we always return zero.
  757  */
  758 int
  759 tcp_ccalgounload(struct cc_algo *unload_algo)
  760 {
  761         struct cc_algo *tmpalgo;
  762         struct inpcb *inp;
  763         struct tcpcb *tp;
  764         VNET_ITERATOR_DECL(vnet_iter);
  765 
  766         /*
  767          * Check all active control blocks across all network stacks and change
  768          * any that are using "unload_algo" back to NewReno. If "unload_algo"
  769          * requires cleanup code to be run, call it.
  770          */
  771         VNET_LIST_RLOCK();
  772         VNET_FOREACH(vnet_iter) {
  773                 CURVNET_SET(vnet_iter);
  774                 INP_INFO_RLOCK(&V_tcbinfo);
  775                 /*
  776                  * New connections already part way through being initialised
  777                  * with the CC algo we're removing will not race with this code
  778                  * because the INP_INFO_WLOCK is held during initialisation. We
  779                  * therefore don't enter the loop below until the connection
  780                  * list has stabilised.
  781                  */
  782                 LIST_FOREACH(inp, &V_tcb, inp_list) {
  783                         INP_WLOCK(inp);
  784                         /* Important to skip tcptw structs. */
  785                         if (!(inp->inp_flags & INP_TIMEWAIT) &&
  786                             (tp = intotcpcb(inp)) != NULL) {
  787                                 /*
  788                                  * By holding INP_WLOCK here, we are assured
  789                                  * that the connection is not currently
  790                                  * executing inside the CC module's functions
  791                                  * i.e. it is safe to make the switch back to
  792                                  * NewReno.
  793                                  */
  794                                 if (CC_ALGO(tp) == unload_algo) {
  795                                         tmpalgo = CC_ALGO(tp);
  796                                         /* NewReno does not require any init. */
  797                                         CC_ALGO(tp) = &newreno_cc_algo;
  798                                         if (tmpalgo->cb_destroy != NULL)
  799                                                 tmpalgo->cb_destroy(tp->ccv);
  800                                 }
  801                         }
  802                         INP_WUNLOCK(inp);
  803                 }
  804                 INP_INFO_RUNLOCK(&V_tcbinfo);
  805                 CURVNET_RESTORE();
  806         }
  807         VNET_LIST_RUNLOCK();
  808 
  809         return (0);
  810 }
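
/*
 * Editorial sketch: the shape of a congestion control module that
 * tcp_ccalgounload() protects on unload.  Field names follow struct cc_algo
 * in netinet/cc.h for this release; the "example" callbacks, their bodies
 * and the header list are illustrative assumptions, and the usual kernel
 * module prerequisites (sys/param.h, sys/module.h, ...) are omitted.
 * cb_destroy() is the hook the loop above invokes for each connection it
 * switches back to NewReno before the module text disappears.
 */
#include <netinet/cc.h>			/* struct cc_algo, struct cc_var */
#include <netinet/cc/cc_module.h>	/* DECLARE_CC_MODULE() */

static int
example_cb_init(struct cc_var *ccv)
{
	/* Allocate per-connection state here; return > 0 on failure. */
	return (0);
}

static void
example_cb_destroy(struct cc_var *ccv)
{
	/* Release whatever example_cb_init() allocated. */
}

struct cc_algo example_cc_algo = {
	.name = "example",
	.cb_init = example_cb_init,
	.cb_destroy = example_cb_destroy,
};

DECLARE_CC_MODULE(example, &example_cc_algo);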
  811 
  812 /*
  813  * Drop a TCP connection, reporting
  814  * the specified error.  If connection is synchronized,
  815  * then send a RST to peer.
  816  */
  817 struct tcpcb *
  818 tcp_drop(struct tcpcb *tp, int errno)
  819 {
  820         struct socket *so = tp->t_inpcb->inp_socket;
  821 
  822         INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
  823         INP_WLOCK_ASSERT(tp->t_inpcb);
  824 
  825         if (TCPS_HAVERCVDSYN(tp->t_state)) {
  826                 tp->t_state = TCPS_CLOSED;
  827                 (void) tcp_output_reset(tp);
  828                 TCPSTAT_INC(tcps_drops);
  829         } else
  830                 TCPSTAT_INC(tcps_conndrops);
  831         if (errno == ETIMEDOUT && tp->t_softerror)
  832                 errno = tp->t_softerror;
  833         so->so_error = errno;
  834         return (tcp_close(tp));
  835 }
  836 
  837 void
  838 tcp_discardcb(struct tcpcb *tp)
  839 {
  840         struct inpcb *inp = tp->t_inpcb;
  841         struct socket *so = inp->inp_socket;
  842 #ifdef INET6
  843         int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
  844 #endif /* INET6 */
  845 
  846         INP_WLOCK_ASSERT(inp);
  847 
  848         /*
  849          * Make sure that all of our timers are stopped before we delete the
  850          * PCB.
  851          *
  852          * XXXRW: Really, we would like to use callout_drain() here in order
  853          * to avoid races experienced in tcp_timer.c where a timer is already
  854          * executing at this point.  However, we can't, both because we're
  855          * running in a context where we can't sleep, and also because we
  856          * hold locks required by the timers.  What we instead need to do is
  857          * test to see if callout_drain() is required, and if so, defer some
  858          * portion of the remainder of tcp_discardcb() to an asynchronous
  859          * context that can callout_drain() and then continue.  Some care
  860          * will be required to ensure that no further processing takes place
  861          * on the tcpcb, even though it hasn't been freed (a flag?).
  862          */
  863         callout_stop(&tp->t_timers->tt_rexmt);
  864         callout_stop(&tp->t_timers->tt_persist);
  865         callout_stop(&tp->t_timers->tt_keep);
  866         callout_stop(&tp->t_timers->tt_2msl);
  867         callout_stop(&tp->t_timers->tt_delack);
  868 
  869         /*
  870          * If we got enough samples through the srtt filter,
  871          * save the rtt and rttvar in the routing entry.
  872          * 'Enough' is arbitrarily defined as 4 rtt samples.
  873          * 4 samples is enough for the srtt filter to converge
  874          * to within enough % of the correct value; fewer samples
  875          * and we could save a bogus rtt. The danger is not high
  876          * as tcp quickly recovers from everything.
  877          * XXX: Works very well but needs some more statistics!
  878          */
  879         if (tp->t_rttupdated >= 4) {
  880                 struct hc_metrics_lite metrics;
  881                 u_long ssthresh;
  882 
  883                 bzero(&metrics, sizeof(metrics));
  884                 /*
   885                  * Always update the ssthresh when the conditions below
   886                  * are satisfied. This gives us a better starting value
   887                  * for congestion avoidance on new connections.
   888                  * ssthresh is only set if packet loss occurred on a session.
  889                  *
  890                  * XXXRW: 'so' may be NULL here, and/or socket buffer may be
  891                  * being torn down.  Ideally this code would not use 'so'.
  892                  */
  893                 ssthresh = tp->snd_ssthresh;
  894                 if (ssthresh != 0 && ssthresh < so->so_snd.sb_hiwat / 2) {
  895                         /*
  896                          * convert the limit from user data bytes to
  897                          * packets then to packet data bytes.
  898                          */
  899                         ssthresh = (ssthresh + tp->t_maxseg / 2) / tp->t_maxseg;
  900                         if (ssthresh < 2)
  901                                 ssthresh = 2;
  902                         ssthresh *= (u_long)(tp->t_maxseg +
  903 #ifdef INET6
  904                                       (isipv6 ? sizeof (struct ip6_hdr) +
  905                                                sizeof (struct tcphdr) :
  906 #endif
  907                                        sizeof (struct tcpiphdr)
  908 #ifdef INET6
  909                                        )
  910 #endif
  911                                       );
  912                 } else
  913                         ssthresh = 0;
  914                 metrics.rmx_ssthresh = ssthresh;
  915 
  916                 metrics.rmx_rtt = tp->t_srtt;
  917                 metrics.rmx_rttvar = tp->t_rttvar;
  918                 metrics.rmx_cwnd = tp->snd_cwnd;
  919                 metrics.rmx_sendpipe = 0;
  920                 metrics.rmx_recvpipe = 0;
  921 
  922                 tcp_hc_update(&inp->inp_inc, &metrics);
  923         }
  924 
  925         /* free the reassembly queue, if any */
  926         tcp_reass_flush(tp);
  927         /* Disconnect offload device, if any. */
  928         tcp_offload_detach(tp);
  929                 
  930         tcp_free_sackholes(tp);
  931 
  932         /* Allow the CC algorithm to clean up after itself. */
  933         if (CC_ALGO(tp)->cb_destroy != NULL)
  934                 CC_ALGO(tp)->cb_destroy(tp->ccv);
  935 
  936         khelp_destroy_osd(tp->osd);
  937 
  938         CC_ALGO(tp) = NULL;
  939         inp->inp_ppcb = NULL;
  940         tp->t_inpcb = NULL;
  941         uma_zfree(V_tcpcb_zone, tp);
  942 }
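
/*
 * Editorial note on the ssthresh conversion above, with illustrative
 * numbers: for snd_ssthresh = 32768 bytes of user data and t_maxseg = 1460,
 *
 *	(32768 + 730) / 1460 = 22 segments
 *	22 * (1460 + 40)     = 33000 bytes
 *
 * i.e. the hostcache entry is rounded to whole segments and re-expressed
 * in packet data bytes (MSS plus the 40-byte IPv4 + TCP header).
 */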
  943 
  944 /*
  945  * Attempt to close a TCP control block, marking it as dropped, and freeing
  946  * the socket if we hold the only reference.
  947  */
  948 struct tcpcb *
  949 tcp_close(struct tcpcb *tp)
  950 {
  951         struct inpcb *inp = tp->t_inpcb;
  952         struct socket *so;
  953 
  954         INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
  955         INP_WLOCK_ASSERT(inp);
  956 
  957         /* Notify any offload devices of listener close */
  958         if (tp->t_state == TCPS_LISTEN)
  959                 tcp_offload_listen_close(tp);
  960         in_pcbdrop(inp);
  961         TCPSTAT_INC(tcps_closed);
  962         KASSERT(inp->inp_socket != NULL, ("tcp_close: inp_socket NULL"));
  963         so = inp->inp_socket;
  964         soisdisconnected(so);
  965         if (inp->inp_flags & INP_SOCKREF) {
  966                 KASSERT(so->so_state & SS_PROTOREF,
  967                     ("tcp_close: !SS_PROTOREF"));
  968                 inp->inp_flags &= ~INP_SOCKREF;
  969                 INP_WUNLOCK(inp);
  970                 ACCEPT_LOCK();
  971                 SOCK_LOCK(so);
  972                 so->so_state &= ~SS_PROTOREF;
  973                 sofree(so);
  974                 return (NULL);
  975         }
  976         return (tp);
  977 }
  978 
  979 void
  980 tcp_drain(void)
  981 {
  982         VNET_ITERATOR_DECL(vnet_iter);
  983 
  984         if (!do_tcpdrain)
  985                 return;
  986 
  987         VNET_LIST_RLOCK_NOSLEEP();
  988         VNET_FOREACH(vnet_iter) {
  989                 CURVNET_SET(vnet_iter);
  990                 struct inpcb *inpb;
  991                 struct tcpcb *tcpb;
  992 
  993         /*
  994          * Walk the tcpbs, if existing, and flush the reassembly queue,
  995          * if there is one...
  996          * XXX: The "Net/3" implementation doesn't imply that the TCP
  997          *      reassembly queue should be flushed, but in a situation
  998          *      where we're really low on mbufs, this is potentially
   999          *      useful.
 1000          */
 1001                 INP_INFO_RLOCK(&V_tcbinfo);
 1002                 LIST_FOREACH(inpb, V_tcbinfo.ipi_listhead, inp_list) {
 1003                         if (inpb->inp_flags & INP_TIMEWAIT)
 1004                                 continue;
 1005                         INP_WLOCK(inpb);
 1006                         if ((tcpb = intotcpcb(inpb)) != NULL) {
 1007                                 tcp_reass_flush(tcpb);
 1008                                 tcp_clean_sackreport(tcpb);
 1009                         }
 1010                         INP_WUNLOCK(inpb);
 1011                 }
 1012                 INP_INFO_RUNLOCK(&V_tcbinfo);
 1013                 CURVNET_RESTORE();
 1014         }
 1015         VNET_LIST_RUNLOCK_NOSLEEP();
 1016 }
 1017 
 1018 /*
 1019  * Notify a tcp user of an asynchronous error;
 1020  * store error as soft error, but wake up user
 1021  * (for now, won't do anything until can select for soft error).
 1022  *
 1023  * Do not wake up user since there currently is no mechanism for
 1024  * reporting soft errors (yet - a kqueue filter may be added).
 1025  */
 1026 static struct inpcb *
 1027 tcp_notify(struct inpcb *inp, int error)
 1028 {
 1029         struct tcpcb *tp;
 1030 
 1031         INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
 1032         INP_WLOCK_ASSERT(inp);
 1033 
 1034         if ((inp->inp_flags & INP_TIMEWAIT) ||
 1035             (inp->inp_flags & INP_DROPPED))
 1036                 return (inp);
 1037 
 1038         tp = intotcpcb(inp);
 1039         KASSERT(tp != NULL, ("tcp_notify: tp == NULL"));
 1040 
 1041         /*
 1042          * Ignore some errors if we are hooked up.
 1043          * If connection hasn't completed, has retransmitted several times,
 1044          * and receives a second error, give up now.  This is better
 1045          * than waiting a long time to establish a connection that
 1046          * can never complete.
 1047          */
 1048         if (tp->t_state == TCPS_ESTABLISHED &&
 1049             (error == EHOSTUNREACH || error == ENETUNREACH ||
 1050              error == EHOSTDOWN)) {
 1051                 return (inp);
 1052         } else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
 1053             tp->t_softerror) {
 1054                 tp = tcp_drop(tp, error);
 1055                 if (tp != NULL)
 1056                         return (inp);
 1057                 else
 1058                         return (NULL);
 1059         } else {
 1060                 tp->t_softerror = error;
 1061                 return (inp);
 1062         }
 1063 #if 0
 1064         wakeup( &so->so_timeo);
 1065         sorwakeup(so);
 1066         sowwakeup(so);
 1067 #endif
 1068 }
 1069 
 1070 static int
 1071 tcp_pcblist(SYSCTL_HANDLER_ARGS)
 1072 {
 1073         int error, i, m, n, pcb_count;
 1074         struct inpcb *inp, **inp_list;
 1075         inp_gen_t gencnt;
 1076         struct xinpgen xig;
 1077 
 1078         /*
 1079          * The process of preparing the TCB list is too time-consuming and
 1080          * resource-intensive to repeat twice on every request.
 1081          */
 1082         if (req->oldptr == NULL) {
 1083                 n = V_tcbinfo.ipi_count + syncache_pcbcount();
 1084                 n += imax(n / 8, 10);
 1085                 req->oldidx = 2 * (sizeof xig) + n * sizeof(struct xtcpcb);
 1086                 return (0);
 1087         }
 1088 
 1089         if (req->newptr != NULL)
 1090                 return (EPERM);
 1091 
 1092         /*
 1093          * OK, now we're committed to doing something.
 1094          */
 1095         INP_INFO_RLOCK(&V_tcbinfo);
 1096         gencnt = V_tcbinfo.ipi_gencnt;
 1097         n = V_tcbinfo.ipi_count;
 1098         INP_INFO_RUNLOCK(&V_tcbinfo);
 1099 
 1100         m = syncache_pcbcount();
 1101 
 1102         error = sysctl_wire_old_buffer(req, 2 * (sizeof xig)
 1103                 + (n + m) * sizeof(struct xtcpcb));
 1104         if (error != 0)
 1105                 return (error);
 1106 
 1107         xig.xig_len = sizeof xig;
 1108         xig.xig_count = n + m;
 1109         xig.xig_gen = gencnt;
 1110         xig.xig_sogen = so_gencnt;
 1111         error = SYSCTL_OUT(req, &xig, sizeof xig);
 1112         if (error)
 1113                 return (error);
 1114 
 1115         error = syncache_pcblist(req, m, &pcb_count);
 1116         if (error)
 1117                 return (error);
 1118 
 1119         inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
 1120         if (inp_list == NULL)
 1121                 return (ENOMEM);
 1122 
 1123         INP_INFO_RLOCK(&V_tcbinfo);
 1124         for (inp = LIST_FIRST(V_tcbinfo.ipi_listhead), i = 0;
 1125             inp != NULL && i < n; inp = LIST_NEXT(inp, inp_list)) {
 1126                 INP_WLOCK(inp);
 1127                 if (inp->inp_gencnt <= gencnt) {
 1128                         /*
 1129                          * XXX: This use of cr_cansee(), introduced with
 1130                          * TCP state changes, is not quite right, but for
 1131                          * now, better than nothing.
 1132                          */
 1133                         if (inp->inp_flags & INP_TIMEWAIT) {
 1134                                 if (intotw(inp) != NULL)
 1135                                         error = cr_cansee(req->td->td_ucred,
 1136                                             intotw(inp)->tw_cred);
 1137                                 else
 1138                                         error = EINVAL; /* Skip this inp. */
 1139                         } else
 1140                                 error = cr_canseeinpcb(req->td->td_ucred, inp);
 1141                         if (error == 0) {
 1142                                 in_pcbref(inp);
 1143                                 inp_list[i++] = inp;
 1144                         }
 1145                 }
 1146                 INP_WUNLOCK(inp);
 1147         }
 1148         INP_INFO_RUNLOCK(&V_tcbinfo);
 1149         n = i;
 1150 
 1151         error = 0;
 1152         for (i = 0; i < n; i++) {
 1153                 inp = inp_list[i];
 1154                 INP_RLOCK(inp);
 1155                 if (inp->inp_gencnt <= gencnt) {
 1156                         struct xtcpcb xt;
 1157                         void *inp_ppcb;
 1158 
 1159                         bzero(&xt, sizeof(xt));
 1160                         xt.xt_len = sizeof xt;
 1161                         /* XXX should avoid extra copy */
 1162                         bcopy(inp, &xt.xt_inp, sizeof *inp);
 1163                         inp_ppcb = inp->inp_ppcb;
 1164                         if (inp_ppcb == NULL)
 1165                                 bzero((char *) &xt.xt_tp, sizeof xt.xt_tp);
 1166                         else if (inp->inp_flags & INP_TIMEWAIT) {
 1167                                 bzero((char *) &xt.xt_tp, sizeof xt.xt_tp);
 1168                                 xt.xt_tp.t_state = TCPS_TIME_WAIT;
 1169                         } else {
 1170                                 bcopy(inp_ppcb, &xt.xt_tp, sizeof xt.xt_tp);
 1171                                 if (xt.xt_tp.t_timers)
 1172                                         tcp_timer_to_xtimer(&xt.xt_tp, xt.xt_tp.t_timers, &xt.xt_timer);
 1173                         }
 1174                         if (inp->inp_socket != NULL)
 1175                                 sotoxsocket(inp->inp_socket, &xt.xt_socket);
 1176                         else {
 1177                                 bzero(&xt.xt_socket, sizeof xt.xt_socket);
 1178                                 xt.xt_socket.xso_protocol = IPPROTO_TCP;
 1179                         }
 1180                         xt.xt_inp.inp_gencnt = inp->inp_gencnt;
 1181                         INP_RUNLOCK(inp);
 1182                         error = SYSCTL_OUT(req, &xt, sizeof xt);
 1183                 } else
 1184                         INP_RUNLOCK(inp);
 1185         }
 1186         INP_INFO_WLOCK(&V_tcbinfo);
 1187         for (i = 0; i < n; i++) {
 1188                 inp = inp_list[i];
 1189                 INP_RLOCK(inp);
 1190                 if (!in_pcbrele_rlocked(inp))
 1191                         INP_RUNLOCK(inp);
 1192         }
 1193         INP_INFO_WUNLOCK(&V_tcbinfo);
 1194 
 1195         if (!error) {
 1196                 /*
 1197                  * Give the user an updated idea of our state.
 1198                  * If the generation differs from what we told
 1199                  * her before, she knows that something happened
 1200                  * while we were processing this request, and it
 1201                  * might be necessary to retry.
 1202                  */
 1203                 INP_INFO_RLOCK(&V_tcbinfo);
 1204                 xig.xig_gen = V_tcbinfo.ipi_gencnt;
 1205                 xig.xig_sogen = so_gencnt;
 1206                 xig.xig_count = V_tcbinfo.ipi_count + pcb_count;
 1207                 INP_INFO_RUNLOCK(&V_tcbinfo);
 1208                 error = SYSCTL_OUT(req, &xig, sizeof xig);
 1209         }
 1210         free(inp_list, M_TEMP);
 1211         return (error);
 1212 }
 1213 
 1214 SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist,
 1215     CTLTYPE_OPAQUE | CTLFLAG_RD, NULL, 0,
 1216     tcp_pcblist, "S,xtcpcb", "List of active TCP connections");
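
/*
 * Editorial sketch: the two-pass consumer that the req->oldptr == NULL
 * sizing branch in tcp_pcblist() is written for; netstat(1) follows the
 * same pattern.  Walking the returned xinpgen header, xtcpcb records and
 * trailing xinpgen is left out here.
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdlib.h>

static void *
fetch_tcp_pcblist(size_t *lenp)
{
	void *buf;
	size_t len = 0;

	/* Pass 1: the kernel returns a padded size estimate in len. */
	if (sysctlbyname("net.inet.tcp.pcblist", NULL, &len, NULL, 0) == -1)
		return (NULL);
	if ((buf = malloc(len)) == NULL)
		return (NULL);
	/* Pass 2: fetch the records; len is updated to the bytes written. */
	if (sysctlbyname("net.inet.tcp.pcblist", buf, &len, NULL, 0) == -1) {
		free(buf);
		return (NULL);
	}
	*lenp = len;
	return (buf);
}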
 1217 
 1218 #ifdef INET
 1219 static int
 1220 tcp_getcred(SYSCTL_HANDLER_ARGS)
 1221 {
 1222         struct xucred xuc;
 1223         struct sockaddr_in addrs[2];
 1224         struct inpcb *inp;
 1225         int error;
 1226 
 1227         error = priv_check(req->td, PRIV_NETINET_GETCRED);
 1228         if (error)
 1229                 return (error);
 1230         error = SYSCTL_IN(req, addrs, sizeof(addrs));
 1231         if (error)
 1232                 return (error);
 1233         inp = in_pcblookup(&V_tcbinfo, addrs[1].sin_addr, addrs[1].sin_port,
 1234             addrs[0].sin_addr, addrs[0].sin_port, INPLOOKUP_RLOCKPCB, NULL);
 1235         if (inp != NULL) {
 1236                 if (inp->inp_socket == NULL)
 1237                         error = ENOENT;
 1238                 if (error == 0)
 1239                         error = cr_canseeinpcb(req->td->td_ucred, inp);
 1240                 if (error == 0)
 1241                         cru2x(inp->inp_cred, &xuc);
 1242                 INP_RUNLOCK(inp);
 1243         } else
 1244                 error = ENOENT;
 1245         if (error == 0)
 1246                 error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
 1247         return (error);
 1248 }
 1249 
 1250 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, getcred,
 1251     CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0,
 1252     tcp_getcred, "S,xucred", "Get the xucred of a TCP connection");
 1253 #endif /* INET */
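
/*
 * Editorial sketch: how an ident-style daemon would consult the getcred
 * handler above.  Matching the in_pcblookup() call in tcp_getcred(),
 * addrs[0] names the local end of the connection and addrs[1] the foreign
 * end; the caller needs the PRIV_NETINET_GETCRED privilege.
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/ucred.h>
#include <netinet/in.h>

static int
lookup_tcp_owner(const struct sockaddr_in *local,
    const struct sockaddr_in *foreign, uid_t *uidp)
{
	struct sockaddr_in addrs[2];
	struct xucred xuc;
	size_t len = sizeof(xuc);

	addrs[0] = *local;		/* local address and port */
	addrs[1] = *foreign;		/* foreign address and port */
	if (sysctlbyname("net.inet.tcp.getcred", &xuc, &len,
	    addrs, sizeof(addrs)) == -1)
		return (-1);
	*uidp = xuc.cr_uid;
	return (0);
}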
 1254 
 1255 #ifdef INET6
 1256 static int
 1257 tcp6_getcred(SYSCTL_HANDLER_ARGS)
 1258 {
 1259         struct xucred xuc;
 1260         struct sockaddr_in6 addrs[2];
 1261         struct inpcb *inp;
 1262         int error;
 1263 #ifdef INET
 1264         int mapped = 0;
 1265 #endif
 1266 
 1267         error = priv_check(req->td, PRIV_NETINET_GETCRED);
 1268         if (error)
 1269                 return (error);
 1270         error = SYSCTL_IN(req, addrs, sizeof(addrs));
 1271         if (error)
 1272                 return (error);
 1273         if ((error = sa6_embedscope(&addrs[0], V_ip6_use_defzone)) != 0 ||
 1274             (error = sa6_embedscope(&addrs[1], V_ip6_use_defzone)) != 0) {
 1275                 return (error);
 1276         }
 1277         if (IN6_IS_ADDR_V4MAPPED(&addrs[0].sin6_addr)) {
 1278 #ifdef INET
 1279                 if (IN6_IS_ADDR_V4MAPPED(&addrs[1].sin6_addr))
 1280                         mapped = 1;
 1281                 else
 1282 #endif
 1283                         return (EINVAL);
 1284         }
 1285 
 1286 #ifdef INET
 1287         if (mapped == 1)
 1288                 inp = in_pcblookup(&V_tcbinfo,
 1289                         *(struct in_addr *)&addrs[1].sin6_addr.s6_addr[12],
 1290                         addrs[1].sin6_port,
 1291                         *(struct in_addr *)&addrs[0].sin6_addr.s6_addr[12],
 1292                         addrs[0].sin6_port, INPLOOKUP_RLOCKPCB, NULL);
 1293         else
 1294 #endif
 1295                 inp = in6_pcblookup(&V_tcbinfo,
 1296                         &addrs[1].sin6_addr, addrs[1].sin6_port,
 1297                         &addrs[0].sin6_addr, addrs[0].sin6_port,
 1298                         INPLOOKUP_RLOCKPCB, NULL);
 1299         if (inp != NULL) {
 1300                 if (inp->inp_socket == NULL)
 1301                         error = ENOENT;
 1302                 if (error == 0)
 1303                         error = cr_canseeinpcb(req->td->td_ucred, inp);
 1304                 if (error == 0)
 1305                         cru2x(inp->inp_cred, &xuc);
 1306                 INP_RUNLOCK(inp);
 1307         } else
 1308                 error = ENOENT;
 1309         if (error == 0)
 1310                 error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
 1311         return (error);
 1312 }
 1313 
 1314 SYSCTL_PROC(_net_inet6_tcp6, OID_AUTO, getcred,
 1315     CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0,
 1316     tcp6_getcred, "S,xucred", "Get the xucred of a TCP6 connection");
 1317 #endif /* INET6 */
 1318 
 1319 
 1320 #ifdef INET
 1321 void
 1322 tcp_ctlinput(int cmd, struct sockaddr *sa, void *vip)
 1323 {
 1324         struct ip *ip = vip;
 1325         struct tcphdr *th;
 1326         struct in_addr faddr;
 1327         struct inpcb *inp;
 1328         struct tcpcb *tp;
 1329         struct inpcb *(*notify)(struct inpcb *, int) = tcp_notify;
 1330         struct icmp *icp;
 1331         struct in_conninfo inc;
 1332         tcp_seq icmp_tcp_seq;
 1333         int mtu;
 1334 
 1335         faddr = ((struct sockaddr_in *)sa)->sin_addr;
 1336         if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
 1337                 return;
 1338 
 1339         if (cmd == PRC_MSGSIZE)
 1340                 notify = tcp_mtudisc;
 1341         else if (V_icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
 1342                 cmd == PRC_UNREACH_PORT || cmd == PRC_TIMXCEED_INTRANS) && ip)
 1343                 notify = tcp_drop_syn_sent;
 1344         /*
 1345          * Redirects don't need to be handled up here.
 1346          */
 1347         else if (PRC_IS_REDIRECT(cmd))
 1348                 return;
 1349         /*
  1350          * Source quench is deprecated.
 1351          */
 1352         else if (cmd == PRC_QUENCH)
 1353                 return;
 1354         /*
 1355          * Hostdead is ugly because it goes linearly through all PCBs.
 1356          * XXX: We never get this from ICMP, otherwise it makes an
 1357          * excellent DoS attack on machines with many connections.
 1358          */
 1359         else if (cmd == PRC_HOSTDEAD)
 1360                 ip = NULL;
 1361         else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0)
 1362                 return;
 1363         if (ip != NULL) {
 1364                 icp = (struct icmp *)((caddr_t)ip
 1365                                       - offsetof(struct icmp, icmp_ip));
 1366                 th = (struct tcphdr *)((caddr_t)ip
 1367                                        + (ip->ip_hl << 2));
 1368                 INP_INFO_WLOCK(&V_tcbinfo);
 1369                 inp = in_pcblookup(&V_tcbinfo, faddr, th->th_dport,
 1370                     ip->ip_src, th->th_sport, INPLOOKUP_WLOCKPCB, NULL);
 1371                 if (inp != NULL)  {
 1372                         if (!(inp->inp_flags & INP_TIMEWAIT) &&
 1373                             !(inp->inp_flags & INP_DROPPED) &&
 1374                             !(inp->inp_socket == NULL)) {
 1375                                 icmp_tcp_seq = htonl(th->th_seq);
 1376                                 tp = intotcpcb(inp);
 1377                                 if (SEQ_GEQ(icmp_tcp_seq, tp->snd_una) &&
 1378                                     SEQ_LT(icmp_tcp_seq, tp->snd_max)) {
 1379                                         if (cmd == PRC_MSGSIZE) {
 1380                                             /*
 1381                                              * MTU discovery:
 1382                                              * If we got a needfrag set the MTU
 1383                                              * in the route to the suggested new
 1384                                              * value (if given) and then notify.
 1385                                              */
 1386                                             bzero(&inc, sizeof(inc));
 1387                                             inc.inc_faddr = faddr;
 1388                                             inc.inc_fibnum =
 1389                                                 inp->inp_inc.inc_fibnum;
 1390 
 1391                                             mtu = ntohs(icp->icmp_nextmtu);
 1392                                             /*
 1393                                              * If no alternative MTU was
 1394                                              * proposed, try the next smaller
 1395                                              * one.  ip->ip_len has already
 1396                                              * been swapped in icmp_input().
 1397                                              */
 1398                                             if (!mtu)
 1399                                                 mtu = ip_next_mtu(ip->ip_len,
 1400                                                  1);
 1401                                             if (mtu < V_tcp_minmss
 1402                                                  + sizeof(struct tcpiphdr))
 1403                                                 mtu = V_tcp_minmss
 1404                                                  + sizeof(struct tcpiphdr);
 1405                                             /*
 1406                                              * Only cache the MTU if it
 1407                                              * is smaller than the interface
 1408                                              * or route MTU.  tcp_mtudisc()
 1409                                              * will do the right thing by itself.
 1410                                              */
 1411                                             if (mtu <= tcp_maxmtu(&inc, NULL))
 1412                                                 tcp_hc_updatemtu(&inc, mtu);
 1413                                         }
 1414 
 1415                                         inp = (*notify)(inp, inetctlerrmap[cmd]);
 1416                                 }
 1417                         }
 1418                         if (inp != NULL)
 1419                                 INP_WUNLOCK(inp);
 1420                 } else {
 1421                         bzero(&inc, sizeof(inc));
 1422                         inc.inc_fport = th->th_dport;
 1423                         inc.inc_lport = th->th_sport;
 1424                         inc.inc_faddr = faddr;
 1425                         inc.inc_laddr = ip->ip_src;
 1426                         syncache_unreach(&inc, th);
 1427                 }
 1428                 INP_INFO_WUNLOCK(&V_tcbinfo);
 1429         } else
 1430                 in_pcbnotifyall(&V_tcbinfo, faddr, inetctlerrmap[cmd], notify);
 1431 }
 1432 #endif /* INET */
 1433 
 1434 #ifdef INET6
 1435 void
 1436 tcp6_ctlinput(int cmd, struct sockaddr *sa, void *d)
 1437 {
 1438         struct tcphdr th;
 1439         struct inpcb *(*notify)(struct inpcb *, int) = tcp_notify;
 1440         struct ip6_hdr *ip6;
 1441         struct mbuf *m;
 1442         struct ip6ctlparam *ip6cp = NULL;
 1443         const struct sockaddr_in6 *sa6_src = NULL;
 1444         int off;
 1445         struct tcp_portonly {
 1446                 u_int16_t th_sport;
 1447                 u_int16_t th_dport;
 1448         } *thp;
 1449 
 1450         if (sa->sa_family != AF_INET6 ||
 1451             sa->sa_len != sizeof(struct sockaddr_in6))
 1452                 return;
 1453 
 1454         if (cmd == PRC_MSGSIZE)
 1455                 notify = tcp_mtudisc;
 1456         else if (!PRC_IS_REDIRECT(cmd) &&
 1457                  ((unsigned)cmd >= PRC_NCMDS || inet6ctlerrmap[cmd] == 0))
 1458                 return;
 1459         /* Source quench is deprecated. */
 1460         else if (cmd == PRC_QUENCH)
 1461                 return;
 1462 
 1463         /* if the parameter is from icmp6, decode it. */
 1464         if (d != NULL) {
 1465                 ip6cp = (struct ip6ctlparam *)d;
 1466                 m = ip6cp->ip6c_m;
 1467                 ip6 = ip6cp->ip6c_ip6;
 1468                 off = ip6cp->ip6c_off;
 1469                 sa6_src = ip6cp->ip6c_src;
 1470         } else {
 1471                 m = NULL;
 1472                 ip6 = NULL;
 1473                 off = 0;        /* fool gcc */
 1474                 sa6_src = &sa6_any;
 1475         }
 1476 
 1477         if (ip6 != NULL) {
 1478                 struct in_conninfo inc;
 1479                 /*
 1480                  * XXX: We assume that when IPV6 is non-NULL,
 1481                  * M and OFF are valid.
 1482                  */
 1483 
 1484                 /* check if we can safely examine src and dst ports */
 1485                 if (m->m_pkthdr.len < off + sizeof(*thp))
 1486                         return;
 1487 
 1488                 bzero(&th, sizeof(th));
 1489                 m_copydata(m, off, sizeof(*thp), (caddr_t)&th);
 1490 
 1491                 in6_pcbnotify(&V_tcbinfo, sa, th.th_dport,
 1492                     (struct sockaddr *)ip6cp->ip6c_src,
 1493                     th.th_sport, cmd, NULL, notify);
 1494 
 1495                 bzero(&inc, sizeof(inc));
 1496                 inc.inc_fport = th.th_dport;
 1497                 inc.inc_lport = th.th_sport;
 1498                 inc.inc6_faddr = ((struct sockaddr_in6 *)sa)->sin6_addr;
 1499                 inc.inc6_laddr = ip6cp->ip6c_src->sin6_addr;
 1500                 inc.inc_flags |= INC_ISIPV6;
 1501                 INP_INFO_WLOCK(&V_tcbinfo);
 1502                 syncache_unreach(&inc, &th);
 1503                 INP_INFO_WUNLOCK(&V_tcbinfo);
 1504         } else
 1505                 in6_pcbnotify(&V_tcbinfo, sa, 0, (const struct sockaddr *)sa6_src,
 1506                               0, cmd, NULL, notify);
 1507 }
 1508 #endif /* INET6 */
 1509 
 1510 
 1511 /*
 1512  * Following is where TCP initial sequence number generation occurs.
 1513  *
 1514  * There are two places where we must use initial sequence numbers:
 1515  * 1.  In SYN-ACK packets.
 1516  * 2.  In SYN packets.
 1517  *
 1518  * All ISNs for SYN-ACK packets are generated by the syncache.  See
 1519  * tcp_syncache.c for details.
 1520  *
 1521  * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling
 1522  * depends on this property.  In addition, these ISNs should be
 1523  * unguessable so as to prevent connection hijacking.  To satisfy
 1524  * the requirements of this situation, the algorithm outlined in
 1525  * RFC 1948 is used, with only small modifications.
 1526  *
 1527  * Implementation details:
 1528  *
 1529  * Time is based off the system timer, and is corrected so that it
 1530  * increases by one megabyte per second.  This allows for proper
 1531  * recycling on high speed LANs while still leaving over an hour
 1532  * before rollover.
 1533  *
 1534  * As reading the *exact* system time is too expensive to be done
 1535  * whenever setting up a TCP connection, we increment the time
 1536  * offset in two ways.  First, a small random positive increment
 1537  * is added to isn_offset for each connection that is set up.
 1538  * Second, whenever tcp_new_isn() notices that the clock tick has
 1539  * advanced, it projects isn_offset forward so that sequence
 1540  * numbers grow at approximately ISN_BYTES_PER_SECOND.  The
 1541  * random positive increments serve only to ensure that the same
 1542  * exact sequence number is never sent out twice (as could
 1543  * otherwise happen when a port is recycled in less than the
 1544  * system tick interval).
 1545  *
 1546  * net.inet.tcp.isn_reseed_interval controls the number of seconds
 1547  * between seeding of isn_secret.  This is normally set to zero,
 1548  * as reseeding should not be necessary.
 1549  *
 1550  * Locking of the global variables isn_secret, isn_last, isn_last_reseed,
 1551  * isn_offset, and isn_offset_old is performed using the ISN lock.  In
 1552  * general, this means holding an exclusive (write) lock.
 1553  */
 1554 
 1555 #define ISN_BYTES_PER_SECOND 1048576
 1556 #define ISN_STATIC_INCREMENT 4096
 1557 #define ISN_RANDOM_INCREMENT (4096 - 1)
 1558 
 1559 static VNET_DEFINE(u_char, isn_secret[32]);
 1560 static VNET_DEFINE(int, isn_last);
 1561 static VNET_DEFINE(int, isn_last_reseed);
 1562 static VNET_DEFINE(u_int32_t, isn_offset);
 1563 static VNET_DEFINE(u_int32_t, isn_offset_old);
 1564 
 1565 #define V_isn_secret                    VNET(isn_secret)
 1566 #define V_isn_last                      VNET(isn_last)
 1567 #define V_isn_last_reseed               VNET(isn_last_reseed)
 1568 #define V_isn_offset                    VNET(isn_offset)
 1569 #define V_isn_offset_old                VNET(isn_offset_old)
 1570 
 1571 tcp_seq
 1572 tcp_new_isn(struct tcpcb *tp)
 1573 {
 1574         MD5_CTX isn_ctx;
 1575         u_int32_t md5_buffer[4];
 1576         tcp_seq new_isn;
 1577         u_int32_t projected_offset;
 1578 
 1579         INP_WLOCK_ASSERT(tp->t_inpcb);
 1580 
 1581         ISN_LOCK();
 1582         /* Seed if this is the first use, reseed if requested. */
 1583         if ((V_isn_last_reseed == 0) || ((V_tcp_isn_reseed_interval > 0) &&
 1584              (((u_int)V_isn_last_reseed + (u_int)V_tcp_isn_reseed_interval*hz)
 1585                 < (u_int)ticks))) {
 1586                 read_random(&V_isn_secret, sizeof(V_isn_secret));
 1587                 V_isn_last_reseed = ticks;
 1588         }
 1589 
 1590         /* Compute the md5 hash and return the ISN. */
 1591         MD5Init(&isn_ctx);
 1592         MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_fport, sizeof(u_short));
 1593         MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_lport, sizeof(u_short));
 1594 #ifdef INET6
 1595         if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) {
 1596                 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_faddr,
 1597                           sizeof(struct in6_addr));
 1598                 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_laddr,
 1599                           sizeof(struct in6_addr));
 1600         } else
 1601 #endif
 1602         {
 1603                 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_faddr,
 1604                           sizeof(struct in_addr));
 1605                 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_laddr,
 1606                           sizeof(struct in_addr));
 1607         }
 1608         MD5Update(&isn_ctx, (u_char *) &V_isn_secret, sizeof(V_isn_secret));
 1609         MD5Final((u_char *) &md5_buffer, &isn_ctx);
 1610         new_isn = (tcp_seq) md5_buffer[0];
 1611         V_isn_offset += ISN_STATIC_INCREMENT +
 1612                 (arc4random() & ISN_RANDOM_INCREMENT);
 1613         if (ticks != V_isn_last) {
 1614                 projected_offset = V_isn_offset_old +
 1615                     ISN_BYTES_PER_SECOND / hz * (ticks - V_isn_last);
 1616                 if (SEQ_GT(projected_offset, V_isn_offset))
 1617                         V_isn_offset = projected_offset;
 1618                 V_isn_offset_old = V_isn_offset;
 1619                 V_isn_last = ticks;
 1620         }
 1621         new_isn += V_isn_offset;
 1622         ISN_UNLOCK();
 1623         return (new_isn);
 1624 }
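/*
 * Editor's note: a simplified, stand-alone user-space sketch (not part of
 * this file) of the scheme tcp_new_isn() implements above: a per-connection
 * hash combined with a monotonically advancing offset.  The FNV-1a hash
 * below is only a stand-in for the MD5 computation over the 4-tuple and
 * secret, and every sketch_* name is hypothetical.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define	SKETCH_STATIC_INCREMENT	4096
#define	SKETCH_RANDOM_INCREMENT	(4096 - 1)

/* Stand-in for MD5 over {laddr, faddr, lport, fport, secret}. */
static uint32_t
sketch_hash(uint32_t laddr, uint32_t faddr, uint16_t lport, uint16_t fport,
    uint32_t secret)
{
	uint32_t in[4] = { laddr, faddr, ((uint32_t)lport << 16) | fport,
	    secret };
	uint32_t h = 2166136261u;

	for (int i = 0; i < 4; i++) {
		h ^= in[i];
		h *= 16777619u;
	}
	return (h);
}

int
main(void)
{
	uint32_t offset = 0;

	/* Back-to-back connections to the same peer still get distinct ISNs. */
	for (int i = 0; i < 2; i++) {
		offset += SKETCH_STATIC_INCREMENT +
		    (arc4random() & SKETCH_RANDOM_INCREMENT);
		printf("ISN %d: %u\n", i, sketch_hash(0x0a000001, 0x0a000002,
		    49152, 80, 0xdeadbeef) + offset);
	}
	return (0);
}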
 1625 
 1626 /*
 1627  * When a specific ICMP unreachable message is received and the
 1628  * connection state is SYN-SENT, drop the connection.  This behavior
 1629  * is controlled by the icmp_may_rst sysctl.
 1630  */
 1631 struct inpcb *
 1632 tcp_drop_syn_sent(struct inpcb *inp, int errno)
 1633 {
 1634         struct tcpcb *tp;
 1635 
 1636         INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
 1637         INP_WLOCK_ASSERT(inp);
 1638 
 1639         if ((inp->inp_flags & INP_TIMEWAIT) ||
 1640             (inp->inp_flags & INP_DROPPED))
 1641                 return (inp);
 1642 
 1643         tp = intotcpcb(inp);
 1644         if (tp->t_state != TCPS_SYN_SENT)
 1645                 return (inp);
 1646 
 1647         tp = tcp_drop(tp, errno);
 1648         if (tp != NULL)
 1649                 return (inp);
 1650         else
 1651                 return (NULL);
 1652 }
 1653 
 1654 /*
 1655  * When `need fragmentation' ICMP is received, update our idea of the MSS
 1656  * based on the new value in the route.  Also nudge TCP to send something,
 1657  * since we know the packet we just sent was dropped.
 1658  * This duplicates some code in the tcp_mss() function in tcp_input.c.
 1659  */
 1660 struct inpcb *
 1661 tcp_mtudisc(struct inpcb *inp, int errno)
 1662 {
 1663         struct tcpcb *tp;
 1664         struct socket *so;
 1665 
 1666         INP_WLOCK_ASSERT(inp);
 1667         if ((inp->inp_flags & INP_TIMEWAIT) ||
 1668             (inp->inp_flags & INP_DROPPED))
 1669                 return (inp);
 1670 
 1671         tp = intotcpcb(inp);
 1672         KASSERT(tp != NULL, ("tcp_mtudisc: tp == NULL"));
 1673 
 1674         tcp_mss_update(tp, -1, NULL, NULL);
 1675   
 1676         so = inp->inp_socket;
 1677         SOCKBUF_LOCK(&so->so_snd);
 1678         /* If the mss is larger than the socket buffer, decrease the mss. */
 1679         if (so->so_snd.sb_hiwat < tp->t_maxseg)
 1680                 tp->t_maxseg = so->so_snd.sb_hiwat;
 1681         SOCKBUF_UNLOCK(&so->so_snd);
 1682 
 1683         TCPSTAT_INC(tcps_mturesent);
 1684         tp->t_rtttime = 0;
 1685         tp->snd_nxt = tp->snd_una;
 1686         tcp_free_sackholes(tp);
 1687         tp->snd_recover = tp->snd_max;
 1688         if (tp->t_flags & TF_SACK_PERMIT)
 1689                 EXIT_FASTRECOVERY(tp->t_flags);
 1690         tcp_output_send(tp);
 1691         return (inp);
 1692 }
 1693 
 1694 #ifdef INET
 1695 /*
 1696  * Look-up the routing entry to the peer of this inpcb.  If no route
 1697  * is found and it cannot be allocated, then return 0.  This routine
 1698  * is called by TCP routines that access the rmx structure and by
 1699  * tcp_mss_update to get the peer/interface MTU.
 1700  */
 1701 u_long
 1702 tcp_maxmtu(struct in_conninfo *inc, int *flags)
 1703 {
 1704         struct route sro;
 1705         struct sockaddr_in *dst;
 1706         struct ifnet *ifp;
 1707         u_long maxmtu = 0;
 1708 
 1709         KASSERT(inc != NULL, ("tcp_maxmtu with NULL in_conninfo pointer"));
 1710 
 1711         bzero(&sro, sizeof(sro));
 1712         if (inc->inc_faddr.s_addr != INADDR_ANY) {
 1713                 dst = (struct sockaddr_in *)&sro.ro_dst;
 1714                 dst->sin_family = AF_INET;
 1715                 dst->sin_len = sizeof(*dst);
 1716                 dst->sin_addr = inc->inc_faddr;
 1717                 in_rtalloc_ign(&sro, 0, inc->inc_fibnum);
 1718         }
 1719         if (sro.ro_rt != NULL) {
 1720                 ifp = sro.ro_rt->rt_ifp;
 1721                 if (sro.ro_rt->rt_rmx.rmx_mtu == 0)
 1722                         maxmtu = ifp->if_mtu;
 1723                 else
 1724                         maxmtu = min(sro.ro_rt->rt_rmx.rmx_mtu, ifp->if_mtu);
 1725 
 1726                 /* Report additional interface capabilities. */
 1727                 if (flags != NULL) {
 1728                         if (ifp->if_capenable & IFCAP_TSO4 &&
 1729                             ifp->if_hwassist & CSUM_TSO)
 1730                                 *flags |= CSUM_TSO;
 1731                 }
 1732                 RTFREE(sro.ro_rt);
 1733         }
 1734         return (maxmtu);
 1735 }
 1736 #endif /* INET */
 1737 
 1738 #ifdef INET6
 1739 u_long
 1740 tcp_maxmtu6(struct in_conninfo *inc, int *flags)
 1741 {
 1742         struct route_in6 sro6;
 1743         struct ifnet *ifp;
 1744         u_long maxmtu = 0;
 1745 
 1746         KASSERT(inc != NULL, ("tcp_maxmtu6 with NULL in_conninfo pointer"));
 1747 
 1748         bzero(&sro6, sizeof(sro6));
 1749         if (!IN6_IS_ADDR_UNSPECIFIED(&inc->inc6_faddr)) {
 1750                 sro6.ro_dst.sin6_family = AF_INET6;
 1751                 sro6.ro_dst.sin6_len = sizeof(struct sockaddr_in6);
 1752                 sro6.ro_dst.sin6_addr = inc->inc6_faddr;
 1753                 rtalloc_ign((struct route *)&sro6, 0);
 1754         }
 1755         if (sro6.ro_rt != NULL) {
 1756                 ifp = sro6.ro_rt->rt_ifp;
 1757                 if (sro6.ro_rt->rt_rmx.rmx_mtu == 0)
 1758                         maxmtu = IN6_LINKMTU(sro6.ro_rt->rt_ifp);
 1759                 else
 1760                         maxmtu = min(sro6.ro_rt->rt_rmx.rmx_mtu,
 1761                                      IN6_LINKMTU(sro6.ro_rt->rt_ifp));
 1762 
 1763                 /* Report additional interface capabilities. */
 1764                 if (flags != NULL) {
 1765                         if (ifp->if_capenable & IFCAP_TSO6 &&
 1766                             ifp->if_hwassist & CSUM_TSO)
 1767                                 *flags |= CSUM_TSO;
 1768                 }
 1769                 RTFREE(sro6.ro_rt);
 1770         }
 1771 
 1772         return (maxmtu);
 1773 }
 1774 #endif /* INET6 */
 1775 
 1776 #ifdef IPSEC
 1777 /* compute ESP/AH header size for TCP, including outer IP header. */
 1778 size_t
 1779 ipsec_hdrsiz_tcp(struct tcpcb *tp)
 1780 {
 1781         struct inpcb *inp;
 1782         struct mbuf *m;
 1783         size_t hdrsiz;
 1784         struct ip *ip;
 1785 #ifdef INET6
 1786         struct ip6_hdr *ip6;
 1787 #endif
 1788         struct tcphdr *th;
 1789 
 1790         if ((tp == NULL) || ((inp = tp->t_inpcb) == NULL))
 1791                 return (0);
 1792         MGETHDR(m, M_DONTWAIT, MT_DATA);
 1793         if (!m)
 1794                 return (0);
 1795 
 1796 #ifdef INET6
 1797         if ((inp->inp_vflag & INP_IPV6) != 0) {
 1798                 ip6 = mtod(m, struct ip6_hdr *);
 1799                 th = (struct tcphdr *)(ip6 + 1);
 1800                 m->m_pkthdr.len = m->m_len =
 1801                         sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
 1802                 tcpip_fillheaders(inp, ip6, th);
 1803                 hdrsiz = ipsec_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
 1804         } else
 1805 #endif /* INET6 */
 1806         {
 1807                 ip = mtod(m, struct ip *);
 1808                 th = (struct tcphdr *)(ip + 1);
 1809                 m->m_pkthdr.len = m->m_len = sizeof(struct tcpiphdr);
 1810                 tcpip_fillheaders(inp, ip, th);
 1811                 hdrsiz = ipsec_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
 1812         }
 1813 
 1814         m_free(m);
 1815         return (hdrsiz);
 1816 }
 1817 #endif /* IPSEC */
 1818 
 1819 #ifdef TCP_SIGNATURE
 1820 /*
 1821  * Callback function invoked by m_apply() to digest TCP segment data
 1822  * contained within an mbuf chain.
 1823  */
 1824 static int
 1825 tcp_signature_apply(void *fstate, void *data, u_int len)
 1826 {
 1827 
 1828         MD5Update(fstate, (u_char *)data, len);
 1829         return (0);
 1830 }
 1831 
 1832 /*
 1833  * Compute TCP-MD5 hash of a TCP segment. (RFC2385)
 1834  *
 1835  * Parameters:
 1836  * m            pointer to head of mbuf chain
 1837  * _unused      
 1838  * len          length of TCP segment data, excluding options
 1839  * optlen       length of TCP segment options
 1840  * buf          pointer to storage for computed MD5 digest
 1841  * direction    direction of flow (IPSEC_DIR_INBOUND or OUTBOUND)
 1842  *
 1843  * We do this over ip, tcphdr, segment data, and the key in the SADB.
 1844  * When called from tcp_input(), we can be sure that th_sum has been
 1845  * zeroed out and verified already.
 1846  *
 1847  * Return 0 if successful, otherwise return EINVAL.
 1848  *
 1849  * XXX The key is retrieved from the system's PF_KEY SADB, by keying a
 1850  * search with the destination IP address, and a 'magic SPI' to be
 1851  * determined by the application. This is hardcoded elsewhere to 1179
 1852  * right now. Another branch of this code exists which uses the SPD to
 1853  * specify per-application flows but it is unstable.
 1854  */
 1855 int
 1856 tcp_signature_compute(struct mbuf *m, int _unused, int len, int optlen,
 1857     u_char *buf, u_int direction)
 1858 {
 1859         union sockaddr_union dst;
 1860 #ifdef INET
 1861         struct ippseudo ippseudo;
 1862 #endif
 1863         MD5_CTX ctx;
 1864         int doff;
 1865         struct ip *ip;
 1866 #ifdef INET
 1867         struct ipovly *ipovly;
 1868 #endif
 1869         struct secasvar *sav;
 1870         struct tcphdr *th;
 1871 #ifdef INET6
 1872         struct ip6_hdr *ip6;
 1873         struct in6_addr in6;
 1874         char ip6buf[INET6_ADDRSTRLEN];
 1875         uint32_t plen;
 1876         uint16_t nhdr;
 1877 #endif
 1878         u_short savecsum;
 1879 
 1880         KASSERT(m != NULL, ("NULL mbuf chain"));
 1881         KASSERT(buf != NULL, ("NULL signature pointer"));
 1882 
 1883         /* Extract the destination from the IP header in the mbuf. */
 1884         bzero(&dst, sizeof(union sockaddr_union));
 1885         ip = mtod(m, struct ip *);
 1886 #ifdef INET6
 1887         ip6 = NULL;     /* Make the compiler happy. */
 1888 #endif
 1889         switch (ip->ip_v) {
 1890 #ifdef INET
 1891         case IPVERSION:
 1892                 dst.sa.sa_len = sizeof(struct sockaddr_in);
 1893                 dst.sa.sa_family = AF_INET;
 1894                 dst.sin.sin_addr = (direction == IPSEC_DIR_INBOUND) ?
 1895                     ip->ip_src : ip->ip_dst;
 1896                 break;
 1897 #endif
 1898 #ifdef INET6
 1899         case (IPV6_VERSION >> 4):
 1900                 ip6 = mtod(m, struct ip6_hdr *);
 1901                 dst.sa.sa_len = sizeof(struct sockaddr_in6);
 1902                 dst.sa.sa_family = AF_INET6;
 1903                 dst.sin6.sin6_addr = (direction == IPSEC_DIR_INBOUND) ?
 1904                     ip6->ip6_src : ip6->ip6_dst;
 1905                 break;
 1906 #endif
 1907         default:
 1908                 return (EINVAL);
 1909                 /* NOTREACHED */
 1910                 break;
 1911         }
 1912 
 1913         /* Look up an SADB entry which matches the address of the peer. */
 1914         sav = KEY_ALLOCSA(&dst, IPPROTO_TCP, htonl(TCP_SIG_SPI));
 1915         if (sav == NULL) {
 1916                 ipseclog((LOG_ERR, "%s: SADB lookup failed for %s\n", __func__,
 1917                     (ip->ip_v == IPVERSION) ? inet_ntoa(dst.sin.sin_addr) :
 1918 #ifdef INET6
 1919                         (ip->ip_v == (IPV6_VERSION >> 4)) ?
 1920                             ip6_sprintf(ip6buf, &dst.sin6.sin6_addr) :
 1921 #endif
 1922                         "(unsupported)"));
 1923                 return (EINVAL);
 1924         }
 1925 
 1926         MD5Init(&ctx);
 1927         /*
 1928          * Step 1: Update MD5 hash with IP(v6) pseudo-header.
 1929          *
 1930          * XXX The ippseudo header MUST be digested in network byte order,
 1931          * or else we'll fail the regression test. Assume all fields we've
 1932          * been doing arithmetic on have been in host byte order.
 1933          * XXX One cannot depend on ipovly->ih_len here. When called from
 1934          * tcp_output(), the underlying ip_len member has not yet been set.
 1935          */
 1936         switch (ip->ip_v) {
 1937 #ifdef INET
 1938         case IPVERSION:
 1939                 ipovly = (struct ipovly *)ip;
 1940                 ippseudo.ippseudo_src = ipovly->ih_src;
 1941                 ippseudo.ippseudo_dst = ipovly->ih_dst;
 1942                 ippseudo.ippseudo_pad = 0;
 1943                 ippseudo.ippseudo_p = IPPROTO_TCP;
 1944                 ippseudo.ippseudo_len = htons(len + sizeof(struct tcphdr) +
 1945                     optlen);
 1946                 MD5Update(&ctx, (char *)&ippseudo, sizeof(struct ippseudo));
 1947 
 1948                 th = (struct tcphdr *)((u_char *)ip + sizeof(struct ip));
 1949                 doff = sizeof(struct ip) + sizeof(struct tcphdr) + optlen;
 1950                 break;
 1951 #endif
 1952 #ifdef INET6
 1953         /*
 1954          * RFC 2385, 2.0  Proposal
 1955          * For IPv6, the pseudo-header is as described in RFC 2460, namely the
 1956          * 128-bit source IPv6 address, 128-bit destination IPv6 address, zero-
 1957          * extended next header value (to form 32 bits), and 32-bit segment
 1958          * length.
 1959          * Note: Upper-Layer Packet Length comes before Next Header.
 1960          */
 1961         case (IPV6_VERSION >> 4):
 1962                 in6 = ip6->ip6_src;
 1963                 in6_clearscope(&in6);
 1964                 MD5Update(&ctx, (char *)&in6, sizeof(struct in6_addr));
 1965                 in6 = ip6->ip6_dst;
 1966                 in6_clearscope(&in6);
 1967                 MD5Update(&ctx, (char *)&in6, sizeof(struct in6_addr));
 1968                 plen = htonl(len + sizeof(struct tcphdr) + optlen);
 1969                 MD5Update(&ctx, (char *)&plen, sizeof(uint32_t));
 1970                 nhdr = 0;
 1971                 MD5Update(&ctx, (char *)&nhdr, sizeof(uint8_t));
 1972                 MD5Update(&ctx, (char *)&nhdr, sizeof(uint8_t));
 1973                 MD5Update(&ctx, (char *)&nhdr, sizeof(uint8_t));
 1974                 nhdr = IPPROTO_TCP;
 1975                 MD5Update(&ctx, (char *)&nhdr, sizeof(uint8_t));
 1976 
 1977                 th = (struct tcphdr *)((u_char *)ip6 + sizeof(struct ip6_hdr));
 1978                 doff = sizeof(struct ip6_hdr) + sizeof(struct tcphdr) + optlen;
 1979                 break;
 1980 #endif
 1981         default:
 1982                 return (EINVAL);
 1983                 /* NOTREACHED */
 1984                 break;
 1985         }
 1986 
 1987 
 1988         /*
 1989          * Step 2: Update MD5 hash with TCP header, excluding options.
 1990          * The TCP checksum must be set to zero.
 1991          */
 1992         savecsum = th->th_sum;
 1993         th->th_sum = 0;
 1994         MD5Update(&ctx, (char *)th, sizeof(struct tcphdr));
 1995         th->th_sum = savecsum;
 1996 
 1997         /*
 1998          * Step 3: Update MD5 hash with TCP segment data.
 1999          *         Use m_apply() to avoid an early m_pullup().
 2000          */
 2001         if (len > 0)
 2002                 m_apply(m, doff, len, tcp_signature_apply, &ctx);
 2003 
 2004         /*
 2005          * Step 4: Update MD5 hash with shared secret.
 2006          */
 2007         MD5Update(&ctx, sav->key_auth->key_data, _KEYLEN(sav->key_auth));
 2008         MD5Final(buf, &ctx);
 2009 
 2010         key_sa_recordxfer(sav, m);
 2011         KEY_FREESAV(&sav);
 2012         return (0);
 2013 }
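/*
 * Editor's note: a brief user-space sketch (not part of this file) showing
 * how TCP-MD5 signing is typically requested so that the code above is
 * exercised.  On FreeBSD the TCP_MD5SIG socket option is a boolean switch;
 * the shared secret itself is installed separately in the SADB (e.g. with
 * setkey(8)), as the XXX comment above notes.  The peer address and port
 * are illustrative, and error handling is abbreviated.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <err.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	struct sockaddr_in sin;
	int s, on = 1;

	if ((s = socket(AF_INET, SOCK_STREAM, 0)) == -1)
		err(1, "socket");
	/* Ask the stack to add and check TCP-MD5 signatures on this socket. */
	if (setsockopt(s, IPPROTO_TCP, TCP_MD5SIG, &on, sizeof(on)) == -1)
		err(1, "setsockopt(TCP_MD5SIG)");

	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_len = sizeof(sin);
	sin.sin_port = htons(179);			/* e.g. a BGP peer */
	sin.sin_addr.s_addr = htonl(0xc0000201);	/* 192.0.2.1 (example) */
	if (connect(s, (struct sockaddr *)&sin, sizeof(sin)) == -1)
		err(1, "connect");
	close(s);
	return (0);
}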
 2014 
 2015 /*
 2016  * Verify the TCP-MD5 hash of a TCP segment. (RFC2385)
 2017  *
 2018  * Parameters:
 2019  * m            pointer to head of mbuf chain
 2020  * off0         passed through to tcp_signature_compute() (unused there)
 2021  * tlen         length of TCP segment data, excluding options
 2022  * optlen       length of TCP segment options
 2023  * to, th       parsed TCP options and TCP header for the segment
 2024  * tcpbflag     tcpcb flags; only TF_SIGNATURE is examined
 2025  * Return 1 if successful, otherwise return 0.
 2026  */
 2027 int
 2028 tcp_signature_verify(struct mbuf *m, int off0, int tlen, int optlen,
 2029     struct tcpopt *to, struct tcphdr *th, u_int tcpbflag)
 2030 {
 2031         char tmpdigest[TCP_SIGLEN];
 2032 
 2033         if (tcp_sig_checksigs == 0)
 2034                 return (1);
 2035         if ((tcpbflag & TF_SIGNATURE) == 0) {
 2036                 if ((to->to_flags & TOF_SIGNATURE) != 0) {
 2037 
 2038                         /*
 2039                          * If this socket is not expecting a signature but
 2040                          * the segment contains one, just fail.
 2041                          */
 2042                         TCPSTAT_INC(tcps_sig_err_sigopt);
 2043                         TCPSTAT_INC(tcps_sig_rcvbadsig);
 2044                         return (0);
 2045                 }
 2046 
 2047                 /* Signature is not expected, and not present in segment. */
 2048                 return (1);
 2049         }
 2050 
 2051         /*
 2052          * If this socket is expecting a signature but the segment does not
 2053          * contain one, just fail.
 2054          */
 2055         if ((to->to_flags & TOF_SIGNATURE) == 0) {
 2056                 TCPSTAT_INC(tcps_sig_err_nosigopt);
 2057                 TCPSTAT_INC(tcps_sig_rcvbadsig);
 2058                 return (0);
 2059         }
 2060         if (tcp_signature_compute(m, off0, tlen, optlen, &tmpdigest[0],
 2061             IPSEC_DIR_INBOUND) != 0) {
 2062                 TCPSTAT_INC(tcps_sig_err_buildsig);
 2063                 TCPSTAT_INC(tcps_sig_rcvbadsig);
 2064                 return (0);
 2065         }
 2066         
 2067         if (bcmp(to->to_signature, &tmpdigest[0], TCP_SIGLEN) != 0) {
 2068                 TCPSTAT_INC(tcps_sig_rcvbadsig);
 2069                 return (0);
 2070         }
 2071         TCPSTAT_INC(tcps_sig_rcvgoodsig);
 2072         return (1);
 2073 }
 2074 #endif /* TCP_SIGNATURE */
 2075 
 2076 static int
 2077 sysctl_drop(SYSCTL_HANDLER_ARGS)
 2078 {
 2079         /* addrs[0] is a foreign socket, addrs[1] is a local one. */
 2080         struct sockaddr_storage addrs[2];
 2081         struct inpcb *inp;
 2082         struct tcpcb *tp;
 2083         struct tcptw *tw;
 2084         struct sockaddr_in *fin, *lin;
 2085 #ifdef INET6
 2086         struct sockaddr_in6 *fin6, *lin6;
 2087 #endif
 2088         int error;
 2089 
 2090         inp = NULL;
 2091         fin = lin = NULL;
 2092 #ifdef INET6
 2093         fin6 = lin6 = NULL;
 2094 #endif
 2095         error = 0;
 2096 
 2097         if (req->oldptr != NULL || req->oldlen != 0)
 2098                 return (EINVAL);
 2099         if (req->newptr == NULL)
 2100                 return (EPERM);
 2101         if (req->newlen < sizeof(addrs))
 2102                 return (ENOMEM);
 2103         error = SYSCTL_IN(req, &addrs, sizeof(addrs));
 2104         if (error)
 2105                 return (error);
 2106 
 2107         switch (addrs[0].ss_family) {
 2108 #ifdef INET6
 2109         case AF_INET6:
 2110                 fin6 = (struct sockaddr_in6 *)&addrs[0];
 2111                 lin6 = (struct sockaddr_in6 *)&addrs[1];
 2112                 if (fin6->sin6_len != sizeof(struct sockaddr_in6) ||
 2113                     lin6->sin6_len != sizeof(struct sockaddr_in6))
 2114                         return (EINVAL);
 2115                 if (IN6_IS_ADDR_V4MAPPED(&fin6->sin6_addr)) {
 2116                         if (!IN6_IS_ADDR_V4MAPPED(&lin6->sin6_addr))
 2117                                 return (EINVAL);
 2118                         in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[0]);
 2119                         in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[1]);
 2120                         fin = (struct sockaddr_in *)&addrs[0];
 2121                         lin = (struct sockaddr_in *)&addrs[1];
 2122                         break;
 2123                 }
 2124                 error = sa6_embedscope(fin6, V_ip6_use_defzone);
 2125                 if (error)
 2126                         return (error);
 2127                 error = sa6_embedscope(lin6, V_ip6_use_defzone);
 2128                 if (error)
 2129                         return (error);
 2130                 break;
 2131 #endif
 2132 #ifdef INET
 2133         case AF_INET:
 2134                 fin = (struct sockaddr_in *)&addrs[0];
 2135                 lin = (struct sockaddr_in *)&addrs[1];
 2136                 if (fin->sin_len != sizeof(struct sockaddr_in) ||
 2137                     lin->sin_len != sizeof(struct sockaddr_in))
 2138                         return (EINVAL);
 2139                 break;
 2140 #endif
 2141         default:
 2142                 return (EINVAL);
 2143         }
 2144         INP_INFO_WLOCK(&V_tcbinfo);
 2145         switch (addrs[0].ss_family) {
 2146 #ifdef INET6
 2147         case AF_INET6:
 2148                 inp = in6_pcblookup(&V_tcbinfo, &fin6->sin6_addr,
 2149                     fin6->sin6_port, &lin6->sin6_addr, lin6->sin6_port,
 2150                     INPLOOKUP_WLOCKPCB, NULL);
 2151                 break;
 2152 #endif
 2153 #ifdef INET
 2154         case AF_INET:
 2155                 inp = in_pcblookup(&V_tcbinfo, fin->sin_addr, fin->sin_port,
 2156                     lin->sin_addr, lin->sin_port, INPLOOKUP_WLOCKPCB, NULL);
 2157                 break;
 2158 #endif
 2159         }
 2160         if (inp != NULL) {
 2161                 if (inp->inp_flags & INP_TIMEWAIT) {
 2162                         /*
 2163                          * XXXRW: There currently exists a state where an
 2164                          * inpcb is present, but its timewait state has been
 2165                          * discarded.  For now, don't allow dropping of this
 2166                          * type of inpcb.
 2167                          */
 2168                         tw = intotw(inp);
 2169                         if (tw != NULL)
 2170                                 tcp_twclose(tw, 0);
 2171                         else
 2172                                 INP_WUNLOCK(inp);
 2173                 } else if (!(inp->inp_flags & INP_DROPPED) &&
 2174                            !(inp->inp_socket->so_options & SO_ACCEPTCONN)) {
 2175                         tp = intotcpcb(inp);
 2176                         tp = tcp_drop(tp, ECONNABORTED);
 2177                         if (tp != NULL)
 2178                                 INP_WUNLOCK(inp);
 2179                 } else
 2180                         INP_WUNLOCK(inp);
 2181         } else
 2182                 error = ESRCH;
 2183         INP_INFO_WUNLOCK(&V_tcbinfo);
 2184         return (error);
 2185 }
 2186 
 2187 SYSCTL_PROC(_net_inet_tcp, TCPCTL_DROP, drop,
 2188     CTLTYPE_STRUCT|CTLFLAG_WR|CTLFLAG_SKIP, NULL,
 2189     0, sysctl_drop, "", "Drop TCP connection");
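/*
 * Editor's note: a minimal user-space sketch (not part of this file) of how
 * the net.inet.tcp.drop sysctl registered above is driven; this mirrors what
 * tcpdrop(8) does.  As the handler requires, addrs[0] carries the foreign
 * endpoint, addrs[1] the local one, and no "old" buffer may be supplied.
 * The helper name tcp_drop_conn() is hypothetical, and the caller is assumed
 * to pass fully populated sockaddr_in structures and to hold the necessary
 * privilege.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <netinet/in.h>
#include <string.h>

static int
tcp_drop_conn(const struct sockaddr_in *foreign, const struct sockaddr_in *local)
{
	struct sockaddr_storage addrs[2];

	memset(addrs, 0, sizeof(addrs));
	memcpy(&addrs[0], foreign, sizeof(*foreign));	/* foreign endpoint */
	memcpy(&addrs[1], local, sizeof(*local));	/* local endpoint */
	return (sysctlbyname("net.inet.tcp.drop", NULL, NULL,
	    addrs, sizeof(addrs)));
}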
 2190 
 2191 /*
 2192  * Generate a standardized TCP log line for use throughout the
 2193  * tcp subsystem.  Memory allocation is done with M_NOWAIT to
 2194  * allow use in the interrupt context.
 2195  *
 2196  * NB: The caller MUST free(s, M_TCPLOG) the returned string.
 2197  * NB: The function may return NULL if memory allocation failed.
 2198  *
 2199  * Due to header inclusion and ordering limitations the struct ip
 2200  * and ip6_hdr pointers have to be passed as void pointers.
 2201  */
 2202 char *
 2203 tcp_log_vain(struct in_conninfo *inc, struct tcphdr *th, void *ip4hdr,
 2204     const void *ip6hdr)
 2205 {
 2206 
 2207         /* Is logging enabled? */
 2208         if (tcp_log_in_vain == 0)
 2209                 return (NULL);
 2210 
 2211         return (tcp_log_addr(inc, th, ip4hdr, ip6hdr));
 2212 }
 2213 
 2214 char *
 2215 tcp_log_addrs(struct in_conninfo *inc, struct tcphdr *th, void *ip4hdr,
 2216     const void *ip6hdr)
 2217 {
 2218 
 2219         /* Is logging enabled? */
 2220         if (tcp_log_debug == 0)
 2221                 return (NULL);
 2222 
 2223         return (tcp_log_addr(inc, th, ip4hdr, ip6hdr));
 2224 }
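/*
 * Editor's note: an abbreviated kernel-side sketch (not part of this file)
 * of the usage pattern the comment above prescribes: callers must tolerate
 * a NULL return and free the string with M_TCPLOG.  The helper name and the
 * log message are hypothetical; real callers live in tcp_input.c and similar
 * consumers.
 */
static void
tcp_log_drop_example(struct tcpcb *tp, struct tcphdr *th)
{
	char *s;

	s = tcp_log_addrs(&tp->t_inpcb->inp_inc, th, NULL, NULL);
	if (s != NULL) {
		log(LOG_DEBUG, "%s; %s: dropping segment\n", s, __func__);
		free(s, M_TCPLOG);
	}
}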
 2225 
 2226 static char *
 2227 tcp_log_addr(struct in_conninfo *inc, struct tcphdr *th, void *ip4hdr,
 2228     const void *ip6hdr)
 2229 {
 2230         char *s, *sp;
 2231         size_t size;
 2232         struct ip *ip;
 2233 #ifdef INET6
 2234         const struct ip6_hdr *ip6;
 2235 
 2236         ip6 = (const struct ip6_hdr *)ip6hdr;
 2237 #endif /* INET6 */
 2238         ip = (struct ip *)ip4hdr;
 2239 
 2240         /*
 2241          * The log line looks like this:
 2242          * "TCP: [1.2.3.4]:50332 to [1.2.3.4]:80 tcpflags 0x2<SYN>"
 2243          */
 2244         size = sizeof("TCP: []:12345 to []:12345 tcpflags 0x2<>") +
 2245             sizeof(PRINT_TH_FLAGS) + 1 +
 2246 #ifdef INET6
 2247             2 * INET6_ADDRSTRLEN;
 2248 #else
 2249             2 * INET_ADDRSTRLEN;
 2250 #endif /* INET6 */
 2251 
 2252         s = malloc(size, M_TCPLOG, M_ZERO|M_NOWAIT);
 2253         if (s == NULL)
 2254                 return (NULL);
 2255 
 2256         strcat(s, "TCP: [");
 2257         sp = s + strlen(s);
 2258 
 2259         if (inc && ((inc->inc_flags & INC_ISIPV6) == 0)) {
 2260                 inet_ntoa_r(inc->inc_faddr, sp);
 2261                 sp = s + strlen(s);
 2262                 sprintf(sp, "]:%i to [", ntohs(inc->inc_fport));
 2263                 sp = s + strlen(s);
 2264                 inet_ntoa_r(inc->inc_laddr, sp);
 2265                 sp = s + strlen(s);
 2266                 sprintf(sp, "]:%i", ntohs(inc->inc_lport));
 2267 #ifdef INET6
 2268         } else if (inc) {
 2269                 ip6_sprintf(sp, &inc->inc6_faddr);
 2270                 sp = s + strlen(s);
 2271                 sprintf(sp, "]:%i to [", ntohs(inc->inc_fport));
 2272                 sp = s + strlen(s);
 2273                 ip6_sprintf(sp, &inc->inc6_laddr);
 2274                 sp = s + strlen(s);
 2275                 sprintf(sp, "]:%i", ntohs(inc->inc_lport));
 2276         } else if (ip6 && th) {
 2277                 ip6_sprintf(sp, &ip6->ip6_src);
 2278                 sp = s + strlen(s);
 2279                 sprintf(sp, "]:%i to [", ntohs(th->th_sport));
 2280                 sp = s + strlen(s);
 2281                 ip6_sprintf(sp, &ip6->ip6_dst);
 2282                 sp = s + strlen(s);
 2283                 sprintf(sp, "]:%i", ntohs(th->th_dport));
 2284 #endif /* INET6 */
 2285 #ifdef INET
 2286         } else if (ip && th) {
 2287                 inet_ntoa_r(ip->ip_src, sp);
 2288                 sp = s + strlen(s);
 2289                 sprintf(sp, "]:%i to [", ntohs(th->th_sport));
 2290                 sp = s + strlen(s);
 2291                 inet_ntoa_r(ip->ip_dst, sp);
 2292                 sp = s + strlen(s);
 2293                 sprintf(sp, "]:%i", ntohs(th->th_dport));
 2294 #endif /* INET */
 2295         } else {
 2296                 free(s, M_TCPLOG);
 2297                 return (NULL);
 2298         }
 2299         sp = s + strlen(s);
 2300         if (th)
 2301                 sprintf(sp, " tcpflags 0x%b", th->th_flags, PRINT_TH_FLAGS);
 2302         if (*(s + size - 1) != '\0')
 2303                 panic("%s: string too long", __func__);
 2304         return (s);
 2305 }
