1 /*
2 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
3 * The Regents of the University of California. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by the University of
16 * California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * @(#)tcp_subr.c 8.2 (Berkeley) 5/24/95
34 * $FreeBSD: src/sys/netinet/tcp_subr.c,v 1.31.2.6 2000/10/06 20:45:13 alfred Exp $
35 */
36
37 #include "opt_tcpdebug.h"
38
39 #include <sys/param.h>
40 #include <sys/queue.h>
41 #include <sys/proc.h>
42 #include <sys/systm.h>
43 #include <sys/kernel.h>
44 #include <sys/sysctl.h>
45 #include <sys/malloc.h>
46 #include <sys/mbuf.h>
47 #include <sys/socket.h>
48 #include <sys/socketvar.h>
49 #include <sys/protosw.h>
50 #include <sys/errno.h>
51
52 #include <net/route.h>
53 #include <net/if.h>
54
55 #define _IP_VHL
56 #include <netinet/in.h>
57 #include <netinet/in_systm.h>
58 #include <netinet/ip.h>
59 #include <netinet/in_pcb.h>
60 #include <netinet/in_var.h>
61 #include <netinet/ip_var.h>
62 #include <netinet/ip_icmp.h>
63 #include <netinet/tcp.h>
64 #include <netinet/tcp_fsm.h>
65 #include <netinet/tcp_seq.h>
66 #include <netinet/tcp_timer.h>
67 #include <netinet/tcp_var.h>
68 #include <netinet/tcpip.h>
69 #ifdef TCPDEBUG
70 #include <netinet/tcp_debug.h>
71 #endif
72
73 int tcp_mssdflt = TCP_MSS;
74 SYSCTL_INT(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt,
75 CTLFLAG_RW, &tcp_mssdflt , 0, "");
76
77 static int tcp_rttdflt = TCPTV_SRTTDFLT / PR_SLOWHZ;
78 SYSCTL_INT(_net_inet_tcp, TCPCTL_RTTDFLT, rttdflt,
79 CTLFLAG_RW, &tcp_rttdflt , 0, "");
80
81 static int tcp_do_rfc1323 = 1;
82 SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323,
83 CTLFLAG_RW, &tcp_do_rfc1323 , 0, "");
84
85 static int tcp_do_rfc1644 = 1;
86 SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1644, rfc1644,
87 CTLFLAG_RW, &tcp_do_rfc1644 , 0, "");
88
89 static void tcp_cleartaocache(void);
90 static void tcp_notify __P((struct inpcb *, int));
91
92 /*
93 * Target size of TCP PCB hash table. Will be rounded down to a prime
94 * number.
95 */
96 #ifndef TCBHASHSIZE
97 #define TCBHASHSIZE 128
98 #endif
99
100 /*
101 * Tcp initialization
102 */
103 void
104 tcp_init()
105 {
106
107 tcp_iss = arc4random(); /* wrong, but better than a constant */
108 tcp_ccgen = 1;
109 tcp_cleartaocache();
110 LIST_INIT(&tcb);
111 tcbinfo.listhead = &tcb;
112 tcbinfo.hashbase = hashinit(TCBHASHSIZE, M_PCB, &tcbinfo.hashmask);
113 if (max_protohdr < sizeof(struct tcpiphdr))
114 max_protohdr = sizeof(struct tcpiphdr);
115 if (max_linkhdr + sizeof(struct tcpiphdr) > MHLEN)
116 panic("tcp_init");
117 }
118
119 /*
120 * Create template to be used to send tcp packets on a connection.
121 * Call after host entry created, allocates an mbuf and fills
122 * in a skeletal tcp/ip header, minimizing the amount of work
123 * necessary when the connection is used.
124 */
125 struct tcpiphdr *
126 tcp_template(tp)
127 struct tcpcb *tp;
128 {
129 register struct inpcb *inp = tp->t_inpcb;
130 register struct mbuf *m;
131 register struct tcpiphdr *n;
132
133 if ((n = tp->t_template) == 0) {
134 m = m_get(M_DONTWAIT, MT_HEADER);
135 if (m == NULL)
136 return (0);
137 m->m_len = sizeof (struct tcpiphdr);
138 n = mtod(m, struct tcpiphdr *);
139 }
140 n->ti_next = n->ti_prev = 0;
141 n->ti_x1 = 0;
142 n->ti_pr = IPPROTO_TCP;
143 n->ti_len = htons(sizeof (struct tcpiphdr) - sizeof (struct ip));
144 n->ti_src = inp->inp_laddr;
145 n->ti_dst = inp->inp_faddr;
146 n->ti_sport = inp->inp_lport;
147 n->ti_dport = inp->inp_fport;
148 n->ti_seq = 0;
149 n->ti_ack = 0;
150 n->ti_x2 = 0;
151 n->ti_off = 5;
152 n->ti_flags = 0;
153 n->ti_win = 0;
154 n->ti_sum = 0;
155 n->ti_urp = 0;
156 return (n);
157 }
158
159 /*
160 * Send a single message to the TCP at address specified by
161 * the given TCP/IP header. If m == 0, then we make a copy
162 * of the tcpiphdr at ti and send directly to the addressed host.
163 * This is used to force keep alive messages out using the TCP
164 * template for a connection tp->t_template. If flags are given
165 * then we send a message back to the TCP which originated the
166 * segment ti, and discard the mbuf containing it and any other
167 * attached mbufs.
168 *
169 * In any case the ack and sequence number of the transmitted
170 * segment are as specified by the parameters.
171 *
172 * NOTE: If m != NULL, then ti must point to *inside* the mbuf.
173 */
174 void
175 tcp_respond(tp, ti, m, ack, seq, flags)
176 struct tcpcb *tp;
177 register struct tcpiphdr *ti;
178 register struct mbuf *m;
179 tcp_seq ack, seq;
180 int flags;
181 {
182 register int tlen;
183 int win = 0;
184 struct route *ro = 0;
185 struct route sro;
186
187 if (tp) {
188 win = sbspace(&tp->t_inpcb->inp_socket->so_rcv);
189 ro = &tp->t_inpcb->inp_route;
190 } else {
191 ro = &sro;
192 bzero(ro, sizeof *ro);
193 }
194 if (m == 0) {
195 m = m_gethdr(M_DONTWAIT, MT_HEADER);
196 if (m == NULL)
197 return;
198 #ifdef TCP_COMPAT_42
199 tlen = 1;
200 #else
201 tlen = 0;
202 #endif
203 m->m_data += max_linkhdr;
204 *mtod(m, struct tcpiphdr *) = *ti;
205 ti = mtod(m, struct tcpiphdr *);
206 flags = TH_ACK;
207 } else {
208 m_freem(m->m_next);
209 m->m_next = 0;
210 m->m_data = (caddr_t)ti;
211 m->m_len = sizeof (struct tcpiphdr);
212 tlen = 0;
213 #define xchg(a,b,type) { type t; t=a; a=b; b=t; }
214 xchg(ti->ti_dst.s_addr, ti->ti_src.s_addr, u_long);
215 xchg(ti->ti_dport, ti->ti_sport, u_short);
216 #undef xchg
217 }
218 ti->ti_len = htons((u_short)(sizeof (struct tcphdr) + tlen));
219 tlen += sizeof (struct tcpiphdr);
220 m->m_len = tlen;
221 m->m_pkthdr.len = tlen;
222 m->m_pkthdr.rcvif = (struct ifnet *) 0;
223 ti->ti_next = ti->ti_prev = 0;
224 ti->ti_x1 = 0;
225 ti->ti_seq = htonl(seq);
226 ti->ti_ack = htonl(ack);
227 ti->ti_x2 = 0;
228 ti->ti_off = sizeof (struct tcphdr) >> 2;
229 ti->ti_flags = flags;
230 if (tp)
231 ti->ti_win = htons((u_short) (win >> tp->rcv_scale));
232 else
233 ti->ti_win = htons((u_short)win);
234 ti->ti_urp = 0;
235 ti->ti_sum = 0;
236 ti->ti_sum = in_cksum(m, tlen);
237 ((struct ip *)ti)->ip_len = tlen;
238 ((struct ip *)ti)->ip_ttl = ip_defttl;
239 #ifdef TCPDEBUG
240 if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
241 tcp_trace(TA_OUTPUT, 0, tp, ti, 0);
242 #endif
243 (void) ip_output(m, NULL, ro, 0, NULL);
244 if (ro == &sro && ro->ro_rt) {
245 RTFREE(ro->ro_rt);
246 }
247 }
248
249 /*
250 * Create a new TCP control block, making an
251 * empty reassembly queue and hooking it to the argument
252 * protocol control block.
253 */
254 struct tcpcb *
255 tcp_newtcpcb(inp)
256 struct inpcb *inp;
257 {
258 register struct tcpcb *tp;
259
260 tp = malloc(sizeof(*tp), M_PCB, M_NOWAIT);
261 if (tp == NULL)
262 return ((struct tcpcb *)0);
263 bzero((char *) tp, sizeof(struct tcpcb));
264 tp->seg_next = tp->seg_prev = (struct tcpiphdr *)tp;
265 tp->t_maxseg = tp->t_maxopd = tcp_mssdflt;
266
267 if (tcp_do_rfc1323)
268 tp->t_flags = (TF_REQ_SCALE|TF_REQ_TSTMP);
269 if (tcp_do_rfc1644)
270 tp->t_flags |= TF_REQ_CC;
271 tp->t_inpcb = inp;
272 /*
273 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
274 * rtt estimate. Set rttvar so that srtt + 4 * rttvar gives
275 * reasonable initial retransmit time.
276 */
277 tp->t_srtt = TCPTV_SRTTBASE;
278 tp->t_rttvar = ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
279 tp->t_rttmin = TCPTV_MIN;
280 tp->t_rxtcur = TCPTV_RTOBASE;
281 tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
282 tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
283 inp->inp_ip_ttl = ip_defttl;
284 inp->inp_ppcb = (caddr_t)tp;
285 return (tp);
286 }
287
288 /*
289 * Drop a TCP connection, reporting
290 * the specified error. If connection is synchronized,
291 * then send a RST to peer.
292 */
293 struct tcpcb *
294 tcp_drop(tp, errno)
295 register struct tcpcb *tp;
296 int errno;
297 {
298 struct socket *so = tp->t_inpcb->inp_socket;
299
300 if (TCPS_HAVERCVDSYN(tp->t_state)) {
301 tp->t_state = TCPS_CLOSED;
302 (void) tcp_output(tp);
303 tcpstat.tcps_drops++;
304 } else
305 tcpstat.tcps_conndrops++;
306 if (errno == ETIMEDOUT && tp->t_softerror)
307 errno = tp->t_softerror;
308 so->so_error = errno;
309 return (tcp_close(tp));
310 }
311
312 /*
313 * Close a TCP control block:
314 * discard all space held by the tcp
315 * discard internet protocol block
316 * wake up any sleepers
317 */
318 struct tcpcb *
319 tcp_close(tp)
320 register struct tcpcb *tp;
321 {
322 register struct tcpiphdr *t;
323 struct inpcb *inp = tp->t_inpcb;
324 struct socket *so = inp->inp_socket;
325 register struct mbuf *m;
326 register struct rtentry *rt;
327
328 /*
329 * If we got enough samples through the srtt filter,
330 * save the rtt and rttvar in the routing entry.
331 * 'Enough' is arbitrarily defined as the 16 samples.
332 * 16 samples is enough for the srtt filter to converge
333 * to within 5% of the correct value; fewer samples and
334 * we could save a very bogus rtt.
335 *
336 * Don't update the default route's characteristics and don't
337 * update anything that the user "locked".
338 */
339 if (tp->t_rttupdated >= 16 &&
340 (rt = inp->inp_route.ro_rt) &&
341 ((struct sockaddr_in *)rt_key(rt))->sin_addr.s_addr != INADDR_ANY) {
342 register u_long i = 0;
343
344 if ((rt->rt_rmx.rmx_locks & RTV_RTT) == 0) {
345 i = tp->t_srtt *
346 (RTM_RTTUNIT / (PR_SLOWHZ * TCP_RTT_SCALE));
347 if (rt->rt_rmx.rmx_rtt && i)
348 /*
349 * filter this update to half the old & half
350 * the new values, converting scale.
351 * See route.h and tcp_var.h for a
352 * description of the scaling constants.
353 */
354 rt->rt_rmx.rmx_rtt =
355 (rt->rt_rmx.rmx_rtt + i) / 2;
356 else
357 rt->rt_rmx.rmx_rtt = i;
358 tcpstat.tcps_cachedrtt++;
359 }
360 if ((rt->rt_rmx.rmx_locks & RTV_RTTVAR) == 0) {
361 i = tp->t_rttvar *
362 (RTM_RTTUNIT / (PR_SLOWHZ * TCP_RTTVAR_SCALE));
363 if (rt->rt_rmx.rmx_rttvar && i)
364 rt->rt_rmx.rmx_rttvar =
365 (rt->rt_rmx.rmx_rttvar + i) / 2;
366 else
367 rt->rt_rmx.rmx_rttvar = i;
368 tcpstat.tcps_cachedrttvar++;
369 }
370 /*
371 * update the pipelimit (ssthresh) if it has been updated
372 * already or if a pipesize was specified & the threshhold
373 * got below half the pipesize. I.e., wait for bad news
374 * before we start updating, then update on both good
375 * and bad news.
376 */
377 if (((rt->rt_rmx.rmx_locks & RTV_SSTHRESH) == 0 &&
378 ((i = tp->snd_ssthresh) != 0) && rt->rt_rmx.rmx_ssthresh) ||
379 i < (rt->rt_rmx.rmx_sendpipe / 2)) {
380 /*
381 * convert the limit from user data bytes to
382 * packets then to packet data bytes.
383 */
384 i = (i + tp->t_maxseg / 2) / tp->t_maxseg;
385 if (i < 2)
386 i = 2;
387 i *= (u_long)(tp->t_maxseg + sizeof (struct tcpiphdr));
388 if (rt->rt_rmx.rmx_ssthresh)
389 rt->rt_rmx.rmx_ssthresh =
390 (rt->rt_rmx.rmx_ssthresh + i) / 2;
391 else
392 rt->rt_rmx.rmx_ssthresh = i;
393 tcpstat.tcps_cachedssthresh++;
394 }
395 }
396 /* free the reassembly queue, if any */
397 t = tp->seg_next;
398 while (t != (struct tcpiphdr *)tp) {
399 t = (struct tcpiphdr *)t->ti_next;
400 m = REASS_MBUF((struct tcpiphdr *)t->ti_prev);
401 remque(t->ti_prev);
402 m_freem(m);
403 }
404 if (tp->t_template)
405 (void) m_free(dtom(tp->t_template));
406 free(tp, M_PCB);
407 inp->inp_ppcb = 0;
408 soisdisconnected(so);
409 in_pcbdetach(inp);
410 tcpstat.tcps_closed++;
411 return ((struct tcpcb *)0);
412 }
413
/*
 * Protocol drain hook, called when the system is short of mbufs.
 * TCP currently has nothing it can safely release here, so this
 * is a deliberate no-op.
 */
void
tcp_drain()
{

}
419
420 /*
421 * Notify a tcp user of an asynchronous error;
422 * store error as soft error, but wake up user
423 * (for now, won't do anything until can select for soft error).
424 */
425 static void
426 tcp_notify(inp, error)
427 struct inpcb *inp;
428 int error;
429 {
430 register struct tcpcb *tp = (struct tcpcb *)inp->inp_ppcb;
431 register struct socket *so = inp->inp_socket;
432
433 /*
434 * Ignore some errors if we are hooked up.
435 * If connection hasn't completed, has retransmitted several times,
436 * and receives a second error, give up now. This is better
437 * than waiting a long time to establish a connection that
438 * can never complete.
439 */
440 if (tp->t_state == TCPS_ESTABLISHED &&
441 (error == EHOSTUNREACH || error == ENETUNREACH ||
442 error == EHOSTDOWN)) {
443 return;
444 } else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
445 tp->t_softerror)
446 so->so_error = error;
447 else
448 tp->t_softerror = error;
449 wakeup((caddr_t) &so->so_timeo);
450 sorwakeup(so);
451 sowwakeup(so);
452 }
453
/*
 * Protocol control-input handler: dispatch ICMP-derived events to the
 * affected TCP connection(s).
 *
 * cmd is the PRC_* event code, sa the foreign address it concerns, and
 * vip (when non-NULL) points at the offending IP header so the ports
 * of the specific connection can be recovered from the TCP header
 * that follows it.
 */
void
tcp_ctlinput(cmd, sa, vip)
	int cmd;
	struct sockaddr *sa;
	void *vip;
{
	register struct ip *ip = vip;
	register struct tcphdr *th;
	void (*notify) __P((struct inpcb *, int)) = tcp_notify;

	/* Pick a specialized handler for quench and MTU events. */
	if (cmd == PRC_QUENCH)
		notify = tcp_quench;
#if 1
	else if (cmd == PRC_MSGSIZE)
		notify = tcp_mtudisc;
#endif
	else if (!PRC_IS_REDIRECT(cmd) &&
		 ((unsigned)cmd > PRC_NCMDS || inetctlerrmap[cmd] == 0))
		return;
	if (ip) {
		/* Step past the variable-length IP header to find ports. */
		th = (struct tcphdr *)((caddr_t)ip
				       + (IP_VHL_HL(ip->ip_vhl) << 2));
		in_pcbnotify(&tcb, sa, th->th_dport, ip->ip_src, th->th_sport,
			cmd, notify);
	} else
		/* No header available: notify all PCBs matching this peer. */
		in_pcbnotify(&tcb, sa, 0, zeroin_addr, 0, cmd, notify);
}
481
482 /*
483 * When a source quench is received, close congestion window
484 * to one segment. We will gradually open it again as we proceed.
485 */
486 void
487 tcp_quench(inp, errno)
488 struct inpcb *inp;
489 int errno;
490 {
491 struct tcpcb *tp = intotcpcb(inp);
492
493 if (tp)
494 tp->snd_cwnd = tp->t_maxseg;
495 }
496
497 #if 1
498 /*
499 * When `need fragmentation' ICMP is received, update our idea of the MSS
500 * based on the new value in the route. Also nudge TCP to send something,
501 * since we know the packet we just sent was dropped.
502 * This duplicates some code in the tcp_mss() function in tcp_input.c.
503 */
504 void
505 tcp_mtudisc(inp, errno)
506 struct inpcb *inp;
507 int errno;
508 {
509 struct tcpcb *tp = intotcpcb(inp);
510 struct rtentry *rt;
511 struct rmxp_tao *taop;
512 struct socket *so = inp->inp_socket;
513 int offered;
514 int mss;
515
516 if (tp) {
517 rt = tcp_rtlookup(inp);
518 if (!rt || !rt->rt_rmx.rmx_mtu) {
519 tp->t_maxopd = tp->t_maxseg = tcp_mssdflt;
520 return;
521 }
522 taop = rmx_taop(rt->rt_rmx);
523 offered = taop->tao_mssopt;
524 mss = rt->rt_rmx.rmx_mtu - sizeof(struct tcpiphdr);
525 if (offered)
526 mss = min(mss, offered);
527 /*
528 * XXX - The above conditional probably violates the TCP
529 * spec. The problem is that, since we don't know the
530 * other end's MSS, we are supposed to use a conservative
531 * default. But, if we do that, then MTU discovery will
532 * never actually take place, because the conservative
533 * default is much less than the MTUs typically seen
534 * on the Internet today. For the moment, we'll sweep
535 * this under the carpet.
536 *
537 * The conservative default might not actually be a problem
538 * if the only case this occurs is when sending an initial
539 * SYN with options and data to a host we've never talked
540 * to before. Then, they will reply with an MSS value which
541 * will get recorded and the new parameters should get
542 * recomputed. For Further Study.
543 */
544 if (tp->t_maxopd <= mss)
545 return;
546 tp->t_maxopd = mss;
547
548 if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
549 (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)
550 mss -= TCPOLEN_TSTAMP_APPA;
551 if ((tp->t_flags & (TF_REQ_CC|TF_NOOPT)) == TF_REQ_CC &&
552 (tp->t_flags & TF_RCVD_CC) == TF_RCVD_CC)
553 mss -= TCPOLEN_CC_APPA;
554 #if (MCLBYTES & (MCLBYTES - 1)) == 0
555 if (mss > MCLBYTES)
556 mss &= ~(MCLBYTES-1);
557 #else
558 if (mss > MCLBYTES)
559 mss = mss / MCLBYTES * MCLBYTES;
560 #endif
561 if (so->so_snd.sb_hiwat < mss)
562 mss = so->so_snd.sb_hiwat;
563
564 tp->t_maxseg = mss;
565
566 tcpstat.tcps_mturesent++;
567 tp->t_rtt = 0;
568 tp->snd_nxt = tp->snd_una;
569 tcp_output(tp);
570 }
571 }
572 #endif
573
574 /*
575 * Look-up the routing entry to the peer of this inpcb. If no route
576 * is found and it cannot be allocated the return NULL. This routine
577 * is called by TCP routines that access the rmx structure and by tcp_mss
578 * to get the interface MTU.
579 */
580 struct rtentry *
581 tcp_rtlookup(inp)
582 struct inpcb *inp;
583 {
584 struct route *ro;
585 struct rtentry *rt;
586
587 ro = &inp->inp_route;
588 rt = ro->ro_rt;
589 if (rt == NULL || !(rt->rt_flags & RTF_UP)) {
590 /* No route yet, so try to acquire one */
591 if (inp->inp_faddr.s_addr != INADDR_ANY) {
592 ro->ro_dst.sa_family = AF_INET;
593 ro->ro_dst.sa_len = sizeof(ro->ro_dst);
594 ((struct sockaddr_in *) &ro->ro_dst)->sin_addr =
595 inp->inp_faddr;
596 rtalloc(ro);
597 rt = ro->ro_rt;
598 }
599 }
600 return rt;
601 }
602
603 /*
604 * Return a pointer to the cached information about the remote host.
605 * The cached information is stored in the protocol specific part of
606 * the route metrics.
607 */
608 struct rmxp_tao *
609 tcp_gettaocache(inp)
610 struct inpcb *inp;
611 {
612 struct rtentry *rt = tcp_rtlookup(inp);
613
614 /* Make sure this is a host route and is up. */
615 if (rt == NULL ||
616 (rt->rt_flags & (RTF_UP|RTF_HOST)) != (RTF_UP|RTF_HOST))
617 return NULL;
618
619 return rmx_taop(rt->rt_rmx);
620 }
621
622 /*
623 * Clear all the TAO cache entries, called from tcp_init.
624 *
625 * XXX
626 * This routine is just an empty one, because we assume that the routing
627 * routing tables are initialized at the same time when TCP, so there is
628 * nothing in the cache left over.
629 */
630 static void
631 tcp_cleartaocache(void)
632 { }
Cache object: 5505138cbc769c9188835672d8e33d74
|