1 /*-
2 * Copyright (c) 1984, 1985, 1986, 1987, 1993
3 * The Regents of the University of California.
4 * Copyright (c) 2004-2006 Robert N. M. Watson
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 4. Neither the name of the University nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 * Copyright (c) 1995, Mike Mitchell
32 * All rights reserved.
33 *
34 * Redistribution and use in source and binary forms, with or without
35 * modification, are permitted provided that the following conditions
36 * are met:
37 * 1. Redistributions of source code must retain the above copyright
38 * notice, this list of conditions and the following disclaimer.
39 * 2. Redistributions in binary form must reproduce the above copyright
40 * notice, this list of conditions and the following disclaimer in the
41 * documentation and/or other materials provided with the distribution.
42 * 3. All advertising materials mentioning features or use of this software
43 * must display the following acknowledgement:
44 * This product includes software developed by the University of
45 * California, Berkeley and its contributors.
46 * 4. Neither the name of the University nor the names of its contributors
47 * may be used to endorse or promote products derived from this software
48 * without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
51 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
52 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
53 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
54 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
55 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
56 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
57 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
58 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
59 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
60 * SUCH DAMAGE.
61 *
62 * @(#)spx_usrreq.h
63 */
64
65 #include <sys/cdefs.h>
66 __FBSDID("$FreeBSD$");
67
68 #include <sys/param.h>
69 #include <sys/lock.h>
70 #include <sys/malloc.h>
71 #include <sys/mbuf.h>
72 #include <sys/mutex.h>
73 #include <sys/proc.h>
74 #include <sys/protosw.h>
75 #include <sys/signalvar.h>
76 #include <sys/socket.h>
77 #include <sys/socketvar.h>
78 #include <sys/sx.h>
79 #include <sys/systm.h>
80
81 #include <net/route.h>
82 #include <netinet/tcp_fsm.h>
83
84 #include <netipx/ipx.h>
85 #include <netipx/ipx_pcb.h>
86 #include <netipx/ipx_var.h>
87 #include <netipx/spx.h>
88 #include <netipx/spx_debug.h>
89 #include <netipx/spx_timer.h>
90 #include <netipx/spx_var.h>
91
92 /*
93 * SPX protocol implementation.
94 */
95 static struct mtx spx_mtx; /* Protects only spx_iss. */
96 static u_short spx_iss;
97 static u_short spx_newchecks[50];
98 static int spx_hardnosed;
99 static int spx_use_delack = 0;
100 static int traceallspxs = 0;
101 static struct spx_istat spx_istat;
102 static int spxrexmtthresh = 3;
103
104 #define SPX_LOCK_INIT() mtx_init(&spx_mtx, "spx_mtx", NULL, MTX_DEF)
105 #define SPX_LOCK() mtx_lock(&spx_mtx)
106 #define SPX_UNLOCK() mtx_unlock(&spx_mtx)
107
108 /* Following was struct spxstat spxstat; */
109 #ifndef spxstat
110 #define spxstat spx_istat.newstats
111 #endif
112
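/* Retransmit and persist timer backoff multipliers, indexed by s_rxtshift. */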
113 static const int spx_backoff[SPX_MAXRXTSHIFT+1] =
114 { 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 };
115
116 static void spx_close(struct spxpcb *cb);
117 static void spx_disconnect(struct spxpcb *cb);
118 static void spx_drop(struct spxpcb *cb, int errno);
119 static int spx_output(struct spxpcb *cb, struct mbuf *m0);
120 static int spx_reass(struct spxpcb *cb, struct spx *si);
121 static void spx_setpersist(struct spxpcb *cb);
122 static void spx_template(struct spxpcb *cb);
123 static void spx_timers(struct spxpcb *cb, int timer);
124 static void spx_usrclosed(struct spxpcb *cb);
125
126 static void spx_usr_abort(struct socket *so);
127 static int spx_accept(struct socket *so, struct sockaddr **nam);
128 static int spx_attach(struct socket *so, int proto, struct thread *td);
129 static int spx_bind(struct socket *so, struct sockaddr *nam, struct thread *td);
130 static void spx_usr_close(struct socket *so);
131 static int spx_connect(struct socket *so, struct sockaddr *nam,
132 struct thread *td);
133 static void spx_detach(struct socket *so);
134 static void spx_pcbdetach(struct ipxpcb *ipxp);
135 static int spx_usr_disconnect(struct socket *so);
136 static int spx_listen(struct socket *so, int backlog, struct thread *td);
137 static int spx_rcvd(struct socket *so, int flags);
138 static int spx_rcvoob(struct socket *so, struct mbuf *m, int flags);
139 static int spx_send(struct socket *so, int flags, struct mbuf *m,
140 struct sockaddr *addr, struct mbuf *control,
141 struct thread *td);
142 static int spx_shutdown(struct socket *so);
143 static int spx_sp_attach(struct socket *so, int proto, struct thread *td);
144
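/*
 * User request switch for SPX stream sockets; all operations except attach
 * are shared with the sequenced-packet variant below.
 */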
145 struct pr_usrreqs spx_usrreqs = {
146 .pru_abort = spx_usr_abort,
147 .pru_accept = spx_accept,
148 .pru_attach = spx_attach,
149 .pru_bind = spx_bind,
150 .pru_connect = spx_connect,
151 .pru_control = ipx_control,
152 .pru_detach = spx_detach,
153 .pru_disconnect = spx_usr_disconnect,
154 .pru_listen = spx_listen,
155 .pru_peeraddr = ipx_peeraddr,
156 .pru_rcvd = spx_rcvd,
157 .pru_rcvoob = spx_rcvoob,
158 .pru_send = spx_send,
159 .pru_shutdown = spx_shutdown,
160 .pru_sockaddr = ipx_sockaddr,
161 .pru_close = spx_usr_close,
162 };
163
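/*
 * As above, but with an attach routine (spx_sp_attach()) that also enables
 * the packet interface and header passing for sequenced-packet SPX sockets.
 */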
164 struct pr_usrreqs spx_usrreq_sps = {
165 .pru_abort = spx_usr_abort,
166 .pru_accept = spx_accept,
167 .pru_attach = spx_sp_attach,
168 .pru_bind = spx_bind,
169 .pru_connect = spx_connect,
170 .pru_control = ipx_control,
171 .pru_detach = spx_detach,
172 .pru_disconnect = spx_usr_disconnect,
173 .pru_listen = spx_listen,
174 .pru_peeraddr = ipx_peeraddr,
175 .pru_rcvd = spx_rcvd,
176 .pru_rcvoob = spx_rcvoob,
177 .pru_send = spx_send,
178 .pru_shutdown = spx_shutdown,
179 .pru_sockaddr = ipx_sockaddr,
180 .pru_close = spx_usr_close,
181 };
182
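/*
 * Initialize the SPX implementation: set up the mutex protecting spx_iss and
 * seed the initial send sequence number.
 */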
183 void
184 spx_init(void)
185 {
186
187 SPX_LOCK_INIT();
188 spx_iss = 1; /* WRONG !! should fish it out of TODR */
189 }
190
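/*
 * Input routine for an SPX packet that has been matched to an SPX pcb:
 * handle passive and active connection establishment, strip the IPX header,
 * queue data through spx_reass(), and generate any output that is now due.
 */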
191 void
192 spx_input(struct mbuf *m, struct ipxpcb *ipxp)
193 {
194 struct spxpcb *cb;
195 struct spx *si = mtod(m, struct spx *);
196 struct socket *so;
197 struct spx spx_savesi;
198 int dropsocket = 0;
199 short ostate = 0;
200
201 spxstat.spxs_rcvtotal++;
202 KASSERT(ipxp != NULL, ("spx_input: ipxpcb == NULL"));
203
204 /*
205 * spx_input() assumes that the caller will hold both the pcb list
206 * lock and also the ipxp lock. spx_input() will release both before
207 * returning, and may in fact trade in the ipxp lock for another pcb
208 * lock following sonewconn().
209 */
210 IPX_LIST_LOCK_ASSERT();
211 IPX_LOCK_ASSERT(ipxp);
212
213 cb = ipxtospxpcb(ipxp);
214 KASSERT(cb != NULL, ("spx_input: cb == NULL"));
215
216 if (ipxp->ipxp_flags & IPXP_DROPPED)
217 goto drop;
218
219 if (m->m_len < sizeof(*si)) {
220 if ((m = m_pullup(m, sizeof(*si))) == NULL) {
221 IPX_UNLOCK(ipxp);
222 IPX_LIST_UNLOCK();
223 spxstat.spxs_rcvshort++;
224 return;
225 }
226 si = mtod(m, struct spx *);
227 }
228 si->si_seq = ntohs(si->si_seq);
229 si->si_ack = ntohs(si->si_ack);
230 si->si_alo = ntohs(si->si_alo);
231
232 so = ipxp->ipxp_socket;
233 KASSERT(so != NULL, ("spx_input: so == NULL"));
234
235 if (so->so_options & SO_DEBUG || traceallspxs) {
236 ostate = cb->s_state;
237 spx_savesi = *si;
238 }
239 if (so->so_options & SO_ACCEPTCONN) {
240 struct spxpcb *ocb = cb;
241
242 so = sonewconn(so, 0);
243 if (so == NULL)
244 goto drop;
245
246 /*
247 * This is ugly, but ....
248 *
249 * Mark socket as temporary until we're committed to keeping
250 * it. The code at ``drop'' and ``dropwithreset'' checks the
251 * flag dropsocket to see if the temporary socket created
252 * here should be discarded. We mark the socket as
253 * discardable until we're committed to it below in
254 * TCPS_LISTEN.
255 *
256 * XXXRW: In the new world order of real kernel parallelism,
257 * temporarily allocating the socket when we're "not sure"
258 * seems like a bad idea, as we might race to remove it if
259 * the listen socket is closed...?
260 *
261 * We drop the lock of the listen socket ipxp, and acquire
262 * the lock of the new socket ipxp.
263 */
264 dropsocket++;
265 IPX_UNLOCK(ipxp);
266 ipxp = (struct ipxpcb *)so->so_pcb;
267 IPX_LOCK(ipxp);
268 ipxp->ipxp_laddr = si->si_dna;
269 cb = ipxtospxpcb(ipxp);
270 cb->s_mtu = ocb->s_mtu; /* preserve sockopts */
271 cb->s_flags = ocb->s_flags; /* preserve sockopts */
272 cb->s_flags2 = ocb->s_flags2; /* preserve sockopts */
273 cb->s_state = TCPS_LISTEN;
274 }
275 IPX_LOCK_ASSERT(ipxp);
276
277 /*
278 * Packet received on connection. Reset idle time and keep-alive
279 * timer.
280 */
281 cb->s_idle = 0;
282 cb->s_timer[SPXT_KEEP] = SPXTV_KEEP;
283
284 switch (cb->s_state) {
285 case TCPS_LISTEN:{
286 struct sockaddr_ipx *sipx, ssipx;
287 struct ipx_addr laddr;
288
289 /*
290 * If somebody here was carrying on a conversation and went
291 * away, and his pen pal thinks he can still talk, we get the
292 * misdirected packet.
293 */
294 if (spx_hardnosed && (si->si_did != 0 || si->si_seq != 0)) {
295 spx_istat.gonawy++;
296 goto dropwithreset;
297 }
298 sipx = &ssipx;
299 bzero(sipx, sizeof *sipx);
300 sipx->sipx_len = sizeof(*sipx);
301 sipx->sipx_family = AF_IPX;
302 sipx->sipx_addr = si->si_sna;
303 laddr = ipxp->ipxp_laddr;
304 if (ipx_nullhost(laddr))
305 ipxp->ipxp_laddr = si->si_dna;
306 if (ipx_pcbconnect(ipxp, (struct sockaddr *)sipx, &thread0)) {
307 ipxp->ipxp_laddr = laddr;
308 spx_istat.noconn++;
309 goto drop;
310 }
311 spx_template(cb);
312 dropsocket = 0; /* committed to socket */
313 cb->s_did = si->si_sid;
314 cb->s_rack = si->si_ack;
315 cb->s_ralo = si->si_alo;
316 #define THREEWAYSHAKE
317 #ifdef THREEWAYSHAKE
318 cb->s_state = TCPS_SYN_RECEIVED;
319 cb->s_force = 1 + SPXT_KEEP;
320 spxstat.spxs_accepts++;
321 cb->s_timer[SPXT_KEEP] = SPXTV_KEEP;
322 }
323 break;
324
325 case TCPS_SYN_RECEIVED: {
326 /*
327 * This state means that we have heard a response to our
328 * acceptance of their connection. It is probably logically
329 * unnecessary in this implementation.
330 */
331 if (si->si_did != cb->s_sid) {
332 spx_istat.wrncon++;
333 goto drop;
334 }
335 #endif
336 ipxp->ipxp_fport = si->si_sport;
337 cb->s_timer[SPXT_REXMT] = 0;
338 cb->s_timer[SPXT_KEEP] = SPXTV_KEEP;
339 soisconnected(so);
340 cb->s_state = TCPS_ESTABLISHED;
341 spxstat.spxs_accepts++;
342 }
343 break;
344
345 case TCPS_SYN_SENT:
346 /*
347 * This state means that we have gotten a response to our
348 * attempt to establish a connection. We fill in the data
349 * from the other side, telling us which port to respond to,
350 * instead of the well-known one we might have sent to in the
351 * first place. We also require that this is a response to
352 * our connection id.
353 */
354 if (si->si_did != cb->s_sid) {
355 spx_istat.notme++;
356 goto drop;
357 }
358 spxstat.spxs_connects++;
359 cb->s_did = si->si_sid;
360 cb->s_rack = si->si_ack;
361 cb->s_ralo = si->si_alo;
362 cb->s_dport = ipxp->ipxp_fport = si->si_sport;
363 cb->s_timer[SPXT_REXMT] = 0;
364 cb->s_flags |= SF_ACKNOW;
365 soisconnected(so);
366 cb->s_state = TCPS_ESTABLISHED;
367 /*
368 * Use roundtrip time of connection request for initial rtt.
369 */
370 if (cb->s_rtt) {
371 cb->s_srtt = cb->s_rtt << 3;
372 cb->s_rttvar = cb->s_rtt << 1;
373 SPXT_RANGESET(cb->s_rxtcur,
374 ((cb->s_srtt >> 2) + cb->s_rttvar) >> 1,
375 SPXTV_MIN, SPXTV_REXMTMAX);
376 cb->s_rtt = 0;
377 }
378 }
379
380 if (so->so_options & SO_DEBUG || traceallspxs)
381 spx_trace(SA_INPUT, (u_char)ostate, cb, &spx_savesi, 0);
382
383 m->m_len -= sizeof(struct ipx);
384 m->m_pkthdr.len -= sizeof(struct ipx);
385 m->m_data += sizeof(struct ipx);
386
387 if (spx_reass(cb, si))
388 m_freem(m);
389 if (cb->s_force || (cb->s_flags & (SF_ACKNOW|SF_WIN|SF_RXT)))
390 spx_output(cb, NULL);
391 cb->s_flags &= ~(SF_WIN|SF_RXT);
392 IPX_UNLOCK(ipxp);
393 IPX_LIST_UNLOCK();
394 return;
395
396 dropwithreset:
397 IPX_LOCK_ASSERT(ipxp);
398 if (cb == NULL || (cb->s_ipxpcb->ipxp_socket->so_options & SO_DEBUG ||
399 traceallspxs))
400 spx_trace(SA_DROP, (u_char)ostate, cb, &spx_savesi, 0);
401 IPX_UNLOCK(ipxp);
402 if (dropsocket) {
403 struct socket *head;
404 ACCEPT_LOCK();
405 KASSERT((so->so_qstate & SQ_INCOMP) != 0,
406 ("spx_input: nascent socket not SQ_INCOMP on soabort()"));
407 head = so->so_head;
408 TAILQ_REMOVE(&head->so_incomp, so, so_list);
409 head->so_incqlen--;
410 so->so_qstate &= ~SQ_INCOMP;
411 so->so_head = NULL;
412 ACCEPT_UNLOCK();
413 soabort(so);
414 }
415 IPX_LIST_UNLOCK();
416 m_freem(dtom(si));
417 return;
418
419 drop:
420 IPX_LOCK_ASSERT(ipxp);
421 if (cb->s_ipxpcb->ipxp_socket->so_options & SO_DEBUG || traceallspxs)
422 spx_trace(SA_DROP, (u_char)ostate, cb, &spx_savesi, 0);
423 IPX_UNLOCK(ipxp);
424 IPX_LIST_UNLOCK();
425 m_freem(m);
426 }
427
428 /*
429 * This is structurally similar to the TCP reassembly routine, but its
430 * function is somewhat different: it merely queues packets up and
431 * suppresses duplicates.
432 */
433 static int
434 spx_reass(struct spxpcb *cb, struct spx *si)
435 {
436 struct spx_q *q;
437 struct mbuf *m;
438 struct socket *so = cb->s_ipxpcb->ipxp_socket;
439 char packetp = cb->s_flags & SF_HI;
440 int incr;
441 char wakeup = 0;
442
443 IPX_LOCK_ASSERT(cb->s_ipxpcb);
444
445 if (si == SI(0))
446 goto present;
447 /*
448 * Update our news from them.
449 */
450 if (si->si_cc & SPX_SA)
451 cb->s_flags |= (spx_use_delack ? SF_DELACK : SF_ACKNOW);
452 if (SSEQ_GT(si->si_alo, cb->s_ralo))
453 cb->s_flags |= SF_WIN;
454 if (SSEQ_LEQ(si->si_ack, cb->s_rack)) {
455 if ((si->si_cc & SPX_SP) && cb->s_rack != (cb->s_smax + 1)) {
456 spxstat.spxs_rcvdupack++;
457 /*
458 * If this is a completely duplicate ack and other
459 * conditions hold, we assume a packet has been
460 * dropped and retransmit it exactly as in
461 * tcp_input().
462 */
463 if (si->si_ack != cb->s_rack ||
464 si->si_alo != cb->s_ralo)
465 cb->s_dupacks = 0;
466 else if (++cb->s_dupacks == spxrexmtthresh) {
467 u_short onxt = cb->s_snxt;
468 int cwnd = cb->s_cwnd;
469
470 cb->s_snxt = si->si_ack;
471 cb->s_cwnd = CUNIT;
472 cb->s_force = 1 + SPXT_REXMT;
473 spx_output(cb, NULL);
474 cb->s_timer[SPXT_REXMT] = cb->s_rxtcur;
475 cb->s_rtt = 0;
476 if (cwnd >= 4 * CUNIT)
477 cb->s_cwnd = cwnd / 2;
478 if (SSEQ_GT(onxt, cb->s_snxt))
479 cb->s_snxt = onxt;
480 return (1);
481 }
482 } else
483 cb->s_dupacks = 0;
484 goto update_window;
485 }
486 cb->s_dupacks = 0;
487
488 /*
489 * If our correspondent acknowledges data we haven't sent, TCP would
490 * drop the packet after acking. We'll be a little more permissive.
491 */
492 if (SSEQ_GT(si->si_ack, (cb->s_smax + 1))) {
493 spxstat.spxs_rcvacktoomuch++;
494 si->si_ack = cb->s_smax + 1;
495 }
496 spxstat.spxs_rcvackpack++;
497
498 /*
499 * If transmit timer is running and timed sequence number was acked,
500 * update smoothed round trip time. See discussion of algorithm in
501 * tcp_input.c
502 */
503 if (cb->s_rtt && SSEQ_GT(si->si_ack, cb->s_rtseq)) {
504 spxstat.spxs_rttupdated++;
505 if (cb->s_srtt != 0) {
506 short delta;
507 delta = cb->s_rtt - (cb->s_srtt >> 3);
508 if ((cb->s_srtt += delta) <= 0)
509 cb->s_srtt = 1;
510 if (delta < 0)
511 delta = -delta;
512 delta -= (cb->s_rttvar >> 2);
513 if ((cb->s_rttvar += delta) <= 0)
514 cb->s_rttvar = 1;
515 } else {
516 /*
517 * No rtt measurement yet.
518 */
519 cb->s_srtt = cb->s_rtt << 3;
520 cb->s_rttvar = cb->s_rtt << 1;
521 }
522 cb->s_rtt = 0;
523 cb->s_rxtshift = 0;
524 SPXT_RANGESET(cb->s_rxtcur,
525 ((cb->s_srtt >> 2) + cb->s_rttvar) >> 1,
526 SPXTV_MIN, SPXTV_REXMTMAX);
527 }
528
529 /*
530 * If all outstanding data is acked, stop retransmit timer and
531 * remember to restart (more output or persist). If there is more
532 * data to be acked, restart retransmit timer, using current
533 * (possibly backed-off) value.
534 */
535 if (si->si_ack == cb->s_smax + 1) {
536 cb->s_timer[SPXT_REXMT] = 0;
537 cb->s_flags |= SF_RXT;
538 } else if (cb->s_timer[SPXT_PERSIST] == 0)
539 cb->s_timer[SPXT_REXMT] = cb->s_rxtcur;
540
541 /*
542 * When new data is acked, open the congestion window. If the window
543 * gives us less than ssthresh packets in flight, open exponentially
544 * (maxseg at a time). Otherwise open linearly (maxseg^2 / cwnd at a
545 * time).
546 */
547 incr = CUNIT;
548 if (cb->s_cwnd > cb->s_ssthresh)
549 incr = max(incr * incr / cb->s_cwnd, 1);
550 cb->s_cwnd = min(cb->s_cwnd + incr, cb->s_cwmx);
551
552 /*
553 * Trim Acked data from output queue.
554 */
555 SOCKBUF_LOCK(&so->so_snd);
556 while ((m = so->so_snd.sb_mb) != NULL) {
557 if (SSEQ_LT((mtod(m, struct spx *))->si_seq, si->si_ack))
558 sbdroprecord_locked(&so->so_snd);
559 else
560 break;
561 }
562 sowwakeup_locked(so);
563 cb->s_rack = si->si_ack;
564 update_window:
565 if (SSEQ_LT(cb->s_snxt, cb->s_rack))
566 cb->s_snxt = cb->s_rack;
567 if (SSEQ_LT(cb->s_swl1, si->si_seq) || ((cb->s_swl1 == si->si_seq &&
568 (SSEQ_LT(cb->s_swl2, si->si_ack))) ||
569 (cb->s_swl2 == si->si_ack && SSEQ_LT(cb->s_ralo, si->si_alo)))) {
570 /* keep track of pure window updates */
571 if ((si->si_cc & SPX_SP) && cb->s_swl2 == si->si_ack
572 && SSEQ_LT(cb->s_ralo, si->si_alo)) {
573 spxstat.spxs_rcvwinupd++;
574 spxstat.spxs_rcvdupack--;
575 }
576 cb->s_ralo = si->si_alo;
577 cb->s_swl1 = si->si_seq;
578 cb->s_swl2 = si->si_ack;
579 cb->s_swnd = (1 + si->si_alo - si->si_ack);
580 if (cb->s_swnd > cb->s_smxw)
581 cb->s_smxw = cb->s_swnd;
582 cb->s_flags |= SF_WIN;
583 }
584
585 /*
586 * If this packet number is higher than that which we have allocated,
587 * refuse it, unless urgent.
588 */
589 if (SSEQ_GT(si->si_seq, cb->s_alo)) {
590 if (si->si_cc & SPX_SP) {
591 spxstat.spxs_rcvwinprobe++;
592 return (1);
593 } else
594 spxstat.spxs_rcvpackafterwin++;
595 if (si->si_cc & SPX_OB) {
596 if (SSEQ_GT(si->si_seq, cb->s_alo + 60)) {
597 m_freem(dtom(si));
598 return (0);
599 } /* else queue this packet; */
600 } else {
601 #ifdef BROKEN
602 /*
603 * XXXRW: This is broken on at least one count:
604 * spx_close() will free the ipxp and related parts,
605 * which are then touched by spx_input() after the
606 * return from spx_reass().
607 */
608 /*struct socket *so = cb->s_ipxpcb->ipxp_socket;
609 if (so->so_state && SS_NOFDREF) {
610 spx_close(cb);
611 } else
612 would crash system*/
613 #endif
614 spx_istat.notyet++;
615 m_freem(dtom(si));
616 return (0);
617 }
618 }
619
620 /*
621 * If this is a system packet, we don't need to queue it up, and
622 * won't update acknowledge #.
623 */
624 if (si->si_cc & SPX_SP)
625 return (1);
626
627 /*
628 * We have already seen this packet, so drop.
629 */
630 if (SSEQ_LT(si->si_seq, cb->s_ack)) {
631 spx_istat.bdreas++;
632 spxstat.spxs_rcvduppack++;
633 if (si->si_seq == cb->s_ack - 1)
634 spx_istat.lstdup++;
635 return (1);
636 }
637
638 /*
639 * Loop through all packets queued up to insert in appropriate
640 * sequence.
641 */
642 for (q = cb->s_q.si_next; q != &cb->s_q; q = q->si_next) {
643 if (si->si_seq == SI(q)->si_seq) {
644 spxstat.spxs_rcvduppack++;
645 return (1);
646 }
647 if (SSEQ_LT(si->si_seq, SI(q)->si_seq)) {
648 spxstat.spxs_rcvoopack++;
649 break;
650 }
651 }
652 insque(si, q->si_prev);
653 /*
654 * If this packet is urgent, inform process
655 */
656 if (si->si_cc & SPX_OB) {
657 cb->s_iobc = ((char *)si)[1 + sizeof(*si)];
658 sohasoutofband(so);
659 cb->s_oobflags |= SF_IOOB;
660 }
661 present:
662 #define SPINC sizeof(struct spxhdr)
663 SOCKBUF_LOCK(&so->so_rcv);
664
665 /*
666 * Loop through all packets queued up to update acknowledge number,
667 * and present all acknowledged data to user; if in packet interface
668 * mode, show packet headers.
669 */
670 for (q = cb->s_q.si_next; q != &cb->s_q; q = q->si_next) {
671 if (SI(q)->si_seq == cb->s_ack) {
672 cb->s_ack++;
673 m = dtom(q);
674 if (SI(q)->si_cc & SPX_OB) {
675 cb->s_oobflags &= ~SF_IOOB;
676 if (so->so_rcv.sb_cc)
677 so->so_oobmark = so->so_rcv.sb_cc;
678 else
679 so->so_rcv.sb_state |= SBS_RCVATMARK;
680 }
681 q = q->si_prev;
682 remque(q->si_next);
683 wakeup = 1;
684 spxstat.spxs_rcvpack++;
685 #ifdef SF_NEWCALL
686 if (cb->s_flags2 & SF_NEWCALL) {
687 struct spxhdr *sp = mtod(m, struct spxhdr *);
688 u_char dt = sp->spx_dt;
689 spx_newchecks[4]++;
690 if (dt != cb->s_rhdr.spx_dt) {
691 struct mbuf *mm =
692 m_getclr(M_DONTWAIT, MT_CONTROL);
693 spx_newchecks[0]++;
694 if (mm != NULL) {
695 u_short *s =
696 mtod(mm, u_short *);
697 cb->s_rhdr.spx_dt = dt;
698 mm->m_len = 5; /*XXX*/
699 s[0] = 5;
700 s[1] = 1;
701 *(u_char *)(&s[2]) = dt;
702 sbappend_locked(&so->so_rcv, mm);
703 }
704 }
705 if (sp->spx_cc & SPX_OB) {
706 MCHTYPE(m, MT_OOBDATA);
707 spx_newchecks[1]++;
708 so->so_oobmark = 0;
709 so->so_rcv.sb_state &= ~SBS_RCVATMARK;
710 }
711 if (packetp == 0) {
712 m->m_data += SPINC;
713 m->m_len -= SPINC;
714 m->m_pkthdr.len -= SPINC;
715 }
716 if ((sp->spx_cc & SPX_EM) || packetp) {
717 sbappendrecord_locked(&so->so_rcv, m);
718 spx_newchecks[9]++;
719 } else
720 sbappend_locked(&so->so_rcv, m);
721 } else
722 #endif
723 if (packetp)
724 sbappendrecord_locked(&so->so_rcv, m);
725 else {
726 cb->s_rhdr = *mtod(m, struct spxhdr *);
727 m->m_data += SPINC;
728 m->m_len -= SPINC;
729 m->m_pkthdr.len -= SPINC;
730 sbappend_locked(&so->so_rcv, m);
731 }
732 } else
733 break;
734 }
735 if (wakeup)
736 sorwakeup_locked(so);
737 else
738 SOCKBUF_UNLOCK(&so->so_rcv);
739 return (0);
740 }
741
742 void
743 spx_ctlinput(int cmd, struct sockaddr *arg_as_sa, void *dummy)
744 {
745
746 /* Currently, nothing. */
747 }
748
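/*
 * If m0 is non-NULL, packetize the user data: fragment it to the connection
 * MTU, pad it to an even length, prepend an SPX header, and append the
 * result to the send buffer. Then transmit whatever the send and congestion
 * windows allow, including pure acknowledgements and window probes when
 * forced by the timers.
 */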
749 static int
750 spx_output(struct spxpcb *cb, struct mbuf *m0)
751 {
752 struct socket *so = cb->s_ipxpcb->ipxp_socket;
753 struct mbuf *m;
754 struct spx *si = NULL;
755 struct sockbuf *sb = &so->so_snd;
756 int len = 0, win, rcv_win;
757 short span, off, recordp = 0;
758 u_short alo;
759 int error = 0, sendalot;
760 #ifdef notdef
761 int idle;
762 #endif
763 struct mbuf *mprev;
764
765 IPX_LOCK_ASSERT(cb->s_ipxpcb);
766
767 if (m0 != NULL) {
768 int mtu = cb->s_mtu;
769 int datalen;
770
771 /*
772 * Make sure that packet isn't too big.
773 */
774 for (m = m0; m != NULL; m = m->m_next) {
775 mprev = m;
776 len += m->m_len;
777 if (m->m_flags & M_EOR)
778 recordp = 1;
779 }
780 datalen = (cb->s_flags & SF_HO) ?
781 len - sizeof(struct spxhdr) : len;
782 if (datalen > mtu) {
783 if (cb->s_flags & SF_PI) {
784 m_freem(m0);
785 return (EMSGSIZE);
786 } else {
787 int oldEM = cb->s_cc & SPX_EM;
788
789 cb->s_cc &= ~SPX_EM;
790 while (len > mtu) {
791 m = m_copym(m0, 0, mtu, M_DONTWAIT);
792 if (m == NULL) {
793 cb->s_cc |= oldEM;
794 m_freem(m0);
795 return (ENOBUFS);
796 }
797 if (cb->s_flags & SF_NEWCALL) {
798 struct mbuf *mm = m;
799 spx_newchecks[7]++;
800 while (mm != NULL) {
801 mm->m_flags &= ~M_EOR;
802 mm = mm->m_next;
803 }
804 }
805 error = spx_output(cb, m);
806 if (error) {
807 cb->s_cc |= oldEM;
808 m_freem(m0);
809 return (error);
810 }
811 m_adj(m0, mtu);
812 len -= mtu;
813 }
814 cb->s_cc |= oldEM;
815 }
816 }
817
818 /*
819 * Force length even, by adding a "garbage byte" if
820 * necessary.
821 */
822 if (len & 1) {
823 m = mprev;
824 if (M_TRAILINGSPACE(m) >= 1)
825 m->m_len++;
826 else {
827 struct mbuf *m1 = m_get(M_DONTWAIT, MT_DATA);
828
829 if (m1 == NULL) {
830 m_freem(m0);
831 return (ENOBUFS);
832 }
833 m1->m_len = 1;
834 *(mtod(m1, u_char *)) = 0;
835 m->m_next = m1;
836 }
837 }
838 m = m_gethdr(M_DONTWAIT, MT_DATA);
839 if (m == NULL) {
840 m_freem(m0);
841 return (ENOBUFS);
842 }
843
844 /*
845 * Fill in mbuf with extended SP header and addresses and
846 * length put into network format.
847 */
848 MH_ALIGN(m, sizeof(struct spx));
849 m->m_len = sizeof(struct spx);
850 m->m_next = m0;
851 si = mtod(m, struct spx *);
852 si->si_i = *cb->s_ipx;
853 si->si_s = cb->s_shdr;
854 if ((cb->s_flags & SF_PI) && (cb->s_flags & SF_HO)) {
855 struct spxhdr *sh;
856 if (m0->m_len < sizeof(*sh)) {
857 if((m0 = m_pullup(m0, sizeof(*sh))) == NULL) {
858 m_free(m);
859 m_freem(m0);
860 return (EINVAL);
861 }
862 m->m_next = m0;
863 }
864 sh = mtod(m0, struct spxhdr *);
865 si->si_dt = sh->spx_dt;
866 si->si_cc |= sh->spx_cc & SPX_EM;
867 m0->m_len -= sizeof(*sh);
868 m0->m_data += sizeof(*sh);
869 len -= sizeof(*sh);
870 }
871 len += sizeof(*si);
872 if ((cb->s_flags2 & SF_NEWCALL) && recordp) {
873 si->si_cc |= SPX_EM;
874 spx_newchecks[8]++;
875 }
876 if (cb->s_oobflags & SF_SOOB) {
877 /*
878 * Per jqj@cornell: Make sure OB packets convey
879 * exactly 1 byte. If the packet is 1 byte or
880 * larger, we have already guaranteed there to be at
881 * least one garbage byte for the checksum, and extra
882 * bytes shouldn't hurt!
883 */
884 if (len > sizeof(*si)) {
885 si->si_cc |= SPX_OB;
886 len = (1 + sizeof(*si));
887 }
888 }
889 si->si_len = htons((u_short)len);
890 m->m_pkthdr.len = ((len - 1) | 1) + 1;
891
892 /*
893 * Queue stuff up for output.
894 */
895 sbappendrecord(sb, m);
896 cb->s_seq++;
897 }
898 #ifdef notdef
899 idle = (cb->s_smax == (cb->s_rack - 1));
900 #endif
901 again:
902 sendalot = 0;
903 off = cb->s_snxt - cb->s_rack;
904 win = min(cb->s_swnd, (cb->s_cwnd / CUNIT));
905
906 /*
907 * If in persist timeout with window of 0, send a probe. Otherwise,
908 * if window is small but nonzero and timer expired, send what we can
909 * and go into transmit state.
910 */
911 if (cb->s_force == 1 + SPXT_PERSIST) {
912 if (win != 0) {
913 cb->s_timer[SPXT_PERSIST] = 0;
914 cb->s_rxtshift = 0;
915 }
916 }
917 span = cb->s_seq - cb->s_rack;
918 len = min(span, win) - off;
919
920 if (len < 0) {
921 /*
922 * Window shrank after we went into it. If window shrank to
923 * 0, cancel the pending retransmission and pull s_snxt back to
924 * the (closed) window. We will enter persist state below. If
925 * the window didn't close completely, just wait for an ACK.
926 */
927 len = 0;
928 if (win == 0) {
929 cb->s_timer[SPXT_REXMT] = 0;
930 cb->s_snxt = cb->s_rack;
931 }
932 }
933 if (len > 1)
934 sendalot = 1;
935 rcv_win = sbspace(&so->so_rcv);
936
937 /*
938 * Send if we owe peer an ACK.
939 */
940 if (cb->s_oobflags & SF_SOOB) {
941 /*
942 * Must transmit this out of band packet.
943 */
944 cb->s_oobflags &= ~ SF_SOOB;
945 sendalot = 1;
946 spxstat.spxs_sndurg++;
947 goto found;
948 }
949 if (cb->s_flags & SF_ACKNOW)
950 goto send;
951 if (cb->s_state < TCPS_ESTABLISHED)
952 goto send;
953
954 /*
955 * Silly window can't happen in spx. Code from TCP deleted.
956 */
957 if (len)
958 goto send;
959
960 /*
961 * Compare available window to amount of window known to peer (as
962 * Compare the available window to the amount of window known to the
963 * peer (as advertised window less next expected input). If the
964 * difference is at least two packets or at least 35% of the maximum
965 * possible window, then we want to send a window update to the peer.
966 if (rcv_win > 0) {
967 u_short delta = 1 + cb->s_alo - cb->s_ack;
968 int adv = rcv_win - (delta * cb->s_mtu);
969
970 if ((so->so_rcv.sb_cc == 0 && adv >= (2 * cb->s_mtu)) ||
971 (100 * adv / so->so_rcv.sb_hiwat >= 35)) {
972 spxstat.spxs_sndwinup++;
973 cb->s_flags |= SF_ACKNOW;
974 goto send;
975 }
976
977 }
978
979 /*
980 * Many comments from tcp_output.c are appropriate here including ...
981 * If send window is too small, there is data to transmit, and no
982 * retransmit or persist is pending, then go to persist state. If
983 * nothing happens soon, send when timer expires: if window is
984 * nonzero, transmit what we can, otherwise send a probe.
985 */
986 if (so->so_snd.sb_cc && cb->s_timer[SPXT_REXMT] == 0 &&
987 cb->s_timer[SPXT_PERSIST] == 0) {
988 cb->s_rxtshift = 0;
989 spx_setpersist(cb);
990 }
991
992 /*
993 * No reason to send a packet, just return.
994 */
995 cb->s_outx = 1;
996 return (0);
997
998 send:
999 /*
1000 * Find requested packet.
1001 */
1002 si = 0;
1003 if (len > 0) {
1004 cb->s_want = cb->s_snxt;
1005 for (m = sb->sb_mb; m != NULL; m = m->m_act) {
1006 si = mtod(m, struct spx *);
1007 if (SSEQ_LEQ(cb->s_snxt, si->si_seq))
1008 break;
1009 }
1010 found:
1011 if (si != NULL) {
1012 if (si->si_seq == cb->s_snxt)
1013 cb->s_snxt++;
1014 else
1015 spxstat.spxs_sndvoid++, si = 0;
1016 }
1017 }
1018
1019 /*
1020 * Update window.
1021 */
1022 if (rcv_win < 0)
1023 rcv_win = 0;
1024 alo = cb->s_ack - 1 + (rcv_win / ((short)cb->s_mtu));
1025 if (SSEQ_LT(alo, cb->s_alo))
1026 alo = cb->s_alo;
1027
1028 if (si != NULL) {
1029 /*
1030 * Must make a copy of this packet for ipx_output to monkey
1031 * with.
1032 */
1033 m = m_copy(dtom(si), 0, (int)M_COPYALL);
1034 if (m == NULL)
1035 return (ENOBUFS);
1036 si = mtod(m, struct spx *);
1037 if (SSEQ_LT(si->si_seq, cb->s_smax))
1038 spxstat.spxs_sndrexmitpack++;
1039 else
1040 spxstat.spxs_sndpack++;
1041 } else if (cb->s_force || cb->s_flags & SF_ACKNOW) {
1042 /*
1043 * Must send an acknowledgement or a probe.
1044 */
1045 if (cb->s_force)
1046 spxstat.spxs_sndprobe++;
1047 if (cb->s_flags & SF_ACKNOW)
1048 spxstat.spxs_sndacks++;
1049 m = m_gethdr(M_DONTWAIT, MT_DATA);
1050 if (m == NULL)
1051 return (ENOBUFS);
1052
1053 /*
1054 * Fill in mbuf with extended SP header and addresses and
1055 * length put into network format.
1056 */
1057 MH_ALIGN(m, sizeof(struct spx));
1058 m->m_len = sizeof(*si);
1059 m->m_pkthdr.len = sizeof(*si);
1060 si = mtod(m, struct spx *);
1061 si->si_i = *cb->s_ipx;
1062 si->si_s = cb->s_shdr;
1063 si->si_seq = cb->s_smax + 1;
1064 si->si_len = htons(sizeof(*si));
1065 si->si_cc |= SPX_SP;
1066 } else {
1067 cb->s_outx = 3;
1068 if (so->so_options & SO_DEBUG || traceallspxs)
1069 spx_trace(SA_OUTPUT, cb->s_state, cb, si, 0);
1070 return (0);
1071 }
1072 /*
1073 * Stuff checksum and output datagram.
1074 */
1075 if ((si->si_cc & SPX_SP) == 0) {
1076 if (cb->s_force != (1 + SPXT_PERSIST) ||
1077 cb->s_timer[SPXT_PERSIST] == 0) {
1078 /*
1079 * If this is a new packet and we are not currently
1080 * timing anything, time this one.
1081 */
1082 if (SSEQ_LT(cb->s_smax, si->si_seq)) {
1083 cb->s_smax = si->si_seq;
1084 if (cb->s_rtt == 0) {
1085 spxstat.spxs_segstimed++;
1086 cb->s_rtseq = si->si_seq;
1087 cb->s_rtt = 1;
1088 }
1089 }
1090
1091 /*
1092 * Set rexmt timer if not currently set; the initial
1093 * value for retransmit timer is smoothed round-trip
1094 * time + 2 * round-trip time variance. Initialize
1095 * shift counter which is used for backoff of
1096 * retransmit time.
1097 */
1098 if (cb->s_timer[SPXT_REXMT] == 0 &&
1099 cb->s_snxt != cb->s_rack) {
1100 cb->s_timer[SPXT_REXMT] = cb->s_rxtcur;
1101 if (cb->s_timer[SPXT_PERSIST]) {
1102 cb->s_timer[SPXT_PERSIST] = 0;
1103 cb->s_rxtshift = 0;
1104 }
1105 }
1106 } else if (SSEQ_LT(cb->s_smax, si->si_seq))
1107 cb->s_smax = si->si_seq;
1108 } else if (cb->s_state < TCPS_ESTABLISHED) {
1109 if (cb->s_rtt == 0)
1110 cb->s_rtt = 1; /* Time initial handshake */
1111 if (cb->s_timer[SPXT_REXMT] == 0)
1112 cb->s_timer[SPXT_REXMT] = cb->s_rxtcur;
1113 }
1114
1115 /*
1116 * Do not request acks when we ack their data packets or when we do a
1117 * gratuitous window update.
1118 */
1119 if (((si->si_cc & SPX_SP) == 0) || cb->s_force)
1120 si->si_cc |= SPX_SA;
1121 si->si_seq = htons(si->si_seq);
1122 si->si_alo = htons(alo);
1123 si->si_ack = htons(cb->s_ack);
1124
1125 if (ipxcksum)
1126 si->si_sum = ipx_cksum(m, ntohs(si->si_len));
1127 else
1128 si->si_sum = 0xffff;
1129
1130 cb->s_outx = 4;
1131 if (so->so_options & SO_DEBUG || traceallspxs)
1132 spx_trace(SA_OUTPUT, cb->s_state, cb, si, 0);
1133
1134 if (so->so_options & SO_DONTROUTE)
1135 error = ipx_outputfl(m, NULL, IPX_ROUTETOIF);
1136 else
1137 error = ipx_outputfl(m, &cb->s_ipxpcb->ipxp_route, 0);
1138 if (error)
1139 return (error);
1140 spxstat.spxs_sndtotal++;
1141
1142 /*
1143 * Data sent (as far as we can tell). If this advertises a larger
1144 * window than any other segment, then remember the size of the
1145 * advertised window. Any pending ACK has now been sent.
1146 */
1147 cb->s_force = 0;
1148 cb->s_flags &= ~(SF_ACKNOW|SF_DELACK);
1149 if (SSEQ_GT(alo, cb->s_alo))
1150 cb->s_alo = alo;
1151 if (sendalot)
1152 goto again;
1153 cb->s_outx = 5;
1154 return (0);
1155 }
1156
1157 static int spx_do_persist_panics = 0;
1158
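/*
 * Arm the persist timer from the smoothed round-trip time estimate, backing
 * off further on each successive call.
 */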
1159 static void
1160 spx_setpersist(struct spxpcb *cb)
1161 {
1162 int t = ((cb->s_srtt >> 2) + cb->s_rttvar) >> 1;
1163
1164 IPX_LOCK_ASSERT(cb->s_ipxpcb);
1165
1166 if (cb->s_timer[SPXT_REXMT] && spx_do_persist_panics)
1167 panic("spx_output REXMT");
1168
1169 /*
1170 * Start/restart the persistence timer.
1171 */
1172 SPXT_RANGESET(cb->s_timer[SPXT_PERSIST],
1173 t*spx_backoff[cb->s_rxtshift],
1174 SPXTV_PERSMIN, SPXTV_PERSMAX);
1175 if (cb->s_rxtshift < SPX_MAXRXTSHIFT)
1176 cb->s_rxtshift++;
1177 }
1178
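/*
 * Handle SPX-level socket options: header passing on input and output, the
 * connection MTU, and the default and most recently received SPX headers.
 * Options at other levels are handed to ipx_ctloutput().
 */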
1179 int
1180 spx_ctloutput(struct socket *so, struct sockopt *sopt)
1181 {
1182 struct spxhdr spxhdr;
1183 struct ipxpcb *ipxp;
1184 struct spxpcb *cb;
1185 int mask, error;
1186 short soptval;
1187 u_short usoptval;
1188 int optval;
1189
1190 ipxp = sotoipxpcb(so);
1191 KASSERT(ipxp != NULL, ("spx_ctloutput: ipxp == NULL"));
1192
1193 /*
1194 * This will have to be changed when we do more general stacking of
1195 * protocols.
1196 */
1197 if (sopt->sopt_level != IPXPROTO_SPX)
1198 return (ipx_ctloutput(so, sopt));
1199
1200 IPX_LOCK(ipxp);
1201 if (ipxp->ipxp_flags & IPXP_DROPPED) {
1202 IPX_UNLOCK(ipxp);
1203 return (ECONNRESET);
1204 }
1205
1207 cb = ipxtospxpcb(ipxp);
1208 KASSERT(cb != NULL, ("spx_ctloutput: cb == NULL"));
1209
1210 error = 0;
1211 switch (sopt->sopt_dir) {
1212 case SOPT_GET:
1213 switch (sopt->sopt_name) {
1214 case SO_HEADERS_ON_INPUT:
1215 mask = SF_HI;
1216 goto get_flags;
1217
1218 case SO_HEADERS_ON_OUTPUT:
1219 mask = SF_HO;
1220 get_flags:
1221 soptval = cb->s_flags & mask;
1222 IPX_UNLOCK(ipxp);
1223 error = sooptcopyout(sopt, &soptval,
1224 sizeof(soptval));
1225 break;
1226
1227 case SO_MTU:
1228 usoptval = cb->s_mtu;
1229 IPX_UNLOCK(ipxp);
1230 error = sooptcopyout(sopt, &usoptval,
1231 sizeof(usoptval));
1232 break;
1233
1234 case SO_LAST_HEADER:
1235 spxhdr = cb->s_rhdr;
1236 IPX_UNLOCK(ipxp);
1237 error = sooptcopyout(sopt, &spxhdr, sizeof(spxhdr));
1238 break;
1239
1240 case SO_DEFAULT_HEADERS:
1241 spxhdr = cb->s_shdr;
1242 IPX_UNLOCK(ipxp);
1243 error = sooptcopyout(sopt, &spxhdr, sizeof(spxhdr));
1244 break;
1245
1246 default:
1247 IPX_UNLOCK(ipxp);
1248 error = ENOPROTOOPT;
1249 }
1250 break;
1251
1252 case SOPT_SET:
1253 /*
1254 * XXX Why are these shorts on get and ints on set? That
1255 * doesn't make any sense...
1256 *
1257 * XXXRW: Note, when we re-acquire the ipxp lock, we should
1258 * re-check that it's not dropped.
1259 */
1260 IPX_UNLOCK(ipxp);
1261 switch (sopt->sopt_name) {
1262 case SO_HEADERS_ON_INPUT:
1263 mask = SF_HI;
1264 goto set_head;
1265
1266 case SO_HEADERS_ON_OUTPUT:
1267 mask = SF_HO;
1268 set_head:
1269 error = sooptcopyin(sopt, &optval, sizeof optval,
1270 sizeof optval);
1271 if (error)
1272 break;
1273
1274 IPX_LOCK(ipxp);
1275 if (cb->s_flags & SF_PI) {
1276 if (optval)
1277 cb->s_flags |= mask;
1278 else
1279 cb->s_flags &= ~mask;
1280 } else error = EINVAL;
1281 IPX_UNLOCK(ipxp);
1282 break;
1283
1284 case SO_MTU:
1285 error = sooptcopyin(sopt, &usoptval, sizeof usoptval,
1286 sizeof usoptval);
1287 if (error)
1288 break;
1289 /* Unlocked write. */
1290 cb->s_mtu = usoptval;
1291 break;
1292
1293 #ifdef SF_NEWCALL
1294 case SO_NEWCALL:
1295 error = sooptcopyin(sopt, &optval, sizeof optval,
1296 sizeof optval);
1297 if (error)
1298 break;
1299 IPX_LOCK(ipxp);
1300 if (optval) {
1301 cb->s_flags2 |= SF_NEWCALL;
1302 spx_newchecks[5]++;
1303 } else {
1304 cb->s_flags2 &= ~SF_NEWCALL;
1305 spx_newchecks[6]++;
1306 }
1307 IPX_UNLOCK(ipxp);
1308 break;
1309 #endif
1310
1311 case SO_DEFAULT_HEADERS:
1312 {
1313 struct spxhdr sp;
1314
1315 error = sooptcopyin(sopt, &sp, sizeof sp,
1316 sizeof sp);
1317 if (error)
1318 break;
1319 IPX_LOCK(ipxp);
1320 cb->s_dt = sp.spx_dt;
1321 cb->s_cc = sp.spx_cc & SPX_EM;
1322 IPX_UNLOCK(ipxp);
1323 }
1324 break;
1325
1326 default:
1327 error = ENOPROTOOPT;
1328 }
1329 break;
1330
1331 default:
1332 panic("spx_ctloutput: bad socket option direction");
1333 }
1334 return (error);
1335 }
1336
1337 static void
1338 spx_usr_abort(struct socket *so)
1339 {
1340 struct ipxpcb *ipxp;
1341 struct spxpcb *cb;
1342
1343 ipxp = sotoipxpcb(so);
1344 KASSERT(ipxp != NULL, ("spx_usr_abort: ipxp == NULL"));
1345
1346 cb = ipxtospxpcb(ipxp);
1347 KASSERT(cb != NULL, ("spx_usr_abort: cb == NULL"));
1348
1349 IPX_LIST_LOCK();
1350 IPX_LOCK(ipxp);
1351 spx_drop(cb, ECONNABORTED);
1352 IPX_UNLOCK(ipxp);
1353 IPX_LIST_UNLOCK();
1354 }
1355
1356 /*
1357 * Accept a connection. Essentially all the work is done at higher levels;
1358 * just return the address of the peer, storing it through *nam.
1359 */
1360 static int
1361 spx_accept(struct socket *so, struct sockaddr **nam)
1362 {
1363 struct ipxpcb *ipxp;
1364 struct sockaddr_ipx *sipx, ssipx;
1365
1366 ipxp = sotoipxpcb(so);
1367 KASSERT(ipxp != NULL, ("spx_accept: ipxp == NULL"));
1368
1369 sipx = &ssipx;
1370 bzero(sipx, sizeof *sipx);
1371 sipx->sipx_len = sizeof *sipx;
1372 sipx->sipx_family = AF_IPX;
1373 IPX_LOCK(ipxp);
1374 sipx->sipx_addr = ipxp->ipxp_faddr;
1375 IPX_UNLOCK(ipxp);
1376 *nam = sodupsockaddr((struct sockaddr *)sipx, M_WAITOK);
1377 return (0);
1378 }
1379
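/*
 * Attach SPX to a newly created socket: reserve socket buffer space,
 * allocate the ipxpcb, the SPX control block, and a prototype IPX header,
 * and initialize the congestion window and round-trip timing state.
 */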
1380 static int
1381 spx_attach(struct socket *so, int proto, struct thread *td)
1382 {
1383 struct ipxpcb *ipxp;
1384 struct spxpcb *cb;
1385 struct mbuf *mm;
1386 struct sockbuf *sb;
1387 int error;
1388
1389 ipxp = sotoipxpcb(so);
1390 KASSERT(ipxp == NULL, ("spx_attach: ipxp != NULL"));
1391
1392 if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
1393 error = soreserve(so, (u_long) 3072, (u_long) 3072);
1394 if (error)
1395 return (error);
1396 }
1397
1398 MALLOC(cb, struct spxpcb *, sizeof *cb, M_PCB, M_NOWAIT | M_ZERO);
1399 if (cb == NULL)
1400 return (ENOBUFS);
1401 mm = m_getclr(M_DONTWAIT, MT_DATA);
1402 if (mm == NULL) {
1403 FREE(cb, M_PCB);
1404 return (ENOBUFS);
1405 }
1406
1407 IPX_LIST_LOCK();
1408 error = ipx_pcballoc(so, &ipxpcb_list, td);
1409 if (error) {
1410 IPX_LIST_UNLOCK();
1411 m_free(mm);
1412 FREE(cb, M_PCB);
1413 return (error);
1414 }
1415 ipxp = sotoipxpcb(so);
1416 ipxp->ipxp_flags |= IPXP_SPX;
1417
1418 cb->s_ipx = mtod(mm, struct ipx *);
1419 cb->s_state = TCPS_LISTEN;
1420 cb->s_smax = -1;
1421 cb->s_swl1 = -1;
1422 cb->s_q.si_next = cb->s_q.si_prev = &cb->s_q;
1423 cb->s_ipxpcb = ipxp;
1424 cb->s_mtu = 576 - sizeof(struct spx);
1425 sb = &so->so_snd;
1426 cb->s_cwnd = sbspace(sb) * CUNIT / cb->s_mtu;
1427 cb->s_ssthresh = cb->s_cwnd;
1428 cb->s_cwmx = sbspace(sb) * CUNIT / (2 * sizeof(struct spx));
1429 /*
1430 * Above is recomputed when connecting to account for changed
1431 * buffering or mtu's.
1432 */
1433 cb->s_rtt = SPXTV_SRTTBASE;
1434 cb->s_rttvar = SPXTV_SRTTDFLT << 2;
1435 SPXT_RANGESET(cb->s_rxtcur,
1436 ((SPXTV_SRTTBASE >> 2) + (SPXTV_SRTTDFLT << 2)) >> 1,
1437 SPXTV_MIN, SPXTV_REXMTMAX);
1438 ipxp->ipxp_pcb = (caddr_t)cb;
1439 IPX_LIST_UNLOCK();
1440 return (0);
1441 }
1442
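/*
 * Release the SPX state hanging off an ipxpcb: free any packets still on
 * the reassembly queue, the prototype header, and the control block itself.
 */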
1443 static void
1444 spx_pcbdetach(struct ipxpcb *ipxp)
1445 {
1446 struct spxpcb *cb;
1447 struct spx_q *s;
1448 struct mbuf *m;
1449
1450 IPX_LOCK_ASSERT(ipxp);
1451
1452 cb = ipxtospxpcb(ipxp);
1453 KASSERT(cb != NULL, ("spx_pcbdetach: cb == NULL"));
1454
1455 s = cb->s_q.si_next;
1456 while (s != &(cb->s_q)) {
1457 s = s->si_next;
1458 remque(s);
1459 m = dtom(s);
1460 m_freem(m);
1461 }
1462 m_free(dtom(cb->s_ipx));
1463 FREE(cb, M_PCB);
1464 ipxp->ipxp_pcb = NULL;
1465 }
1466
1467 static int
1468 spx_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
1469 {
1470 struct ipxpcb *ipxp;
1471 int error;
1472
1473 ipxp = sotoipxpcb(so);
1474 KASSERT(ipxp != NULL, ("spx_bind: ipxp == NULL"));
1475
1476 IPX_LIST_LOCK();
1477 IPX_LOCK(ipxp);
1478 if (ipxp->ipxp_flags & IPXP_DROPPED) {
1479 error = EINVAL;
1480 goto out;
1481 }
1482 error = ipx_pcbbind(ipxp, nam, td);
1483 out:
1484 IPX_UNLOCK(ipxp);
1485 IPX_LIST_UNLOCK();
1486 return (error);
1487 }
1488
1489 static void
1490 spx_usr_close(struct socket *so)
1491 {
1492 struct ipxpcb *ipxp;
1493 struct spxpcb *cb;
1494
1495 ipxp = sotoipxpcb(so);
1496 KASSERT(ipxp != NULL, ("spx_usr_close: ipxp == NULL"));
1497
1498 cb = ipxtospxpcb(ipxp);
1499 KASSERT(cb != NULL, ("spx_usr_close: cb == NULL"));
1500
1501 IPX_LIST_LOCK();
1502 IPX_LOCK(ipxp);
1503 if (cb->s_state > TCPS_LISTEN)
1504 spx_disconnect(cb);
1505 else
1506 spx_close(cb);
1507 IPX_UNLOCK(ipxp);
1508 IPX_LIST_UNLOCK();
1509 }
1510
1511 /*
1512 * Initiate connection to peer. Enter SYN_SENT state, and mark socket as
1513 * connecting. Start keep-alive timer, set up prototype header, send initial
1514 * system packet requesting connection.
1515 */
1516 static int
1517 spx_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
1518 {
1519 struct ipxpcb *ipxp;
1520 struct spxpcb *cb;
1521 int error;
1522
1523 ipxp = sotoipxpcb(so);
1524 KASSERT(ipxp != NULL, ("spx_connect: ipxp == NULL"));
1525
1526 cb = ipxtospxpcb(ipxp);
1527 KASSERT(cb != NULL, ("spx_connect: cb == NULL"));
1528
1529 IPX_LIST_LOCK();
1530 IPX_LOCK(ipxp);
1531 if (ipxp->ipxp_flags & IPXP_DROPPED) {
1532 error = EINVAL;
1533 goto spx_connect_end;
1534 }
1535 if (ipxp->ipxp_lport == 0) {
1536 error = ipx_pcbbind(ipxp, NULL, td);
1537 if (error)
1538 goto spx_connect_end;
1539 }
1540 error = ipx_pcbconnect(ipxp, nam, td);
1541 if (error)
1542 goto spx_connect_end;
1543 soisconnecting(so);
1544 spxstat.spxs_connattempt++;
1545 cb->s_state = TCPS_SYN_SENT;
1546 cb->s_did = 0;
1547 spx_template(cb);
1548 cb->s_timer[SPXT_KEEP] = SPXTV_KEEP;
1549 cb->s_force = 1 + SPXTV_KEEP;
1550 /*
1551 * Other party is required to respond to the port I send from, but he
1552 * is not required to answer from where I am sending to, so allow
1553 * wildcarding. Original port I am sending to is still saved in
1554 * cb->s_dport.
1555 */
1556 ipxp->ipxp_fport = 0;
1557 error = spx_output(cb, NULL);
1558 spx_connect_end:
1559 IPX_UNLOCK(ipxp);
1560 IPX_LIST_UNLOCK();
1561 return (error);
1562 }
1563
1564 static void
1565 spx_detach(struct socket *so)
1566 {
1567 struct ipxpcb *ipxp;
1568 struct spxpcb *cb;
1569
1570 /*
1571 * XXXRW: Should assert appropriately detached.
1572 */
1573 ipxp = sotoipxpcb(so);
1574 KASSERT(ipxp != NULL, ("spx_detach: ipxp == NULL"));
1575
1576 cb = ipxtospxpcb(ipxp);
1577 KASSERT(cb != NULL, ("spx_detach: cb == NULL"));
1578
1579 IPX_LIST_LOCK();
1580 IPX_LOCK(ipxp);
1581 spx_pcbdetach(ipxp);
1582 ipx_pcbfree(ipxp);
1583 IPX_LIST_UNLOCK();
1584 }
1585
1586 /*
1587 * We may decide later to implement connection closing handshaking at the spx
1588 * level optionally. Here is the hook to do it:
1589 */
1590 static int
1591 spx_usr_disconnect(struct socket *so)
1592 {
1593 struct ipxpcb *ipxp;
1594 struct spxpcb *cb;
1595 int error;
1596
1597 ipxp = sotoipxpcb(so);
1598 KASSERT(ipxp != NULL, ("spx_usr_disconnect: ipxp == NULL"));
1599
1600 cb = ipxtospxpcb(ipxp);
1601 KASSERT(cb != NULL, ("spx_usr_disconnect: cb == NULL"));
1602
1603 IPX_LIST_LOCK();
1604 IPX_LOCK(ipxp);
1605 if (ipxp->ipxp_flags & IPXP_DROPPED) {
1606 error = EINVAL;
1607 goto out;
1608 }
1609 spx_disconnect(cb);
1610 error = 0;
1611 out:
1612 IPX_UNLOCK(ipxp);
1613 IPX_LIST_UNLOCK();
1614 return (error);
1615 }
1616
1617 static int
1618 spx_listen(struct socket *so, int backlog, struct thread *td)
1619 {
1620 int error;
1621 struct ipxpcb *ipxp;
1622 struct spxpcb *cb;
1623
1624 error = 0;
1625 ipxp = sotoipxpcb(so);
1626 KASSERT(ipxp != NULL, ("spx_listen: ipxp == NULL"));
1627
1628 cb = ipxtospxpcb(ipxp);
1629 KASSERT(cb != NULL, ("spx_listen: cb == NULL"));
1630
1631 IPX_LIST_LOCK();
1632 IPX_LOCK(ipxp);
1633 if (ipxp->ipxp_flags & IPXP_DROPPED) {
1634 error = EINVAL;
1635 goto out;
1636 }
1637 SOCK_LOCK(so);
1638 error = solisten_proto_check(so);
1639 if (error == 0 && ipxp->ipxp_lport == 0)
1640 error = ipx_pcbbind(ipxp, NULL, td);
1641 if (error == 0) {
1642 cb->s_state = TCPS_LISTEN;
1643 solisten_proto(so, backlog);
1644 }
1645 SOCK_UNLOCK(so);
1646 out:
1647 IPX_UNLOCK(ipxp);
1648 IPX_LIST_UNLOCK();
1649 return (error);
1650 }
1651
1652 /*
1653 * After a receive, possibly send acknowledgment updating allocation.
1654 */
1655 static int
1656 spx_rcvd(struct socket *so, int flags)
1657 {
1658 struct ipxpcb *ipxp;
1659 struct spxpcb *cb;
1660 int error;
1661
1662 ipxp = sotoipxpcb(so);
1663 KASSERT(ipxp != NULL, ("spx_rcvd: ipxp == NULL"));
1664
1665 cb = ipxtospxpcb(ipxp);
1666 KASSERT(cb != NULL, ("spx_rcvd: cb == NULL"));
1667
1668 IPX_LOCK(ipxp);
1669 if (ipxp->ipxp_flags & IPXP_DROPPED) {
1670 error = EINVAL;
1671 goto out;
1672 }
1673 cb->s_flags |= SF_RVD;
1674 spx_output(cb, NULL);
1675 cb->s_flags &= ~SF_RVD;
1676 error = 0;
1677 out:
1678 IPX_UNLOCK(ipxp);
1679 return (error);
1680 }
1681
1682 static int
1683 spx_rcvoob(struct socket *so, struct mbuf *m, int flags)
1684 {
1685 struct ipxpcb *ipxp;
1686 struct spxpcb *cb;
1687 int error;
1688
1689 ipxp = sotoipxpcb(so);
1690 KASSERT(ipxp != NULL, ("spx_rcvoob: ipxp == NULL"));
1691
1692 cb = ipxtospxpcb(ipxp);
1693 KASSERT(cb != NULL, ("spx_rcvoob: cb == NULL"));
1694
1695 IPX_LOCK(ipxp);
1696 if (ipxp->ipxp_flags & IPXP_DROPPED) {
1697 error = EINVAL;
1698 goto out;
1699 }
1700 SOCKBUF_LOCK(&so->so_rcv);
1701 if ((cb->s_oobflags & SF_IOOB) || so->so_oobmark ||
1702 (so->so_rcv.sb_state & SBS_RCVATMARK)) {
1703 SOCKBUF_UNLOCK(&so->so_rcv);
1704 m->m_len = 1;
1705 *mtod(m, caddr_t) = cb->s_iobc;
1706 error = 0;
1707 goto out;
1708 }
1709 SOCKBUF_UNLOCK(&so->so_rcv);
1710 error = EINVAL;
1711 out:
1712 IPX_UNLOCK(ipxp);
1713 return (error);
1714 }
1715
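/*
 * Send user data on an SPX connection. PRUS_OOB marks the data out-of-band,
 * and an optional control mbuf may set the data stream type used for
 * subsequent packets.
 */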
1716 static int
1717 spx_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
1718 struct mbuf *controlp, struct thread *td)
1719 {
1720 struct ipxpcb *ipxp;
1721 struct spxpcb *cb;
1722 int error;
1723
1724 ipxp = sotoipxpcb(so);
1725 KASSERT(ipxp != NULL, ("spx_send: ipxp == NULL"));
1726
1727 cb = ipxtospxpcb(ipxp);
1728 KASSERT(cb != NULL, ("spx_send: cb == NULL"));
1729
1730 error = 0;
1731 IPX_LOCK(ipxp);
1732 if (ipxp->ipxp_flags & IPXP_DROPPED) {
1733 error = ECONNRESET;
1734 goto spx_send_end;
1735 }
1736 if (flags & PRUS_OOB) {
1737 if (sbspace(&so->so_snd) < -512) {
1738 error = ENOBUFS;
1739 goto spx_send_end;
1740 }
1741 cb->s_oobflags |= SF_SOOB;
1742 }
1743 if (controlp != NULL) {
1744 u_short *p = mtod(controlp, u_short *);
1745 spx_newchecks[2]++;
1746 if ((p[0] == 5) && (p[1] == 1)) { /* XXXX, for testing */
1747 cb->s_shdr.spx_dt = *(u_char *)(&p[2]);
1748 spx_newchecks[3]++;
1749 }
1750 m_freem(controlp);
1751 }
1752 controlp = NULL;
1753 error = spx_output(cb, m);
1754 m = NULL;
1755 spx_send_end:
1756 IPX_UNLOCK(ipxp);
1757 if (controlp != NULL)
1758 m_freem(controlp);
1759 if (m != NULL)
1760 m_freem(m);
1761 return (error);
1762 }
1763
1764 static int
1765 spx_shutdown(struct socket *so)
1766 {
1767 struct ipxpcb *ipxp;
1768 struct spxpcb *cb;
1769 int error;
1770
1771 ipxp = sotoipxpcb(so);
1772 KASSERT(ipxp != NULL, ("spx_shutdown: ipxp == NULL"));
1773
1774 cb = ipxtospxpcb(ipxp);
1775 KASSERT(cb != NULL, ("spx_shutdown: cb == NULL"));
1776
1777 socantsendmore(so);
1778 IPX_LIST_LOCK();
1779 IPX_LOCK(ipxp);
1780 if (ipxp->ipxp_flags & IPXP_DROPPED) {
1781 error = EINVAL;
1782 goto out;
1783 }
1784 spx_usrclosed(cb);
1785 error = 0;
1786 out:
1787 IPX_UNLOCK(ipxp);
1788 IPX_LIST_UNLOCK();
1789 return (error);
1790 }
1791
1792 static int
1793 spx_sp_attach(struct socket *so, int proto, struct thread *td)
1794 {
1795 struct ipxpcb *ipxp;
1796 struct spxpcb *cb;
1797 int error;
1798
1799 KASSERT(so->so_pcb == NULL, ("spx_sp_attach: so_pcb != NULL"));
1800
1801 error = spx_attach(so, proto, td);
1802 if (error)
1803 return (error);
1804
1805 ipxp = sotoipxpcb(so);
1806 KASSERT(ipxp != NULL, ("spx_sp_attach: ipxp == NULL"));
1807
1808 cb = ipxtospxpcb(ipxp);
1809 KASSERT(cb != NULL, ("spx_sp_attach: cb == NULL"));
1810
1811 IPX_LOCK(ipxp);
1812 cb->s_flags |= (SF_HI | SF_HO | SF_PI);
1813 IPX_UNLOCK(ipxp);
1814 return (0);
1815 }
1816
1817 /*
1818 * Create the template used to send SPX packets on a connection. Called
1819 * after the host entry is created; fills in a skeletal SPX header
1820 * (choosing a connection id), minimizing the amount of work necessary
1821 * when the connection is used.
1822 */
1823 static void
1824 spx_template(struct spxpcb *cb)
1825 {
1826 struct ipxpcb *ipxp = cb->s_ipxpcb;
1827 struct ipx *ipx = cb->s_ipx;
1828 struct sockbuf *sb = &(ipxp->ipxp_socket->so_snd);
1829
1830 IPX_LOCK_ASSERT(ipxp);
1831
1832 ipx->ipx_pt = IPXPROTO_SPX;
1833 ipx->ipx_sna = ipxp->ipxp_laddr;
1834 ipx->ipx_dna = ipxp->ipxp_faddr;
1835 SPX_LOCK();
1836 cb->s_sid = htons(spx_iss);
1837 spx_iss += SPX_ISSINCR/2;
1838 SPX_UNLOCK();
1839 cb->s_alo = 1;
1840 cb->s_cwnd = (sbspace(sb) * CUNIT) / cb->s_mtu;
1841 /* Try to expand fast to full complement of large packets. */
1842 cb->s_ssthresh = cb->s_cwnd;
1843 cb->s_cwmx = (sbspace(sb) * CUNIT) / (2 * sizeof(struct spx));
1844 /* But allow for lots of little packets as well. */
1845 cb->s_cwmx = max(cb->s_cwmx, cb->s_cwnd);
1846 }
1847
1848 /*
1849 * Close an SPX control block. Wake up any sleepers. We used to free any
1850 * queued packets and cb->s_ipx here, but now we defer that until the pcb is
1851 * discarded.
1852 */
1853 void
1854 spx_close(struct spxpcb *cb)
1855 {
1856 struct ipxpcb *ipxp = cb->s_ipxpcb;
1857 struct socket *so = ipxp->ipxp_socket;
1858
1859 KASSERT(ipxp != NULL, ("spx_close: ipxp == NULL"));
1860 IPX_LIST_LOCK_ASSERT();
1861 IPX_LOCK_ASSERT(ipxp);
1862
1863 ipxp->ipxp_flags |= IPXP_DROPPED;
1864 soisdisconnected(so);
1865 spxstat.spxs_closed++;
1866 }
1867
1868 /*
1869 * Someday we may do level 3 handshaking to close a connection or send a
1870 * Xerox-style error. For now, just close. cb will always be invalid after
1871 * this call.
1872 */
1873 static void
1874 spx_usrclosed(struct spxpcb *cb)
1875 {
1876
1877 IPX_LIST_LOCK_ASSERT();
1878 IPX_LOCK_ASSERT(cb->s_ipxpcb);
1879
1880 spx_close(cb);
1881 }
1882
1883 /*
1884 * cb will always be invalid after this call.
1885 */
1886 static void
1887 spx_disconnect(struct spxpcb *cb)
1888 {
1889
1890 IPX_LIST_LOCK_ASSERT();
1891 IPX_LOCK_ASSERT(cb->s_ipxpcb);
1892
1893 spx_close(cb);
1894 }
1895
1896 /*
1897 * Drop connection, reporting the specified error. cb will always be invalid
1898 * after this call.
1899 */
1900 static void
1901 spx_drop(struct spxpcb *cb, int errno)
1902 {
1903 struct socket *so = cb->s_ipxpcb->ipxp_socket;
1904
1905 IPX_LIST_LOCK_ASSERT();
1906 IPX_LOCK_ASSERT(cb->s_ipxpcb);
1907
1908 /*
1909 * Someday, in the Xerox world, we will generate error protocol
1910 * packets announcing that the socket has gone away.
1911 */
1912 if (TCPS_HAVERCVDSYN(cb->s_state)) {
1913 spxstat.spxs_drops++;
1914 cb->s_state = TCPS_CLOSED;
1915 /*tcp_output(cb);*/
1916 } else
1917 spxstat.spxs_conndrops++;
1918 so->so_error = errno;
1919 spx_close(cb);
1920 }
1921
1922 /*
1923 * Fast timeout routine for processing delayed acks.
1924 */
1925 void
1926 spx_fasttimo(void)
1927 {
1928 struct ipxpcb *ipxp;
1929 struct spxpcb *cb;
1930
1931 IPX_LIST_LOCK();
1932 LIST_FOREACH(ipxp, &ipxpcb_list, ipxp_list) {
1933 IPX_LOCK(ipxp);
1934 if (!(ipxp->ipxp_flags & IPXP_SPX) ||
1935 (ipxp->ipxp_flags & IPXP_DROPPED)) {
1936 IPX_UNLOCK(ipxp);
1937 continue;
1938 }
1939 cb = ipxtospxpcb(ipxp);
1940 if (cb->s_flags & SF_DELACK) {
1941 cb->s_flags &= ~SF_DELACK;
1942 cb->s_flags |= SF_ACKNOW;
1943 spxstat.spxs_delack++;
1944 spx_output(cb, NULL);
1945 }
1946 IPX_UNLOCK(ipxp);
1947 }
1948 IPX_LIST_UNLOCK();
1949 }
1950
1951 /*
1952 * spx protocol timeout routine called every 500 ms. Updates the timers in
1953 * all active pcb's and causes finite state machine actions if timers expire.
1954 */
1955 void
1956 spx_slowtimo(void)
1957 {
1958 struct ipxpcb *ipxp;
1959 struct spxpcb *cb;
1960 int i;
1961
1962 /*
1963 * Search through the SPX pcb's and update active timers. Once, timers could
1964 * free ipxp's, but now we do that only when detaching a socket.
1965 */
1966 IPX_LIST_LOCK();
1967 LIST_FOREACH(ipxp, &ipxpcb_list, ipxp_list) {
1968 IPX_LOCK(ipxp);
1969 if (!(ipxp->ipxp_flags & IPXP_SPX) ||
1970 (ipxp->ipxp_flags & IPXP_DROPPED)) {
1971 IPX_UNLOCK(ipxp);
1972 continue;
1973 }
1974
1975 cb = (struct spxpcb *)ipxp->ipxp_pcb;
1976 KASSERT(cb != NULL, ("spx_slowtimo: cb == NULL"));
1977 for (i = 0; i < SPXT_NTIMERS; i++) {
1978 if (cb->s_timer[i] && --cb->s_timer[i] == 0) {
1979 spx_timers(cb, i);
1980 if (ipxp->ipxp_flags & IPXP_DROPPED)
1981 break;
1982 }
1983 }
1984 if (!(ipxp->ipxp_flags & IPXP_DROPPED)) {
1985 cb->s_idle++;
1986 if (cb->s_rtt)
1987 cb->s_rtt++;
1988 }
1989 IPX_UNLOCK(ipxp);
1990 }
1991 IPX_LIST_UNLOCK();
1992 SPX_LOCK();
1993 spx_iss += SPX_ISSINCR/PR_SLOWHZ; /* increment iss */
1994 SPX_UNLOCK();
1995 }
1996
1997 /*
1998 * SPX timer processing.
1999 */
2000 static void
2001 spx_timers(struct spxpcb *cb, int timer)
2002 {
2003 long rexmt;
2004 int win;
2005
2006 IPX_LIST_LOCK_ASSERT();
2007 IPX_LOCK_ASSERT(cb->s_ipxpcb);
2008
2009 cb->s_force = 1 + timer;
2010 switch (timer) {
2011 case SPXT_2MSL:
2012 /*
2013 * 2 MSL timeout in shutdown went off. TCP deletes
2014 * connection control block.
2015 */
2016 printf("spx: SPXT_2MSL went off for no reason\n");
2017 cb->s_timer[timer] = 0;
2018 break;
2019
2020 case SPXT_REXMT:
2021 /*
2022 * Retransmission timer went off. Message has not been acked
2023 * within retransmit interval. Back off to a longer
2024 * retransmit interval and retransmit one packet.
2025 */
2026 if (++cb->s_rxtshift > SPX_MAXRXTSHIFT) {
2027 cb->s_rxtshift = SPX_MAXRXTSHIFT;
2028 spxstat.spxs_timeoutdrop++;
2029 spx_drop(cb, ETIMEDOUT);
2030 break;
2031 }
2032 spxstat.spxs_rexmttimeo++;
2033 rexmt = ((cb->s_srtt >> 2) + cb->s_rttvar) >> 1;
2034 rexmt *= spx_backoff[cb->s_rxtshift];
2035 SPXT_RANGESET(cb->s_rxtcur, rexmt, SPXTV_MIN, SPXTV_REXMTMAX);
2036 cb->s_timer[SPXT_REXMT] = cb->s_rxtcur;
2037
2038 /*
2039 * If we have backed off fairly far, our srtt estimate is
2040 * probably bogus. Clobber it so we'll take the next rtt
2041 * measurement as our srtt; move the current srtt into rttvar
2042 * to keep the current retransmit times until then.
2043 */
2044 if (cb->s_rxtshift > SPX_MAXRXTSHIFT / 4 ) {
2045 cb->s_rttvar += (cb->s_srtt >> 2);
2046 cb->s_srtt = 0;
2047 }
2048 cb->s_snxt = cb->s_rack;
2049
2050 /*
2051 * If timing a packet, stop the timer.
2052 */
2053 cb->s_rtt = 0;
2054
2055 /*
2056 * See very long discussion in tcp_timer.c about congestion
2057 * window and ssthresh.
2058 */
2059 win = min(cb->s_swnd, (cb->s_cwnd/CUNIT)) / 2;
2060 if (win < 2)
2061 win = 2;
2062 cb->s_cwnd = CUNIT;
2063 cb->s_ssthresh = win * CUNIT;
2064 spx_output(cb, NULL);
2065 break;
2066
2067 case SPXT_PERSIST:
2068 /*
2069 * Persistence timer went off on a zero window. Force a probe
2070 * to be sent.
2071 */
2072 spxstat.spxs_persisttimeo++;
2073 spx_setpersist(cb);
2074 spx_output(cb, NULL);
2075 break;
2076
2077 case SPXT_KEEP:
2078 /*
2079 * Keep-alive timer went off; send something or drop
2080 * connection if idle for too long.
2081 */
2082 spxstat.spxs_keeptimeo++;
2083 if (cb->s_state < TCPS_ESTABLISHED)
2084 goto dropit;
2085 if (cb->s_ipxpcb->ipxp_socket->so_options & SO_KEEPALIVE) {
2086 if (cb->s_idle >= SPXTV_MAXIDLE)
2087 goto dropit;
2088 spxstat.spxs_keepprobe++;
2089 spx_output(cb, NULL);
2090 } else
2091 cb->s_idle = 0;
2092 cb->s_timer[SPXT_KEEP] = SPXTV_KEEP;
2093 break;
2094
2095 dropit:
2096 spxstat.spxs_keepdrops++;
2097 spx_drop(cb, ETIMEDOUT);
2098 break;
2099
2100 default:
2101 panic("spx_timers: unknown timer %d", timer);
2102 }
2103 }