1 /*-
2 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * a) Redistributions of source code must retain the above copyright notice,
8 * this list of conditions and the following disclaimer.
9 *
10 * b) Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the distribution.
13 *
14 * c) Neither the name of Cisco Systems, Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived
16 * from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31 /* $KAME: sctputil.c,v 1.37 2005/03/07 23:26:09 itojun Exp $ */
32
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD: releng/8.1/sys/netinet/sctputil.c 209067 2010-06-11 20:38:20Z tuexen $");
35
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #endif
43 #include <netinet/sctp_header.h>
44 #include <netinet/sctp_output.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
47 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
48 #include <netinet/sctp_auth.h>
49 #include <netinet/sctp_asconf.h>
50 #include <netinet/sctp_cc_functions.h>
51 #include <netinet/sctp_bsd_addr.h>
52
53 #define NUMBER_OF_MTU_SIZES 18
54
55
56 #ifndef KTR_SCTP
57 #define KTR_SCTP KTR_SUBSYS
58 #endif
59
60 void
61 sctp_sblog(struct sockbuf *sb,
62 struct sctp_tcb *stcb, int from, int incr)
63 {
64 struct sctp_cwnd_log sctp_clog;
65
66 sctp_clog.x.sb.stcb = stcb;
67 sctp_clog.x.sb.so_sbcc = sb->sb_cc;
68 if (stcb)
69 sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
70 else
71 sctp_clog.x.sb.stcb_sbcc = 0;
72 sctp_clog.x.sb.incr = incr;
73 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
74 SCTP_LOG_EVENT_SB,
75 from,
76 sctp_clog.x.misc.log1,
77 sctp_clog.x.misc.log2,
78 sctp_clog.x.misc.log3,
79 sctp_clog.x.misc.log4);
80 }
81
82 void
83 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
84 {
85 struct sctp_cwnd_log sctp_clog;
86
87 sctp_clog.x.close.inp = (void *)inp;
88 sctp_clog.x.close.sctp_flags = inp->sctp_flags;
89 if (stcb) {
90 sctp_clog.x.close.stcb = (void *)stcb;
91 sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
92 } else {
93 sctp_clog.x.close.stcb = 0;
94 sctp_clog.x.close.state = 0;
95 }
96 sctp_clog.x.close.loc = loc;
97 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
98 SCTP_LOG_EVENT_CLOSE,
99 0,
100 sctp_clog.x.misc.log1,
101 sctp_clog.x.misc.log2,
102 sctp_clog.x.misc.log3,
103 sctp_clog.x.misc.log4);
104 }
105
106
107 void
108 rto_logging(struct sctp_nets *net, int from)
109 {
110 struct sctp_cwnd_log sctp_clog;
111
112 memset(&sctp_clog, 0, sizeof(sctp_clog));
113 sctp_clog.x.rto.net = (void *)net;
114 sctp_clog.x.rto.rtt = net->prev_rtt;
115 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
116 SCTP_LOG_EVENT_RTT,
117 from,
118 sctp_clog.x.misc.log1,
119 sctp_clog.x.misc.log2,
120 sctp_clog.x.misc.log3,
121 sctp_clog.x.misc.log4);
122
123 }
124
125 void
126 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
127 {
128 struct sctp_cwnd_log sctp_clog;
129
130 sctp_clog.x.strlog.stcb = stcb;
131 sctp_clog.x.strlog.n_tsn = tsn;
132 sctp_clog.x.strlog.n_sseq = sseq;
133 sctp_clog.x.strlog.e_tsn = 0;
134 sctp_clog.x.strlog.e_sseq = 0;
135 sctp_clog.x.strlog.strm = stream;
136 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
137 SCTP_LOG_EVENT_STRM,
138 from,
139 sctp_clog.x.misc.log1,
140 sctp_clog.x.misc.log2,
141 sctp_clog.x.misc.log3,
142 sctp_clog.x.misc.log4);
143
144 }
145
146 void
147 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
148 {
149 struct sctp_cwnd_log sctp_clog;
150
151 sctp_clog.x.nagle.stcb = (void *)stcb;
152 sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
153 sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
154 sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
155 sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
156 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
157 SCTP_LOG_EVENT_NAGLE,
158 action,
159 sctp_clog.x.misc.log1,
160 sctp_clog.x.misc.log2,
161 sctp_clog.x.misc.log3,
162 sctp_clog.x.misc.log4);
163 }
164
165
166 void
167 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
168 {
169 struct sctp_cwnd_log sctp_clog;
170
171 sctp_clog.x.sack.cumack = cumack;
172 sctp_clog.x.sack.oldcumack = old_cumack;
173 sctp_clog.x.sack.tsn = tsn;
174 sctp_clog.x.sack.numGaps = gaps;
175 sctp_clog.x.sack.numDups = dups;
176 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
177 SCTP_LOG_EVENT_SACK,
178 from,
179 sctp_clog.x.misc.log1,
180 sctp_clog.x.misc.log2,
181 sctp_clog.x.misc.log3,
182 sctp_clog.x.misc.log4);
183 }
184
185 void
186 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
187 {
188 struct sctp_cwnd_log sctp_clog;
189
190 memset(&sctp_clog, 0, sizeof(sctp_clog));
191 sctp_clog.x.map.base = map;
192 sctp_clog.x.map.cum = cum;
193 sctp_clog.x.map.high = high;
194 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
195 SCTP_LOG_EVENT_MAP,
196 from,
197 sctp_clog.x.misc.log1,
198 sctp_clog.x.misc.log2,
199 sctp_clog.x.misc.log3,
200 sctp_clog.x.misc.log4);
201 }
202
203 void
204 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
205 int from)
206 {
207 struct sctp_cwnd_log sctp_clog;
208
209 memset(&sctp_clog, 0, sizeof(sctp_clog));
210 sctp_clog.x.fr.largest_tsn = biggest_tsn;
211 sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
212 sctp_clog.x.fr.tsn = tsn;
213 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
214 SCTP_LOG_EVENT_FR,
215 from,
216 sctp_clog.x.misc.log1,
217 sctp_clog.x.misc.log2,
218 sctp_clog.x.misc.log3,
219 sctp_clog.x.misc.log4);
220
221 }
222
223
224 void
225 sctp_log_mb(struct mbuf *m, int from)
226 {
227 struct sctp_cwnd_log sctp_clog;
228
229 sctp_clog.x.mb.mp = m;
230 sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
231 sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
232 sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
233 if (SCTP_BUF_IS_EXTENDED(m)) {
234 sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
235 sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
236 } else {
237 sctp_clog.x.mb.ext = 0;
238 sctp_clog.x.mb.refcnt = 0;
239 }
240 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
241 SCTP_LOG_EVENT_MBUF,
242 from,
243 sctp_clog.x.misc.log1,
244 sctp_clog.x.misc.log2,
245 sctp_clog.x.misc.log3,
246 sctp_clog.x.misc.log4);
247 }
248
249
250 void
251 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk,
252 int from)
253 {
254 struct sctp_cwnd_log sctp_clog;
255
256 if (control == NULL) {
257 SCTP_PRINTF("Gak log of NULL?\n");
258 return;
259 }
260 sctp_clog.x.strlog.stcb = control->stcb;
261 sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
262 sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
263 sctp_clog.x.strlog.strm = control->sinfo_stream;
264 if (poschk != NULL) {
265 sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
266 sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
267 } else {
268 sctp_clog.x.strlog.e_tsn = 0;
269 sctp_clog.x.strlog.e_sseq = 0;
270 }
271 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
272 SCTP_LOG_EVENT_STRM,
273 from,
274 sctp_clog.x.misc.log1,
275 sctp_clog.x.misc.log2,
276 sctp_clog.x.misc.log3,
277 sctp_clog.x.misc.log4);
278
279 }
280
281 void
282 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
283 {
284 struct sctp_cwnd_log sctp_clog;
285
286 sctp_clog.x.cwnd.net = net;
287 if (stcb->asoc.send_queue_cnt > 255)
288 sctp_clog.x.cwnd.cnt_in_send = 255;
289 else
290 sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
291 if (stcb->asoc.stream_queue_cnt > 255)
292 sctp_clog.x.cwnd.cnt_in_str = 255;
293 else
294 sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
295
296 if (net) {
297 sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
298 sctp_clog.x.cwnd.inflight = net->flight_size;
299 sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
300 sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
301 sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
302 }
303 if (SCTP_CWNDLOG_PRESEND == from) {
304 sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
305 }
306 sctp_clog.x.cwnd.cwnd_augment = augment;
307 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
308 SCTP_LOG_EVENT_CWND,
309 from,
310 sctp_clog.x.misc.log1,
311 sctp_clog.x.misc.log2,
312 sctp_clog.x.misc.log3,
313 sctp_clog.x.misc.log4);
314
315 }
316
317 void
318 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
319 {
320 struct sctp_cwnd_log sctp_clog;
321
322 memset(&sctp_clog, 0, sizeof(sctp_clog));
323 if (inp) {
324 sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
325
326 } else {
327 sctp_clog.x.lock.sock = (void *)NULL;
328 }
329 sctp_clog.x.lock.inp = (void *)inp;
330 if (stcb) {
331 sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
332 } else {
333 sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
334 }
335 if (inp) {
336 sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
337 sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
338 } else {
339 sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
340 sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
341 }
342 sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
343 if (inp->sctp_socket) {
344 sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
345 sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
346 sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
347 } else {
348 sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
349 sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
350 sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
351 }
352 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
353 SCTP_LOG_LOCK_EVENT,
354 from,
355 sctp_clog.x.misc.log1,
356 sctp_clog.x.misc.log2,
357 sctp_clog.x.misc.log3,
358 sctp_clog.x.misc.log4);
359
360 }
361
/*
 * Log a max-burst limiting event: the triggering error code, the burst
 * value, the net's flight size and clamped queue counts.
 * NOTE(review): 'net' is dereferenced unconditionally below, unlike
 * sctp_log_cwnd() which guards against NULL -- presumably callers always
 * pass a valid net here; confirm against call sites.
 */
void
sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
{
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.cwnd.net = net;
	sctp_clog.x.cwnd.cwnd_new_value = error;
	sctp_clog.x.cwnd.inflight = net->flight_size;
	sctp_clog.x.cwnd.cwnd_augment = burst;
	/* Queue counts are clamped to 255 to fit their one-byte fields. */
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAXBURST,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);

}
389
390 void
391 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
392 {
393 struct sctp_cwnd_log sctp_clog;
394
395 sctp_clog.x.rwnd.rwnd = peers_rwnd;
396 sctp_clog.x.rwnd.send_size = snd_size;
397 sctp_clog.x.rwnd.overhead = overhead;
398 sctp_clog.x.rwnd.new_rwnd = 0;
399 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
400 SCTP_LOG_EVENT_RWND,
401 from,
402 sctp_clog.x.misc.log1,
403 sctp_clog.x.misc.log2,
404 sctp_clog.x.misc.log3,
405 sctp_clog.x.misc.log4);
406 }
407
408 void
409 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
410 {
411 struct sctp_cwnd_log sctp_clog;
412
413 sctp_clog.x.rwnd.rwnd = peers_rwnd;
414 sctp_clog.x.rwnd.send_size = flight_size;
415 sctp_clog.x.rwnd.overhead = overhead;
416 sctp_clog.x.rwnd.new_rwnd = a_rwndval;
417 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
418 SCTP_LOG_EVENT_RWND,
419 from,
420 sctp_clog.x.misc.log1,
421 sctp_clog.x.misc.log2,
422 sctp_clog.x.misc.log3,
423 sctp_clog.x.misc.log4);
424 }
425
426 void
427 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
428 {
429 struct sctp_cwnd_log sctp_clog;
430
431 sctp_clog.x.mbcnt.total_queue_size = total_oq;
432 sctp_clog.x.mbcnt.size_change = book;
433 sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
434 sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
435 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
436 SCTP_LOG_EVENT_MBCNT,
437 from,
438 sctp_clog.x.misc.log1,
439 sctp_clog.x.misc.log2,
440 sctp_clog.x.misc.log3,
441 sctp_clog.x.misc.log4);
442
443 }
444
445 void
446 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
447 {
448 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
449 SCTP_LOG_MISC_EVENT,
450 from,
451 a, b, c, d);
452 }
453
454 void
455 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t cumtsn, uint32_t wake_cnt, int from)
456 {
457 struct sctp_cwnd_log sctp_clog;
458
459 sctp_clog.x.wake.stcb = (void *)stcb;
460 sctp_clog.x.wake.wake_cnt = wake_cnt;
461 sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
462 sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
463 sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
464
465 if (stcb->asoc.stream_queue_cnt < 0xff)
466 sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
467 else
468 sctp_clog.x.wake.stream_qcnt = 0xff;
469
470 if (stcb->asoc.chunks_on_out_queue < 0xff)
471 sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
472 else
473 sctp_clog.x.wake.chunks_on_oque = 0xff;
474
475 sctp_clog.x.wake.sctpflags = 0;
476 /* set in the defered mode stuff */
477 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
478 sctp_clog.x.wake.sctpflags |= 1;
479 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
480 sctp_clog.x.wake.sctpflags |= 2;
481 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
482 sctp_clog.x.wake.sctpflags |= 4;
483 /* what about the sb */
484 if (stcb->sctp_socket) {
485 struct socket *so = stcb->sctp_socket;
486
487 sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
488 } else {
489 sctp_clog.x.wake.sbflags = 0xff;
490 }
491 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
492 SCTP_LOG_EVENT_WAKE,
493 from,
494 sctp_clog.x.misc.log1,
495 sctp_clog.x.misc.log2,
496 sctp_clog.x.misc.log3,
497 sctp_clog.x.misc.log4);
498
499 }
500
501 void
502 sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc, int sendlen)
503 {
504 struct sctp_cwnd_log sctp_clog;
505
506 sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
507 sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
508 sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
509 sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
510 sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
511 sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
512 sctp_clog.x.blk.sndlen = sendlen;
513 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
514 SCTP_LOG_EVENT_BLOCK,
515 from,
516 sctp_clog.x.misc.log1,
517 sctp_clog.x.misc.log2,
518 sctp_clog.x.misc.log3,
519 sctp_clog.x.misc.log4);
520
521 }
522
/*
 * Placeholder for stat-log retrieval via socket option; the KTR trace
 * (ktrdump) is used instead, so this always succeeds with no data.
 */
int
sctp_fill_stat_log(void *optval, size_t *optsize)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
529
530 #ifdef SCTP_AUDITING_ENABLED
/* Ring buffer of (event, detail) byte pairs recorded by the auditing code. */
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
/* Next write slot in the ring; wraps at SCTP_AUDIT_SIZE. */
static int sctp_audit_indx = 0;
533
534 static
535 void
536 sctp_print_audit_report(void)
537 {
538 int i;
539 int cnt;
540
541 cnt = 0;
542 for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
543 if ((sctp_audit_data[i][0] == 0xe0) &&
544 (sctp_audit_data[i][1] == 0x01)) {
545 cnt = 0;
546 SCTP_PRINTF("\n");
547 } else if (sctp_audit_data[i][0] == 0xf0) {
548 cnt = 0;
549 SCTP_PRINTF("\n");
550 } else if ((sctp_audit_data[i][0] == 0xc0) &&
551 (sctp_audit_data[i][1] == 0x01)) {
552 SCTP_PRINTF("\n");
553 cnt = 0;
554 }
555 SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
556 (uint32_t) sctp_audit_data[i][1]);
557 cnt++;
558 if ((cnt % 14) == 0)
559 SCTP_PRINTF("\n");
560 }
561 for (i = 0; i < sctp_audit_indx; i++) {
562 if ((sctp_audit_data[i][0] == 0xe0) &&
563 (sctp_audit_data[i][1] == 0x01)) {
564 cnt = 0;
565 SCTP_PRINTF("\n");
566 } else if (sctp_audit_data[i][0] == 0xf0) {
567 cnt = 0;
568 SCTP_PRINTF("\n");
569 } else if ((sctp_audit_data[i][0] == 0xc0) &&
570 (sctp_audit_data[i][1] == 0x01)) {
571 SCTP_PRINTF("\n");
572 cnt = 0;
573 }
574 SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
575 (uint32_t) sctp_audit_data[i][1]);
576 cnt++;
577 if ((cnt % 14) == 0)
578 SCTP_PRINTF("\n");
579 }
580 SCTP_PRINTF("\n");
581 }
582
/*
 * Audit an association's retransmit/flight accounting against the sent
 * queue, logging a trail of marker bytes into sctp_audit_data, correcting
 * any counters found inconsistent, and printing a report if anything was
 * repaired.  'from' identifies the call site; 'net' is currently unused.
 */
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	/* 0xAA marks an audit entry; the low byte records the call site. */
	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		/* 0xAF/0x01: bailing out, no endpoint. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		/* 0xAF/0x02: bailing out, no association. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	/* 0xA1: snapshot of the retransmit count going in. */
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	/* Recount retransmits and in-flight bytes from the sent queue. */
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	/* 0xAF/0xA1: retransmit count disagreed; fix it up. */
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	/* 0xAF/0xA2: total flight bytes disagreed; fix it up. */
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	/* 0xAF/0xA5: flight chunk count disagreed; fix it up. */
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	/* Now cross-check total flight against the per-net flight sizes. */
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	/* 0xAF/0xA3: per-net sums disagreed; rebuild each from the queue. */
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}
712
713 void
714 sctp_audit_log(uint8_t ev, uint8_t fd)
715 {
716
717 sctp_audit_data[sctp_audit_indx][0] = ev;
718 sctp_audit_data[sctp_audit_indx][1] = fd;
719 sctp_audit_indx++;
720 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
721 sctp_audit_indx = 0;
722 }
723 }
724
725 #endif
726
727 /*
728 * a list of sizes based on typical mtu's, used only if next hop size not
729 * returned.
730 */
/*
 * Must stay in ascending order and contain exactly NUMBER_OF_MTU_SIZES
 * (18) entries: find_next_best_mtu() linearly scans for the first entry
 * larger than the failed size and steps back one.
 */
static int sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};
751
752 void
753 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
754 {
755 struct sctp_association *asoc;
756 struct sctp_nets *net;
757
758 asoc = &stcb->asoc;
759
760 (void)SCTP_OS_TIMER_STOP(&asoc->hb_timer.timer);
761 (void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
762 (void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
763 (void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
764 (void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
765 (void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
766 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
767 (void)SCTP_OS_TIMER_STOP(&net->fr_timer.timer);
768 (void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
769 }
770 }
771
772 int
773 find_next_best_mtu(int totsz)
774 {
775 int i, perfer;
776
777 /*
778 * if we are in here we must find the next best fit based on the
779 * size of the dg that failed to be sent.
780 */
781 perfer = 0;
782 for (i = 0; i < NUMBER_OF_MTU_SIZES; i++) {
783 if (totsz < sctp_mtu_sizes[i]) {
784 perfer = i - 1;
785 if (perfer < 0)
786 perfer = 0;
787 break;
788 }
789 }
790 return (sctp_mtu_sizes[perfer]);
791 }
792
/*
 * Refill the endpoint's random_store by HMAC-ing its random_numbers seed
 * with a monotonically increasing counter, then reset the read position
 * (store_at) so sctp_select_initial_TSN() starts over at offset 0.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	m->random_counter++;
}
811
/*
 * Return the next 32-bit value from the endpoint's random store,
 * advancing the shared store_at cursor with a lock-free CAS retry loop.
 * When initial_sequence_debug is non-zero, return a predictable,
 * incrementing sequence instead (debug aid).
 */
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use random selection process to get
	 * the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	store_at = inp->store_at;
	/* Claim the next 4 bytes; wrap before the signature tail. */
	new_store = store_at + sizeof(uint32_t);
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	/* CAS advances the cursor; lose the race and we simply retry. */
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	p = &inp->random_store[store_at];
	xp = (uint32_t *) p;
	x = *xp;
	return (x);
}
849
850 uint32_t
851 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int save_in_twait)
852 {
853 uint32_t x, not_done;
854 struct timeval now;
855
856 (void)SCTP_GETTIME_TIMEVAL(&now);
857 not_done = 1;
858 while (not_done) {
859 x = sctp_select_initial_TSN(&inp->sctp_ep);
860 if (x == 0) {
861 /* we never use 0 */
862 continue;
863 }
864 if (sctp_is_vtag_good(inp, x, lport, rport, &now, save_in_twait)) {
865 not_done = 0;
866 }
867 }
868 return (x);
869 }
870
871 int
872 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
873 uint32_t override_tag, uint32_t vrf_id)
874 {
875 struct sctp_association *asoc;
876
877 /*
878 * Anything set to zero is taken care of by the allocation routine's
879 * bzero
880 */
881
882 /*
883 * Up front select what scoping to apply on addresses I tell my peer
884 * Not sure what to do with these right now, we will need to come up
885 * with a way to set them. We may need to pass them through from the
886 * caller in the sctp_aloc_assoc() function.
887 */
888 int i;
889
890 asoc = &stcb->asoc;
891 /* init all variables to a known value. */
892 SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
893 asoc->max_burst = m->sctp_ep.max_burst;
894 asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
895 asoc->cookie_life = m->sctp_ep.def_cookie_life;
896 asoc->sctp_cmt_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_on_off);
897 /* EY Init nr_sack variable */
898 asoc->sctp_nr_sack_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_nr_sack_on_off);
899 /* JRS 5/21/07 - Init CMT PF variables */
900 asoc->sctp_cmt_pf = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_pf);
901 asoc->sctp_frag_point = m->sctp_frag_point;
902 #ifdef INET
903 asoc->default_tos = m->ip_inp.inp.inp_ip_tos;
904 #else
905 asoc->default_tos = 0;
906 #endif
907
908 #ifdef INET6
909 asoc->default_flowlabel = ((struct in6pcb *)m)->in6p_flowinfo;
910 #else
911 asoc->default_flowlabel = 0;
912 #endif
913 asoc->sb_send_resv = 0;
914 if (override_tag) {
915 asoc->my_vtag = override_tag;
916 } else {
917 asoc->my_vtag = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
918 }
919 /* Get the nonce tags */
920 asoc->my_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
921 asoc->peer_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
922 asoc->vrf_id = vrf_id;
923
924 if (sctp_is_feature_on(m, SCTP_PCB_FLAGS_DONOT_HEARTBEAT))
925 asoc->hb_is_disabled = 1;
926 else
927 asoc->hb_is_disabled = 0;
928
929 #ifdef SCTP_ASOCLOG_OF_TSNS
930 asoc->tsn_in_at = 0;
931 asoc->tsn_out_at = 0;
932 asoc->tsn_in_wrapped = 0;
933 asoc->tsn_out_wrapped = 0;
934 asoc->cumack_log_at = 0;
935 asoc->cumack_log_atsnt = 0;
936 #endif
937 #ifdef SCTP_FS_SPEC_LOG
938 asoc->fs_index = 0;
939 #endif
940 asoc->refcnt = 0;
941 asoc->assoc_up_sent = 0;
942 asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
943 sctp_select_initial_TSN(&m->sctp_ep);
944 asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
945 /* we are optimisitic here */
946 asoc->peer_supports_pktdrop = 1;
947 asoc->peer_supports_nat = 0;
948 asoc->sent_queue_retran_cnt = 0;
949
950 /* for CMT */
951 asoc->last_net_cmt_send_started = NULL;
952
953 /* This will need to be adjusted */
954 asoc->last_cwr_tsn = asoc->init_seq_number - 1;
955 asoc->last_acked_seq = asoc->init_seq_number - 1;
956 asoc->advanced_peer_ack_point = asoc->last_acked_seq;
957 asoc->asconf_seq_in = asoc->last_acked_seq;
958
959 /* here we are different, we hold the next one we expect */
960 asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
961
962 asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
963 asoc->initial_rto = m->sctp_ep.initial_rto;
964
965 asoc->max_init_times = m->sctp_ep.max_init_times;
966 asoc->max_send_times = m->sctp_ep.max_send_times;
967 asoc->def_net_failure = m->sctp_ep.def_net_failure;
968 asoc->free_chunk_cnt = 0;
969
970 asoc->iam_blocking = 0;
971 /* ECN Nonce initialization */
972 asoc->context = m->sctp_context;
973 asoc->def_send = m->def_send;
974 asoc->ecn_nonce_allowed = 0;
975 asoc->receiver_nonce_sum = 1;
976 asoc->nonce_sum_expect_base = 1;
977 asoc->nonce_sum_check = 1;
978 asoc->nonce_resync_tsn = 0;
979 asoc->nonce_wait_for_ecne = 0;
980 asoc->nonce_wait_tsn = 0;
981 asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
982 asoc->sack_freq = m->sctp_ep.sctp_sack_freq;
983 asoc->pr_sctp_cnt = 0;
984 asoc->total_output_queue_size = 0;
985
986 if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
987 struct in6pcb *inp6;
988
989 /* Its a V6 socket */
990 inp6 = (struct in6pcb *)m;
991 asoc->ipv6_addr_legal = 1;
992 /* Now look at the binding flag to see if V4 will be legal */
993 if (SCTP_IPV6_V6ONLY(inp6) == 0) {
994 asoc->ipv4_addr_legal = 1;
995 } else {
996 /* V4 addresses are NOT legal on the association */
997 asoc->ipv4_addr_legal = 0;
998 }
999 } else {
1000 /* Its a V4 socket, no - V6 */
1001 asoc->ipv4_addr_legal = 1;
1002 asoc->ipv6_addr_legal = 0;
1003 }
1004
1005 asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND);
1006 asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket);
1007
1008 asoc->smallest_mtu = m->sctp_frag_point;
1009 #ifdef SCTP_PRINT_FOR_B_AND_M
1010 SCTP_PRINTF("smallest_mtu init'd with asoc to :%d\n",
1011 asoc->smallest_mtu);
1012 #endif
1013 asoc->minrto = m->sctp_ep.sctp_minrto;
1014 asoc->maxrto = m->sctp_ep.sctp_maxrto;
1015
1016 asoc->locked_on_sending = NULL;
1017 asoc->stream_locked_on = 0;
1018 asoc->ecn_echo_cnt_onq = 0;
1019 asoc->stream_locked = 0;
1020
1021 asoc->send_sack = 1;
1022
1023 LIST_INIT(&asoc->sctp_restricted_addrs);
1024
1025 TAILQ_INIT(&asoc->nets);
1026 TAILQ_INIT(&asoc->pending_reply_queue);
1027 TAILQ_INIT(&asoc->asconf_ack_sent);
1028 /* Setup to fill the hb random cache at first HB */
1029 asoc->hb_random_idx = 4;
1030
1031 asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
1032
1033 /*
1034 * JRS - Pick the default congestion control module based on the
1035 * sysctl.
1036 */
1037 switch (m->sctp_ep.sctp_default_cc_module) {
1038 /* JRS - Standard TCP congestion control */
1039 case SCTP_CC_RFC2581:
1040 {
1041 stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
1042 stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1043 stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
1044 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
1045 stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1046 stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1047 stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1048 stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1049 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1050 break;
1051 }
1052 /* JRS - High Speed TCP congestion control (Floyd) */
1053 case SCTP_CC_HSTCP:
1054 {
1055 stcb->asoc.congestion_control_module = SCTP_CC_HSTCP;
1056 stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1057 stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_hs_cwnd_update_after_sack;
1058 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_hs_cwnd_update_after_fr;
1059 stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1060 stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1061 stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1062 stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1063 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1064 break;
1065 }
1066 /* JRS - HTCP congestion control */
1067 case SCTP_CC_HTCP:
1068 {
1069 stcb->asoc.congestion_control_module = SCTP_CC_HTCP;
1070 stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_htcp_set_initial_cc_param;
1071 stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_htcp_cwnd_update_after_sack;
1072 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_htcp_cwnd_update_after_fr;
1073 stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_htcp_cwnd_update_after_timeout;
1074 stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_htcp_cwnd_update_after_ecn_echo;
1075 stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1076 stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1077 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_htcp_cwnd_update_after_fr_timer;
1078 break;
1079 }
1080 /* JRS - By default, use RFC2581 */
1081 default:
1082 {
1083 stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
1084 stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1085 stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
1086 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
1087 stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1088 stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1089 stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1090 stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1091 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1092 break;
1093 }
1094 }
1095
1096 /*
1097 * Now the stream parameters, here we allocate space for all streams
1098 * that we request by default.
1099 */
1100 asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1101 m->sctp_ep.pre_open_stream_count;
1102 SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1103 asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1104 SCTP_M_STRMO);
1105 if (asoc->strmout == NULL) {
1106 /* big trouble no memory */
1107 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1108 return (ENOMEM);
1109 }
1110 for (i = 0; i < asoc->streamoutcnt; i++) {
1111 /*
1112 * inbound side must be set to 0xffff, also NOTE when we get
1113 * the INIT-ACK back (for INIT sender) we MUST reduce the
1114 * count (streamoutcnt) but first check if we sent to any of
1115 * the upper streams that were dropped (if some were). Those
1116 * that were dropped must be notified to the upper layer as
1117 * failed to send.
1118 */
1119 asoc->strmout[i].next_sequence_sent = 0x0;
1120 TAILQ_INIT(&asoc->strmout[i].outqueue);
1121 asoc->strmout[i].stream_no = i;
1122 asoc->strmout[i].last_msg_incomplete = 0;
1123 asoc->strmout[i].next_spoke.tqe_next = 0;
1124 asoc->strmout[i].next_spoke.tqe_prev = 0;
1125 }
1126 /* Now the mapping array */
1127 asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1128 SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1129 SCTP_M_MAP);
1130 if (asoc->mapping_array == NULL) {
1131 SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1132 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1133 return (ENOMEM);
1134 }
1135 memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1136 SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1137 SCTP_M_MAP);
1138 if (asoc->nr_mapping_array == NULL) {
1139 SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1140 SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1141 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1142 return (ENOMEM);
1143 }
1144 memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1145
1146 /* Now the init of the other outqueues */
1147 TAILQ_INIT(&asoc->free_chunks);
1148 TAILQ_INIT(&asoc->out_wheel);
1149 TAILQ_INIT(&asoc->control_send_queue);
1150 TAILQ_INIT(&asoc->asconf_send_queue);
1151 TAILQ_INIT(&asoc->send_queue);
1152 TAILQ_INIT(&asoc->sent_queue);
1153 TAILQ_INIT(&asoc->reasmqueue);
1154 TAILQ_INIT(&asoc->resetHead);
1155 asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
1156 TAILQ_INIT(&asoc->asconf_queue);
1157 /* authentication fields */
1158 asoc->authinfo.random = NULL;
1159 asoc->authinfo.active_keyid = 0;
1160 asoc->authinfo.assoc_key = NULL;
1161 asoc->authinfo.assoc_keyid = 0;
1162 asoc->authinfo.recv_key = NULL;
1163 asoc->authinfo.recv_keyid = 0;
1164 LIST_INIT(&asoc->shared_keys);
1165 asoc->marked_retrans = 0;
1166 asoc->timoinit = 0;
1167 asoc->timodata = 0;
1168 asoc->timosack = 0;
1169 asoc->timoshutdown = 0;
1170 asoc->timoheartbeat = 0;
1171 asoc->timocookie = 0;
1172 asoc->timoshutdownack = 0;
1173 (void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1174 asoc->discontinuity_time = asoc->start_time;
1175 /*
1176 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1177 * freed later when the association is freed.
1178 */
1179 return (0);
1180 }
1181
1182 void
1183 sctp_print_mapping_array(struct sctp_association *asoc)
1184 {
1185 unsigned int i, limit;
1186
1187 printf("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1188 asoc->mapping_array_size,
1189 asoc->mapping_array_base_tsn,
1190 asoc->cumulative_tsn,
1191 asoc->highest_tsn_inside_map,
1192 asoc->highest_tsn_inside_nr_map);
1193 for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1194 if (asoc->mapping_array[limit - 1]) {
1195 break;
1196 }
1197 }
1198 printf("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1199 for (i = 0; i < limit; i++) {
1200 printf("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1201 if (((i + 1) % 16) == 0)
1202 printf("\n");
1203 }
1204 if (limit % 16)
1205 printf("\n");
1206 for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1207 if (asoc->nr_mapping_array[limit - 1]) {
1208 break;
1209 }
1210 }
1211 printf("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1212 for (i = 0; i < limit; i++) {
1213 printf("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1214 }
1215 if (limit % 16)
1216 printf("\n");
1217 }
1218
1219 int
1220 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1221 {
1222 /* mapping array needs to grow */
1223 uint8_t *new_array1, *new_array2;
1224 uint32_t new_size;
1225
1226 new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1227 SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1228 SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1229 if ((new_array1 == NULL) || (new_array2 == NULL)) {
1230 /* can't get more, forget it */
1231 SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1232 if (new_array1) {
1233 SCTP_FREE(new_array1, SCTP_M_MAP);
1234 }
1235 if (new_array2) {
1236 SCTP_FREE(new_array2, SCTP_M_MAP);
1237 }
1238 return (-1);
1239 }
1240 memset(new_array1, 0, new_size);
1241 memset(new_array2, 0, new_size);
1242 memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1243 memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1244 SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1245 SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1246 asoc->mapping_array = new_array1;
1247 asoc->nr_mapping_array = new_array2;
1248 asoc->mapping_array_size = new_size;
1249 return (0);
1250 }
1251
1252
/*
 * Core of the association iterator: walk endpoints (inps) and their
 * associations (stcbs), filtering on the iterator's pcb_flags,
 * pcb_features and asoc_state, and invoke the registered callbacks:
 * function_inp once per matching endpoint, function_assoc once per
 * matching association, function_inp_end when an endpoint is finished,
 * and function_atend when the whole walk completes (at which point the
 * iterator itself is freed).
 *
 * Locking: runs under the INP-INFO read lock and the ITERATOR lock for
 * the whole walk, taking each inp's read lock and each stcb's TCB lock
 * while visiting it.  Every SCTP_ITERATOR_MAX_AT_ONCE associations the
 * locks are briefly dropped and re-taken to let other threads in.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	if (it->inp) {
		/* Drop the reference taken when the iterator was queued. */
		SCTP_INP_RLOCK(it->inp);
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		/* The iterator owns itself; free it on completion. */
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	if (first_in) {
		/* it->inp is already read-locked from above. */
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	/* Skip endpoints whose flags/features don't match the filter. */
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			/* Single-inp mode: a non-match means we're done. */
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		/* Advance to the next inp before unlocking the current one. */
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		/* Per-endpoint callback runs once; it may ask us to skip. */
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		/* Endpoint skipped or has no associations: close it out. */
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/*
			 * Hold the stcb alive with a refcount and the inp
			 * with an INP ref while all locks are dropped.
			 */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			/* While we slept, someone may have signalled us. */
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_MUST_EXIT) {
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				printf("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			/* Re-acquire in lock order and drop the temp refs. */
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1404
1405 void
1406 sctp_iterator_worker(void)
1407 {
1408 struct sctp_iterator *it = NULL;
1409
1410 /* This function is called with the WQ lock in place */
1411
1412 sctp_it_ctl.iterator_running = 1;
1413 sctp_it_ctl.cur_it = it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead);
1414 while (it) {
1415 /* now lets work on this one */
1416 TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1417 SCTP_IPI_ITERATOR_WQ_UNLOCK();
1418 CURVNET_SET(it->vn);
1419 sctp_iterator_work(it);
1420
1421 CURVNET_RESTORE();
1422 SCTP_IPI_ITERATOR_WQ_LOCK();
1423 if (sctp_it_ctl.iterator_flags & SCTP_ITERATOR_MUST_EXIT) {
1424 sctp_it_ctl.cur_it = NULL;
1425 break;
1426 }
1427 /* sa_ignore FREED_MEMORY */
1428 sctp_it_ctl.cur_it = it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead);
1429 }
1430 sctp_it_ctl.iterator_running = 0;
1431 return;
1432 }
1433
1434
1435 static void
1436 sctp_handle_addr_wq(void)
1437 {
1438 /* deal with the ADDR wq from the rtsock calls */
1439 struct sctp_laddr *wi;
1440 struct sctp_asconf_iterator *asc;
1441
1442 SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1443 sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1444 if (asc == NULL) {
1445 /* Try later, no memory */
1446 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1447 (struct sctp_inpcb *)NULL,
1448 (struct sctp_tcb *)NULL,
1449 (struct sctp_nets *)NULL);
1450 return;
1451 }
1452 LIST_INIT(&asc->list_of_work);
1453 asc->cnt = 0;
1454
1455 SCTP_WQ_ADDR_LOCK();
1456 wi = LIST_FIRST(&SCTP_BASE_INFO(addr_wq));
1457 while (wi != NULL) {
1458 LIST_REMOVE(wi, sctp_nxt_addr);
1459 LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1460 asc->cnt++;
1461 wi = LIST_FIRST(&SCTP_BASE_INFO(addr_wq));
1462 }
1463 SCTP_WQ_ADDR_UNLOCK();
1464
1465 if (asc->cnt == 0) {
1466 SCTP_FREE(asc, SCTP_M_ASC_IT);
1467 } else {
1468 (void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1469 sctp_asconf_iterator_stcb,
1470 NULL, /* No ep end for boundall */
1471 SCTP_PCB_FLAGS_BOUNDALL,
1472 SCTP_PCB_ANY_FEATURES,
1473 SCTP_ASOC_ANY_STATE,
1474 (void *)asc, 0,
1475 sctp_asconf_iterator_end, NULL, 0);
1476 }
1477 }
1478
/*
 * NOTE(review): these are file-wide globals with external linkage but
 * are written only inside sctp_timeout_handler() below (T3 send case).
 * They look as though they should be function-local — and they are
 * updated without synchronization, so concurrent timers clobber each
 * other's values.  Confirm no other translation unit references them
 * before narrowing their scope.
 */
int retcode = 0;
int cur_oerr = 0;
1481
/*
 * Master callout handler for all SCTP timer types.  `t` points at a
 * struct sctp_timer whose ep/tcb/net fields identify the endpoint,
 * association and destination the timer was armed for.  The function
 * first performs staleness/validity checks (tmr->self back-pointer,
 * valid type, required inp/stcb present and not being freed), takes an
 * inp reference and the TCB lock as needed, then dispatches on
 * tmr->type and releases everything on the way out.
 *
 * tmr->stopped_from is used as a breadcrumb (0xa001..0xa006, then the
 * timer type) recording how far the handler got, for debugging.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	int did_output, type;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	/* A stale callout: the timer no longer points back at itself. */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	/* Every type except ADDR_WQ needs an endpoint. */
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		CURVNET_RESTORE();
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	type = tmr->type;
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		/*
		 * Bail if the socket is gone, unless this timer type is one
		 * that must still run during endpoint/association teardown.
		 */
		if ((inp->sctp_socket == 0) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* Pin the association while we inspect its state. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			/* Association already torn down; drop refs and go. */
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/* Timer was stopped while this callout was in flight. */
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		/* Take the TCB lock, then trade the refcnt pin for it. */
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	/* record in stopped what t-o occurred */
	tmr->stopped_from = tmr->type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (tmr->type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		/*
		 * NOTE(review): cur_oerr and retcode are file-scope globals
		 * (declared just above this function); cur_oerr is stored
		 * here but never read in this function.
		 */
		cur_oerr = stcb->asoc.overall_error_count;
		retcode = sctp_t3rxt_timer(inp, stcb, net);
		if (retcode) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)
		    ) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/* T1-init retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* Delayed-SACK timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		} {
			SCTP_STAT_INCR(sctps_timosack);
			stcb->asoc.timosack++;
			sctp_send_sack(stcb);
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		{
			struct sctp_nets *lnet;
			int cnt_of_unconf = 0;

			if ((stcb == NULL) || (inp == NULL)) {
				break;
			}
			SCTP_STAT_INCR(sctps_timoheartbeat);
			stcb->asoc.timoheartbeat++;
			/* Count reachable-but-unconfirmed destinations. */
			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
					cnt_of_unconf++;
				}
			}
			/*
			 * NOTE(review): after the FOREACH above, lnet is
			 * NULL here; sctp_heartbeat_timer() is being passed
			 * a NULL net — confirm it tolerates that.
			 */
			if (cnt_of_unconf == 0) {
				if (sctp_heartbeat_timer(inp, stcb, lnet,
				    cnt_of_unconf)) {
					/* no need to unlock on tcb its gone */
					goto out_decr;
				}
			}
#ifdef SCTP_AUDITING_ENABLED
			sctp_auditing(4, inp, stcb, lnet);
#endif
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT,
			    stcb->sctp_ep, stcb, lnet);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		{
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			/* Rotate the endpoint's cookie secret keys. */
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		/* Shutdown took too long: abort the association. */
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		sctp_abort_an_association(inp, stcb,
		    SCTP_SHUTDOWN_GUARD_EXPIRES, NULL, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_EARLYFR:
		/* Need to do FR of things for net */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoearlyfr);
		sctp_early_fr_timer(inp, stcb, net);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Platforms that need the socket lock for freeing must
		 * acquire it in socket-then-TCB order, holding a refcnt
		 * across the unlock/relock window.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_FROM_INPKILL_TIMER);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    tmr->type);
		break;
	};			/* NOTE(review): stray ';' — harmless empty statement */
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) tmr->type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
	    type);
	CURVNET_RESTORE();
}
1947
1948 void
1949 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1950 struct sctp_nets *net)
1951 {
1952 int to_ticks;
1953 struct sctp_timer *tmr;
1954
1955 if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1956 return;
1957
1958 to_ticks = 0;
1959
1960 tmr = NULL;
1961 if (stcb) {
1962 SCTP_TCB_LOCK_ASSERT(stcb);
1963 }
1964 switch (t_type) {
1965 case SCTP_TIMER_TYPE_ZERO_COPY:
1966 tmr = &inp->sctp_ep.zero_copy_timer;
1967 to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1968 break;
1969 case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1970 tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1971 to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1972 break;
1973 case SCTP_TIMER_TYPE_ADDR_WQ:
1974 /* Only 1 tick away :-) */
1975 tmr = &SCTP_BASE_INFO(addr_wq_timer);
1976 to_ticks = SCTP_ADDRESS_TICK_DELAY;
1977 break;
1978 case SCTP_TIMER_TYPE_SEND:
1979 /* Here we use the RTO timer */
1980 {
1981 int rto_val;
1982
1983 if ((stcb == NULL) || (net == NULL)) {
1984 return;
1985 }
1986 tmr = &net->rxt_timer;
1987 if (net->RTO == 0) {
1988 rto_val = stcb->asoc.initial_rto;
1989 } else {
1990 rto_val = net->RTO;
1991 }
1992 to_ticks = MSEC_TO_TICKS(rto_val);
1993 }
1994 break;
1995 case SCTP_TIMER_TYPE_INIT:
1996 /*
1997 * Here we use the INIT timer default usually about 1
1998 * minute.
1999 */
2000 if ((stcb == NULL) || (net == NULL)) {
2001 return;
2002 }
2003 tmr = &net->rxt_timer;
2004 if (net->RTO == 0) {
2005 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2006 } else {
2007 to_ticks = MSEC_TO_TICKS(net->RTO);
2008 }
2009 break;
2010 case SCTP_TIMER_TYPE_RECV:
2011 /*
2012 * Here we use the Delayed-Ack timer value from the inp
2013 * ususually about 200ms.
2014 */
2015 if (stcb == NULL) {
2016 return;
2017 }
2018 tmr = &stcb->asoc.dack_timer;
2019 to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
2020 break;
2021 case SCTP_TIMER_TYPE_SHUTDOWN:
2022 /* Here we use the RTO of the destination. */
2023 if ((stcb == NULL) || (net == NULL)) {
2024 return;
2025 }
2026 if (net->RTO == 0) {
2027 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2028 } else {
2029 to_ticks = MSEC_TO_TICKS(net->RTO);
2030 }
2031 tmr = &net->rxt_timer;
2032 break;
2033 case SCTP_TIMER_TYPE_HEARTBEAT:
2034 /*
2035 * the net is used here so that we can add in the RTO. Even
2036 * though we use a different timer. We also add the HB timer
2037 * PLUS a random jitter.
2038 */
2039 if ((inp == NULL) || (stcb == NULL)) {
2040 return;
2041 } else {
2042 uint32_t rndval;
2043 uint8_t this_random;
2044 int cnt_of_unconf = 0;
2045 struct sctp_nets *lnet;
2046
2047 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
2048 if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2049 (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
2050 cnt_of_unconf++;
2051 }
2052 }
2053 if (cnt_of_unconf) {
2054 net = lnet = NULL;
2055 (void)sctp_heartbeat_timer(inp, stcb, lnet, cnt_of_unconf);
2056 }
2057 if (stcb->asoc.hb_random_idx > 3) {
2058 rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2059 memcpy(stcb->asoc.hb_random_values, &rndval,
2060 sizeof(stcb->asoc.hb_random_values));
2061 stcb->asoc.hb_random_idx = 0;
2062 }
2063 this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
2064 stcb->asoc.hb_random_idx++;
2065 stcb->asoc.hb_ect_randombit = 0;
2066 /*
2067 * this_random will be 0 - 256 ms RTO is in ms.
2068 */
2069 if ((stcb->asoc.hb_is_disabled) &&
2070 (cnt_of_unconf == 0)) {
2071 return;
2072 }
2073 if (net) {
2074 int delay;
2075
2076 delay = stcb->asoc.heart_beat_delay;
2077 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
2078 if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2079 ((lnet->dest_state & SCTP_ADDR_OUT_OF_SCOPE) == 0) &&
2080 (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
2081 delay = 0;
2082 }
2083 }
2084 if (net->RTO == 0) {
2085 /* Never been checked */
2086 to_ticks = this_random + stcb->asoc.initial_rto + delay;
2087 } else {
2088 /* set rto_val to the ms */
2089 to_ticks = delay + net->RTO + this_random;
2090 }
2091 } else {
2092 if (cnt_of_unconf) {
2093 to_ticks = this_random + stcb->asoc.initial_rto;
2094 } else {
2095 to_ticks = stcb->asoc.heart_beat_delay + this_random + stcb->asoc.initial_rto;
2096 }
2097 }
2098 /*
2099 * Now we must convert the to_ticks that are now in
2100 * ms to ticks.
2101 */
2102 to_ticks = MSEC_TO_TICKS(to_ticks);
2103 tmr = &stcb->asoc.hb_timer;
2104 }
2105 break;
2106 case SCTP_TIMER_TYPE_COOKIE:
2107 /*
2108 * Here we can use the RTO timer from the network since one
2109 * RTT was compelete. If a retran happened then we will be
2110 * using the RTO initial value.
2111 */
2112 if ((stcb == NULL) || (net == NULL)) {
2113 return;
2114 }
2115 if (net->RTO == 0) {
2116 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2117 } else {
2118 to_ticks = MSEC_TO_TICKS(net->RTO);
2119 }
2120 tmr = &net->rxt_timer;
2121 break;
2122 case SCTP_TIMER_TYPE_NEWCOOKIE:
2123 /*
2124 * nothing needed but the endpoint here ususually about 60
2125 * minutes.
2126 */
2127 if (inp == NULL) {
2128 return;
2129 }
2130 tmr = &inp->sctp_ep.signature_change;
2131 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2132 break;
2133 case SCTP_TIMER_TYPE_ASOCKILL:
2134 if (stcb == NULL) {
2135 return;
2136 }
2137 tmr = &stcb->asoc.strreset_timer;
2138 to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2139 break;
2140 case SCTP_TIMER_TYPE_INPKILL:
2141 /*
2142 * The inp is setup to die. We re-use the signature_chage
2143 * timer since that has stopped and we are in the GONE
2144 * state.
2145 */
2146 if (inp == NULL) {
2147 return;
2148 }
2149 tmr = &inp->sctp_ep.signature_change;
2150 to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2151 break;
2152 case SCTP_TIMER_TYPE_PATHMTURAISE:
2153 /*
2154 * Here we use the value found in the EP for PMTU ususually
2155 * about 10 minutes.
2156 */
2157 if ((stcb == NULL) || (inp == NULL)) {
2158 return;
2159 }
2160 if (net == NULL) {
2161 return;
2162 }
2163 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2164 tmr = &net->pmtu_timer;
2165 break;
2166 case SCTP_TIMER_TYPE_SHUTDOWNACK:
2167 /* Here we use the RTO of the destination */
2168 if ((stcb == NULL) || (net == NULL)) {
2169 return;
2170 }
2171 if (net->RTO == 0) {
2172 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2173 } else {
2174 to_ticks = MSEC_TO_TICKS(net->RTO);
2175 }
2176 tmr = &net->rxt_timer;
2177 break;
2178 case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2179 /*
2180 * Here we use the endpoints shutdown guard timer usually
2181 * about 3 minutes.
2182 */
2183 if ((inp == NULL) || (stcb == NULL)) {
2184 return;
2185 }
2186 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2187 tmr = &stcb->asoc.shut_guard_timer;
2188 break;
2189 case SCTP_TIMER_TYPE_STRRESET:
2190 /*
2191 * Here the timer comes from the stcb but its value is from
2192 * the net's RTO.
2193 */
2194 if ((stcb == NULL) || (net == NULL)) {
2195 return;
2196 }
2197 if (net->RTO == 0) {
2198 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2199 } else {
2200 to_ticks = MSEC_TO_TICKS(net->RTO);
2201 }
2202 tmr = &stcb->asoc.strreset_timer;
2203 break;
2204
2205 case SCTP_TIMER_TYPE_EARLYFR:
2206 {
2207 unsigned int msec;
2208
2209 if ((stcb == NULL) || (net == NULL)) {
2210 return;
2211 }
2212 if (net->flight_size > net->cwnd) {
2213 /* no need to start */
2214 return;
2215 }
2216 SCTP_STAT_INCR(sctps_earlyfrstart);
2217 if (net->lastsa == 0) {
2218 /* Hmm no rtt estimate yet? */
2219 msec = stcb->asoc.initial_rto >> 2;
2220 } else {
2221 msec = ((net->lastsa >> 2) + net->lastsv) >> 1;
2222 }
2223 if (msec < SCTP_BASE_SYSCTL(sctp_early_fr_msec)) {
2224 msec = SCTP_BASE_SYSCTL(sctp_early_fr_msec);
2225 if (msec < SCTP_MINFR_MSEC_FLOOR) {
2226 msec = SCTP_MINFR_MSEC_FLOOR;
2227 }
2228 }
2229 to_ticks = MSEC_TO_TICKS(msec);
2230 tmr = &net->fr_timer;
2231 }
2232 break;
2233 case SCTP_TIMER_TYPE_ASCONF:
2234 /*
2235 * Here the timer comes from the stcb but its value is from
2236 * the net's RTO.
2237 */
2238 if ((stcb == NULL) || (net == NULL)) {
2239 return;
2240 }
2241 if (net->RTO == 0) {
2242 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2243 } else {
2244 to_ticks = MSEC_TO_TICKS(net->RTO);
2245 }
2246 tmr = &stcb->asoc.asconf_timer;
2247 break;
2248 case SCTP_TIMER_TYPE_PRIM_DELETED:
2249 if ((stcb == NULL) || (net != NULL)) {
2250 return;
2251 }
2252 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2253 tmr = &stcb->asoc.delete_prim_timer;
2254 break;
2255 case SCTP_TIMER_TYPE_AUTOCLOSE:
2256 if (stcb == NULL) {
2257 return;
2258 }
2259 if (stcb->asoc.sctp_autoclose_ticks == 0) {
2260 /*
2261 * Really an error since stcb is NOT set to
2262 * autoclose
2263 */
2264 return;
2265 }
2266 to_ticks = stcb->asoc.sctp_autoclose_ticks;
2267 tmr = &stcb->asoc.autoclose_timer;
2268 break;
2269 default:
2270 SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2271 __FUNCTION__, t_type);
2272 return;
2273 break;
2274 };
2275 if ((to_ticks <= 0) || (tmr == NULL)) {
2276 SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2277 __FUNCTION__, t_type, to_ticks, tmr);
2278 return;
2279 }
2280 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2281 /*
2282 * we do NOT allow you to have it already running. if it is
2283 * we leave the current one up unchanged
2284 */
2285 return;
2286 }
2287 /* At this point we can proceed */
2288 if (t_type == SCTP_TIMER_TYPE_SEND) {
2289 stcb->asoc.num_send_timers_up++;
2290 }
2291 tmr->stopped_from = 0;
2292 tmr->type = t_type;
2293 tmr->ep = (void *)inp;
2294 tmr->tcb = (void *)stcb;
2295 tmr->net = (void *)net;
2296 tmr->self = (void *)tmr;
2297 tmr->vnet = (void *)curvnet;
2298 tmr->ticks = sctp_get_tick_count();
2299 (void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2300 return;
2301 }
2302
/*
 * Stop (cancel) the timer of type 't_type' associated with the given
 * inp/stcb/net combination.  'from' records the caller's location and is
 * stored in tmr->stopped_from for debugging.  Stopping a timer that is
 * currently in joint use by another type (e.g. the cookie/init/send
 * timers all share net->rxt_timer) is a silent no-op.
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/* Only the address work-queue timer may be stopped without an inp. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	/* Resolve the timer type to the sctp_timer instance backing it. */
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_EARLYFR:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->fr_timer;
		SCTP_STAT_INCR(sctps_earlyfrstop);
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer (shares strreset_timer).
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_change
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		break;
	};
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped. So just
		 * return.
		 */
		return;
	}
	/* Keep the association's count of pending SEND timers accurate. */
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}
2466
2467 uint32_t
2468 sctp_calculate_len(struct mbuf *m)
2469 {
2470 uint32_t tlen = 0;
2471 struct mbuf *at;
2472
2473 at = m;
2474 while (at) {
2475 tlen += SCTP_BUF_LEN(at);
2476 at = SCTP_BUF_NEXT(at);
2477 }
2478 return (tlen);
2479 }
2480
2481 void
2482 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2483 struct sctp_association *asoc, uint32_t mtu)
2484 {
2485 /*
2486 * Reset the P-MTU size on this association, this involves changing
2487 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2488 * allow the DF flag to be cleared.
2489 */
2490 struct sctp_tmit_chunk *chk;
2491 unsigned int eff_mtu, ovh;
2492
2493 #ifdef SCTP_PRINT_FOR_B_AND_M
2494 SCTP_PRINTF("sctp_mtu_size_reset(%p, asoc:%p mtu:%d\n",
2495 inp, asoc, mtu);
2496 #endif
2497 asoc->smallest_mtu = mtu;
2498 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2499 ovh = SCTP_MIN_OVERHEAD;
2500 } else {
2501 ovh = SCTP_MIN_V4_OVERHEAD;
2502 }
2503 eff_mtu = mtu - ovh;
2504 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2505
2506 if (chk->send_size > eff_mtu) {
2507 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2508 }
2509 }
2510 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2511 if (chk->send_size > eff_mtu) {
2512 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2513 }
2514 }
2515 }
2516
2517
2518 /*
2519 * given an association and starting time of the current RTT period return
2520 * RTO in number of msecs net should point to the current network
2521 */
uint32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *told,
    int safe)
{
	/*-
	 * Measure the RTT sample (now - *told) in ms, fold it into the
	 * destination's Van Jacobson SRTT/RTTVAR state (net->lastsa and
	 * net->lastsv, kept scaled by SCTP_RTT_SHIFT / SCTP_RTT_VAR_SHIFT)
	 * and return the new RTO in msecs, bounded by the association's
	 * minrto/maxrto.  'asoc' is accepted for caller symmetry but is
	 * not read here.
	 */
	int calc_time = 0;	/* measured RTT sample, in ms */
	int o_calctime;		/* unmodified copy of the sample */
	uint32_t new_rto = 0;
	int first_measure = 0;
	struct timeval now, then, *old;

	/*
	 * Copy it out for sparc64: 'told' may be unaligned, so callers
	 * pass sctp_align_unsafe_makecopy to request an aligned local
	 * copy; sctp_align_safe_nocopy uses the caller's storage directly.
	 */
	if (safe == sctp_align_unsafe_makecopy) {
		old = &then;
		memcpy(&then, told, sizeof(struct timeval));
	} else if (safe == sctp_align_safe_nocopy) {
		old = told;
	} else {
		/* error: unknown 'safe' value */
		SCTP_PRINTF("Huh, bad rto calc call\n");
		return (0);
	}
	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	(void)SCTP_GETTIME_TIMEVAL(&now);
	/* compute the RTT value: (now - *old), expressed in ms */
	if ((u_long)now.tv_sec > (u_long)old->tv_sec) {
		calc_time = ((u_long)now.tv_sec - (u_long)old->tv_sec) * 1000;
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			calc_time += (((u_long)now.tv_usec -
			    (u_long)old->tv_usec) / 1000);
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* Borrow 1,000ms from current calculation */
			calc_time -= 1000;
			/* Add in the slop over */
			calc_time += ((int)now.tv_usec / 1000);
			/* Add in the pre-second ms's */
			calc_time += (((int)1000000 - (int)old->tv_usec) / 1000);
		}
	} else if ((u_long)now.tv_sec == (u_long)old->tv_sec) {
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			calc_time = ((u_long)now.tv_usec -
			    (u_long)old->tv_usec) / 1000;
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* impossible .. garbage in nothing out */
			goto calc_rto;
		} else if ((u_long)now.tv_usec == (u_long)old->tv_usec) {
			/*
			 * We have to have 1 usec :-D this must be the
			 * loopback.
			 */
			calc_time = 1;
		} else {
			/* impossible .. garbage in nothing out */
			goto calc_rto;
		}
	} else {
		/* Clock wrapped? Skip the update, just recompute RTO. */
		goto calc_rto;
	}
	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	net->rtt = o_calctime = calc_time;
	/* this is Van Jacobson's integer version */
	if (net->RTO_measured) {
		calc_time -= (net->lastsa >> SCTP_RTT_SHIFT);	/* take away 1/8th when
								 * shift=3 */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
		net->prev_rtt = o_calctime;
		net->lastsa += calc_time;	/* add 7/8th into sa when
						 * shift=3 */
		/*
		 * calc_time now holds the error term (sample - SRTT);
		 * fold its absolute value into the variance estimate.
		 */
		if (calc_time < 0) {
			calc_time = -calc_time;
		}
		calc_time -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);	/* take away 1/4 when
									 * VAR shift=2 */
		net->lastsv += calc_time;
		if (net->lastsv == 0) {
			net->lastsv = SCTP_CLOCK_GRANULARITY;
		}
	} else {
		/* First RTO measurment */
		net->RTO_measured = 1;
		net->lastsa = calc_time << SCTP_RTT_SHIFT;	/* Multiply by 8 when
								 * shift=3 */
		net->lastsv = calc_time;
		if (net->lastsv == 0) {
			net->lastsv = SCTP_CLOCK_GRANULARITY;
		}
		first_measure = 1;
		net->prev_rtt = o_calctime;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
calc_rto:
	/* RTO = SRTT + RTTVAR (un-scaled) */
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTO */
	return (new_rto);
}
2647
2648 /*
2649 * return a pointer to a contiguous piece of data from the given mbuf chain
2650 * starting at 'off' for 'len' bytes. If the desired piece spans more than
2651 * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2652 * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2653 */
2654 caddr_t
2655 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2656 {
2657 uint32_t count;
2658 uint8_t *ptr;
2659
2660 ptr = in_ptr;
2661 if ((off < 0) || (len <= 0))
2662 return (NULL);
2663
2664 /* find the desired start location */
2665 while ((m != NULL) && (off > 0)) {
2666 if (off < SCTP_BUF_LEN(m))
2667 break;
2668 off -= SCTP_BUF_LEN(m);
2669 m = SCTP_BUF_NEXT(m);
2670 }
2671 if (m == NULL)
2672 return (NULL);
2673
2674 /* is the current mbuf large enough (eg. contiguous)? */
2675 if ((SCTP_BUF_LEN(m) - off) >= len) {
2676 return (mtod(m, caddr_t)+off);
2677 } else {
2678 /* else, it spans more than one mbuf, so save a temp copy... */
2679 while ((m != NULL) && (len > 0)) {
2680 count = min(SCTP_BUF_LEN(m) - off, len);
2681 bcopy(mtod(m, caddr_t)+off, ptr, count);
2682 len -= count;
2683 ptr += count;
2684 off = 0;
2685 m = SCTP_BUF_NEXT(m);
2686 }
2687 if ((m == NULL) && (len > 0))
2688 return (NULL);
2689 else
2690 return ((caddr_t)in_ptr);
2691 }
2692 }
2693
2694
2695
2696 struct sctp_paramhdr *
2697 sctp_get_next_param(struct mbuf *m,
2698 int offset,
2699 struct sctp_paramhdr *pull,
2700 int pull_limit)
2701 {
2702 /* This just provides a typed signature to Peter's Pull routine */
2703 return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2704 (uint8_t *) pull));
2705 }
2706
2707
2708 int
2709 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2710 {
2711 /*
2712 * add padlen bytes of 0 filled padding to the end of the mbuf. If
2713 * padlen is > 3 this routine will fail.
2714 */
2715 uint8_t *dp;
2716 int i;
2717
2718 if (padlen > 3) {
2719 SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2720 return (ENOBUFS);
2721 }
2722 if (padlen <= M_TRAILINGSPACE(m)) {
2723 /*
2724 * The easy way. We hope the majority of the time we hit
2725 * here :)
2726 */
2727 dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
2728 SCTP_BUF_LEN(m) += padlen;
2729 } else {
2730 /* Hard way we must grow the mbuf */
2731 struct mbuf *tmp;
2732
2733 tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA);
2734 if (tmp == NULL) {
2735 /* Out of space GAK! we are in big trouble. */
2736 SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
2737 return (ENOSPC);
2738 }
2739 /* setup and insert in middle */
2740 SCTP_BUF_LEN(tmp) = padlen;
2741 SCTP_BUF_NEXT(tmp) = NULL;
2742 SCTP_BUF_NEXT(m) = tmp;
2743 dp = mtod(tmp, uint8_t *);
2744 }
2745 /* zero out the pad */
2746 for (i = 0; i < padlen; i++) {
2747 *dp = 0;
2748 dp++;
2749 }
2750 return (0);
2751 }
2752
2753 int
2754 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2755 {
2756 /* find the last mbuf in chain and pad it */
2757 struct mbuf *m_at;
2758
2759 m_at = m;
2760 if (last_mbuf) {
2761 return (sctp_add_pad_tombuf(last_mbuf, padval));
2762 } else {
2763 while (m_at) {
2764 if (SCTP_BUF_NEXT(m_at) == NULL) {
2765 return (sctp_add_pad_tombuf(m_at, padval));
2766 }
2767 m_at = SCTP_BUF_NEXT(m_at);
2768 }
2769 }
2770 SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
2771 return (EFAULT);
2772 }
2773
/*
 * Queue an SCTP_ASSOC_CHANGE notification ('event', with cause 'error')
 * on the socket's read queue.  For one-to-one style (TCP model) sockets,
 * a COMM_LOST / CANT_STR_ASSOC event additionally sets so_error and
 * wakes any sleepers so blocked callers see the failure.
 */
static void
sctp_notify_assoc_change(uint32_t event, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an ABORT comes in.
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((event == SCTP_COMM_LOST) || (event == SCTP_CANT_STR_ASSOC))) {
		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
			stcb->sctp_socket->so_error = ECONNREFUSED;
		} else {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
			stcb->sctp_socket->so_error = ECONNRESET;
		}
		/* Wake ANY sleepers */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/*
			 * Lock ordering: hold a refcount, drop the TCB
			 * lock, take the socket lock, re-take the TCB
			 * lock; bail out if the socket closed meanwhile.
			 */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sorwakeup(stcb->sctp_socket);
		sowwakeup(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;

	/* Fill in the sctp_assoc_change event structure. */
	sac = mtod(m_notify, struct sctp_assoc_change *);
	sac->sac_type = SCTP_ASSOC_CHANGE;
	sac->sac_flags = 0;
	sac->sac_length = sizeof(struct sctp_assoc_change);
	sac->sac_state = event;
	sac->sac_error = error;
	/* XXX verify these stream counts */
	sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
	sac->sac_inbound_streams = stcb->asoc.streamincnt;
	sac->sac_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_change);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
	if (event == SCTP_COMM_LOST) {
		/* Wake up any sleeper */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/* same lock dance as above */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
2890
2891 static void
2892 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2893 struct sockaddr *sa, uint32_t error)
2894 {
2895 struct mbuf *m_notify;
2896 struct sctp_paddr_change *spc;
2897 struct sctp_queued_to_read *control;
2898
2899 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
2900 /* event not enabled */
2901 return;
2902 }
2903 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA);
2904 if (m_notify == NULL)
2905 return;
2906 SCTP_BUF_LEN(m_notify) = 0;
2907 spc = mtod(m_notify, struct sctp_paddr_change *);
2908 spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2909 spc->spc_flags = 0;
2910 spc->spc_length = sizeof(struct sctp_paddr_change);
2911 switch (sa->sa_family) {
2912 case AF_INET:
2913 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2914 break;
2915 #ifdef INET6
2916 case AF_INET6:
2917 {
2918 struct sockaddr_in6 *sin6;
2919
2920 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2921
2922 sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
2923 if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
2924 if (sin6->sin6_scope_id == 0) {
2925 /* recover scope_id for user */
2926 (void)sa6_recoverscope(sin6);
2927 } else {
2928 /* clear embedded scope_id for user */
2929 in6_clearscope(&sin6->sin6_addr);
2930 }
2931 }
2932 break;
2933 }
2934 #endif
2935 default:
2936 /* TSNH */
2937 break;
2938 }
2939 spc->spc_state = state;
2940 spc->spc_error = error;
2941 spc->spc_assoc_id = sctp_get_associd(stcb);
2942
2943 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
2944 SCTP_BUF_NEXT(m_notify) = NULL;
2945
2946 /* append to socket */
2947 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2948 0, 0, 0, 0, 0, 0,
2949 m_notify);
2950 if (control == NULL) {
2951 /* no memory */
2952 sctp_m_freem(m_notify);
2953 return;
2954 }
2955 control->length = SCTP_BUF_LEN(m_notify);
2956 control->spec_flags = M_NOTIFICATION;
2957 /* not that we need this */
2958 control->tail_mbuf = m_notify;
2959 sctp_add_to_readq(stcb->sctp_ep, stcb,
2960 control,
2961 &stcb->sctp_socket->so_rcv, 1,
2962 SCTP_READ_LOCK_NOT_HELD,
2963 SCTP_SO_NOT_LOCKED);
2964 }
2965
2966
/*
 * Queue an SCTP_SEND_FAILED notification for a chunk that could not be
 * (fully) delivered.  The chunk's user data is stolen off the chunk
 * (chk->data is set to NULL) and chained onto the notification so the
 * application can recover it.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	/*
	 * Reported length = notification header + user payload, i.e.
	 * send_size minus the DATA chunk header still counted in it.
	 */
	length = sizeof(struct sctp_send_failed) + chk->send_size;
	length -= sizeof(struct sctp_data_chunk);
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
	ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
	ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
	ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
	ssf->ssf_info.sinfo_context = chk->rec.data.context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);

	if (chk->data) {
		/*
		 * trim off the sctp chunk header (it should be there) so
		 * the application sees only its own payload
		 */
		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
			m_adj(chk->data, sizeof(struct sctp_data_chunk));
			sctp_mbuf_crush(chk->data);
			chk->send_size -= sizeof(struct sctp_data_chunk);
		}
	}
	/* chain the (trimmed) user data after the notification header */
	SCTP_BUF_NEXT(m_notify) = chk->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
3048
3049
/*
 * Queue an SCTP_SEND_FAILED notification for a stream-queue pending
 * message (one that never became a chunk).  The pending user data is
 * stolen off 'sp' (sp->data set to NULL) and chained onto the
 * notification so the application can recover it.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
		/* event not enabled */
		return;
	}
	/* Reported length = notification header + still-queued user data. */
	length = sizeof(struct sctp_send_failed) + sp->length;
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = sp->stream;
	ssf->ssf_info.sinfo_ssn = sp->strseq;
	/*
	 * NOTE(review): some_taken appears to indicate that part of the
	 * message was already taken for sending, so the remainder is the
	 * last fragment — confirm against the senders of 'sp'.
	 */
	if (sp->some_taken) {
		ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
	} else {
		ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
	}
	ssf->ssf_info.sinfo_ppid = sp->ppid;
	ssf->ssf_info.sinfo_context = sp->context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);
	/* chain the user data after the notification header */
	SCTP_BUF_NEXT(m_notify) = sp->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}
3122
3123
3124
3125 static void
3126 sctp_notify_adaptation_layer(struct sctp_tcb *stcb,
3127 uint32_t error)
3128 {
3129 struct mbuf *m_notify;
3130 struct sctp_adaptation_event *sai;
3131 struct sctp_queued_to_read *control;
3132
3133 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3134 /* event not enabled */
3135 return;
3136 }
3137 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA);
3138 if (m_notify == NULL)
3139 /* no space left */
3140 return;
3141 SCTP_BUF_LEN(m_notify) = 0;
3142 sai = mtod(m_notify, struct sctp_adaptation_event *);
3143 sai->sai_type = SCTP_ADAPTATION_INDICATION;
3144 sai->sai_flags = 0;
3145 sai->sai_length = sizeof(struct sctp_adaptation_event);
3146 sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3147 sai->sai_assoc_id = sctp_get_associd(stcb);
3148
3149 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3150 SCTP_BUF_NEXT(m_notify) = NULL;
3151
3152 /* append to socket */
3153 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3154 0, 0, 0, 0, 0, 0,
3155 m_notify);
3156 if (control == NULL) {
3157 /* no memory */
3158 sctp_m_freem(m_notify);
3159 return;
3160 }
3161 control->length = SCTP_BUF_LEN(m_notify);
3162 control->spec_flags = M_NOTIFICATION;
3163 /* not that we need this */
3164 control->tail_mbuf = m_notify;
3165 sctp_add_to_readq(stcb->sctp_ep, stcb,
3166 control,
3167 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3168 }
3169
3170 /* This always must be called with the read-queue LOCKED in the INP */
3171 static void
3172 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3173 uint32_t val, int so_locked
3174 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3175 SCTP_UNUSED
3176 #endif
3177 )
3178 {
3179 struct mbuf *m_notify;
3180 struct sctp_pdapi_event *pdapi;
3181 struct sctp_queued_to_read *control;
3182 struct sockbuf *sb;
3183
3184 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3185 /* event not enabled */
3186 return;
3187 }
3188 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA);
3189 if (m_notify == NULL)
3190 /* no space left */
3191 return;
3192 SCTP_BUF_LEN(m_notify) = 0;
3193 pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3194 pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3195 pdapi->pdapi_flags = 0;
3196 pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3197 pdapi->pdapi_indication = error;
3198 pdapi->pdapi_stream = (val >> 16);
3199 pdapi->pdapi_seq = (val & 0x0000ffff);
3200 pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3201
3202 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3203 SCTP_BUF_NEXT(m_notify) = NULL;
3204 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3205 0, 0, 0, 0, 0, 0,
3206 m_notify);
3207 if (control == NULL) {
3208 /* no memory */
3209 sctp_m_freem(m_notify);
3210 return;
3211 }
3212 control->spec_flags = M_NOTIFICATION;
3213 control->length = SCTP_BUF_LEN(m_notify);
3214 /* not that we need this */
3215 control->tail_mbuf = m_notify;
3216 control->held_length = 0;
3217 control->length = 0;
3218 sb = &stcb->sctp_socket->so_rcv;
3219 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3220 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3221 }
3222 sctp_sballoc(stcb, sb, m_notify);
3223 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3224 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3225 }
3226 atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3227 control->end_added = 1;
3228 if (stcb->asoc.control_pdapi)
3229 TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3230 else {
3231 /* we really should not see this case */
3232 TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3233 }
3234 if (stcb->sctp_ep && stcb->sctp_socket) {
3235 /* This should always be the case */
3236 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3237 struct socket *so;
3238
3239 so = SCTP_INP_SO(stcb->sctp_ep);
3240 if (!so_locked) {
3241 atomic_add_int(&stcb->asoc.refcnt, 1);
3242 SCTP_TCB_UNLOCK(stcb);
3243 SCTP_SOCKET_LOCK(so, 1);
3244 SCTP_TCB_LOCK(stcb);
3245 atomic_subtract_int(&stcb->asoc.refcnt, 1);
3246 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3247 SCTP_SOCKET_UNLOCK(so, 1);
3248 return;
3249 }
3250 }
3251 #endif
3252 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3253 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3254 if (!so_locked) {
3255 SCTP_SOCKET_UNLOCK(so, 1);
3256 }
3257 #endif
3258 }
3259 }
3260
/*
 * Handle notification of a peer SHUTDOWN completing.  For TCP-model and
 * connected one-to-one style sockets this first marks the socket as
 * unable to send more data (socantsendmore) and then, if the user has
 * subscribed, queues an SCTP_SHUTDOWN_EVENT on the receive queue.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/*
		 * Lock-order dance: take a TCB refcount, drop the TCB
		 * lock, take the socket lock, then re-take the TCB lock.
		 * Re-check the association state afterwards since it may
		 * have been closed while unlocked.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3328
3329 static void
3330 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3331 int so_locked
3332 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3333 SCTP_UNUSED
3334 #endif
3335 )
3336 {
3337 struct mbuf *m_notify;
3338 struct sctp_sender_dry_event *event;
3339 struct sctp_queued_to_read *control;
3340
3341 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_DRYEVNT)) {
3342 /* event not enabled */
3343 return;
3344 }
3345 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_DONTWAIT, 1, MT_DATA);
3346 if (m_notify == NULL) {
3347 /* no space left */
3348 return;
3349 }
3350 SCTP_BUF_LEN(m_notify) = 0;
3351 event = mtod(m_notify, struct sctp_sender_dry_event *);
3352 event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3353 event->sender_dry_flags = 0;
3354 event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3355 event->sender_dry_assoc_id = sctp_get_associd(stcb);
3356
3357 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3358 SCTP_BUF_NEXT(m_notify) = NULL;
3359
3360 /* append to socket */
3361 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3362 0, 0, 0, 0, 0, 0, m_notify);
3363 if (control == NULL) {
3364 /* no memory */
3365 sctp_m_freem(m_notify);
3366 return;
3367 }
3368 control->length = SCTP_BUF_LEN(m_notify);
3369 control->spec_flags = M_NOTIFICATION;
3370 /* not that we need this */
3371 control->tail_mbuf = m_notify;
3372 sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3373 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3374 }
3375
3376
3377 static void
3378 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, int number_entries, int flag)
3379 {
3380 struct mbuf *m_notify;
3381 struct sctp_queued_to_read *control;
3382 struct sctp_stream_reset_event *strreset;
3383 int len;
3384
3385 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
3386 /* event not enabled */
3387 return;
3388 }
3389 m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3390 if (m_notify == NULL)
3391 /* no space left */
3392 return;
3393 SCTP_BUF_LEN(m_notify) = 0;
3394 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3395 if (len > M_TRAILINGSPACE(m_notify)) {
3396 /* never enough room */
3397 sctp_m_freem(m_notify);
3398 return;
3399 }
3400 strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3401 strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3402 strreset->strreset_flags = SCTP_STRRESET_ADD_STREAM | flag;
3403 strreset->strreset_length = len;
3404 strreset->strreset_assoc_id = sctp_get_associd(stcb);
3405 strreset->strreset_list[0] = number_entries;
3406
3407 SCTP_BUF_LEN(m_notify) = len;
3408 SCTP_BUF_NEXT(m_notify) = NULL;
3409 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3410 /* no space */
3411 sctp_m_freem(m_notify);
3412 return;
3413 }
3414 /* append to socket */
3415 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3416 0, 0, 0, 0, 0, 0,
3417 m_notify);
3418 if (control == NULL) {
3419 /* no memory */
3420 sctp_m_freem(m_notify);
3421 return;
3422 }
3423 control->spec_flags = M_NOTIFICATION;
3424 control->length = SCTP_BUF_LEN(m_notify);
3425 /* not that we need this */
3426 control->tail_mbuf = m_notify;
3427 sctp_add_to_readq(stcb->sctp_ep, stcb,
3428 control,
3429 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3430 }
3431
3432
3433 static void
3434 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3435 int number_entries, uint16_t * list, int flag)
3436 {
3437 struct mbuf *m_notify;
3438 struct sctp_queued_to_read *control;
3439 struct sctp_stream_reset_event *strreset;
3440 int len;
3441
3442 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
3443 /* event not enabled */
3444 return;
3445 }
3446 m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3447 if (m_notify == NULL)
3448 /* no space left */
3449 return;
3450 SCTP_BUF_LEN(m_notify) = 0;
3451 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3452 if (len > M_TRAILINGSPACE(m_notify)) {
3453 /* never enough room */
3454 sctp_m_freem(m_notify);
3455 return;
3456 }
3457 strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3458 strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3459 if (number_entries == 0) {
3460 strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS;
3461 } else {
3462 strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST;
3463 }
3464 strreset->strreset_length = len;
3465 strreset->strreset_assoc_id = sctp_get_associd(stcb);
3466 if (number_entries) {
3467 int i;
3468
3469 for (i = 0; i < number_entries; i++) {
3470 strreset->strreset_list[i] = ntohs(list[i]);
3471 }
3472 }
3473 SCTP_BUF_LEN(m_notify) = len;
3474 SCTP_BUF_NEXT(m_notify) = NULL;
3475 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3476 /* no space */
3477 sctp_m_freem(m_notify);
3478 return;
3479 }
3480 /* append to socket */
3481 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3482 0, 0, 0, 0, 0, 0,
3483 m_notify);
3484 if (control == NULL) {
3485 /* no memory */
3486 sctp_m_freem(m_notify);
3487 return;
3488 }
3489 control->spec_flags = M_NOTIFICATION;
3490 control->length = SCTP_BUF_LEN(m_notify);
3491 /* not that we need this */
3492 control->tail_mbuf = m_notify;
3493 sctp_add_to_readq(stcb->sctp_ep, stcb,
3494 control,
3495 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3496 }
3497
3498
/*
 * Central dispatcher for upper-layer-protocol notifications: translates
 * an SCTP_NOTIFY_* code plus opaque 'data' into the corresponding
 * socket-level event (assoc change, address change, send failure,
 * stream reset, etc.).  Events are suppressed entirely when the socket
 * is gone/closed or can no longer receive.
 */
void
sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if ((stcb == NULL) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* If the socket is gone we are out of here */
		return;
	}
	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
		return;
	}
	/*
	 * NOTE(review): the 'stcb &&' below is redundant -- stcb was
	 * already verified non-NULL (and dereferenced) above.
	 */
	if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED))) {
		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
			/* Don't report these in front states */
			return;
		}
	}
	switch (notification) {
	case SCTP_NOTIFY_ASSOC_UP:
		/* COMM_UP is reported at most once per association */
		if (stcb->asoc.assoc_up_sent == 0) {
			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, so_locked);
			stcb->asoc.assoc_up_sent = 1;
		}
		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
			sctp_notify_adaptation_layer(stcb, error);
		}
		if (stcb->asoc.peer_supports_auth == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_DOWN:
		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, so_locked);
		break;
	case SCTP_NOTIFY_INTERFACE_DOWN:
		{
			struct sctp_nets *net;

			/* 'data' is the affected destination (sctp_nets) */
			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
			    (struct sockaddr *)&net->ro._l_addr, error);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_UP:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
			    (struct sockaddr *)&net->ro._l_addr, error);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
			    (struct sockaddr *)&net->ro._l_addr, error);
			break;
		}
	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
		/* 'data' is an un-sent stream-queue entry */
		sctp_notify_send_failed2(stcb, error,
		    (struct sctp_stream_queue_pending *)data, so_locked);
		break;
	case SCTP_NOTIFY_DG_FAIL:
		/* 'data' is a chunk that could not be delivered */
		sctp_notify_send_failed(stcb, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:	/* (sic) historic typo in constant name */
		{
			uint32_t val;

			/* 'data' points at a packed stream/seq value */
			val = *((uint32_t *) data);

			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
			break;
		}
	case SCTP_NOTIFY_STRDATA_ERR:
		break;
	case SCTP_NOTIFY_ASSOC_ABORTED:
		/* CANT_STR_ASSOC if we never got established; else COMM_LOST */
		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, NULL, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_PEER_OPENED_STREAM:
		break;
	case SCTP_NOTIFY_STREAM_OPENED_OK:
		break;
	case SCTP_NOTIFY_ASSOC_RESTART:
		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, data, so_locked);
		if (stcb->asoc.peer_supports_auth == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_HB_RESP:
		break;
	case SCTP_NOTIFY_STR_RESET_INSTREAM_ADD_OK:
		/* for ADD events 'error' carries the number of streams */
		sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_INBOUND_STR);
		break;
	case SCTP_NOTIFY_STR_RESET_ADD_OK:
		sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_OUTBOUND_STR);
		break;
	case SCTP_NOTIFY_STR_RESET_ADD_FAIL:
		sctp_notify_stream_reset_add(stcb, error, (SCTP_STRRESET_FAILED | SCTP_STRRESET_OUTBOUND_STR));
		break;

	case SCTP_NOTIFY_STR_RESET_SEND:
		/* for reset events 'error' is the entry count, 'data' the id list */
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_OUTBOUND_STR);
		break;
	case SCTP_NOTIFY_STR_RESET_RECV:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_INBOUND_STR);
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_OUTBOUND_STR | SCTP_STRRESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_INBOUND_STR | SCTP_STRRESET_FAILED));
		break;
	case SCTP_NOTIFY_ASCONF_ADD_IP:
		/* 'data' is a sockaddr for the ASCONF cases */
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
		    error);
		break;
	case SCTP_NOTIFY_ASCONF_DELETE_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
		    error);
		break;
	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
		    error);
		break;
	case SCTP_NOTIFY_ASCONF_SUCCESS:
		break;
	case SCTP_NOTIFY_ASCONF_FAILED:
		break;
	case SCTP_NOTIFY_PEER_SHUTDOWN:
		sctp_notify_shutdown_event(stcb);
		break;
	case SCTP_NOTIFY_AUTH_NEW_KEY:
		/* 'data' smuggles the key id through the pointer value */
		sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_AUTH_FREE_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_NO_PEER_AUTH:
		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_SENDER_DRY:
		sctp_notify_sender_dry_event(stcb, so_locked);
		break;
	default:
		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
		    __FUNCTION__, notification, notification);
		break;
	}			/* end switch */
}
3675
3676 void
3677 sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock, int so_locked
3678 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3679 SCTP_UNUSED
3680 #endif
3681 )
3682 {
3683 struct sctp_association *asoc;
3684 struct sctp_stream_out *outs;
3685 struct sctp_tmit_chunk *chk;
3686 struct sctp_stream_queue_pending *sp;
3687 int i;
3688
3689 asoc = &stcb->asoc;
3690
3691 if (stcb == NULL) {
3692 return;
3693 }
3694 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3695 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3696 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3697 return;
3698 }
3699 /* now through all the gunk freeing chunks */
3700 if (holds_lock == 0) {
3701 SCTP_TCB_SEND_LOCK(stcb);
3702 }
3703 /* sent queue SHOULD be empty */
3704 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3705 chk = TAILQ_FIRST(&asoc->sent_queue);
3706 while (chk) {
3707 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3708 asoc->sent_queue_cnt--;
3709 if (chk->data != NULL) {
3710 sctp_free_bufspace(stcb, asoc, chk, 1);
3711 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3712 SCTP_NOTIFY_DATAGRAM_SENT, chk, so_locked);
3713 if (chk->data) {
3714 sctp_m_freem(chk->data);
3715 chk->data = NULL;
3716 }
3717 }
3718 sctp_free_a_chunk(stcb, chk);
3719 /* sa_ignore FREED_MEMORY */
3720 chk = TAILQ_FIRST(&asoc->sent_queue);
3721 }
3722 }
3723 /* pending send queue SHOULD be empty */
3724 if (!TAILQ_EMPTY(&asoc->send_queue)) {
3725 chk = TAILQ_FIRST(&asoc->send_queue);
3726 while (chk) {
3727 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3728 asoc->send_queue_cnt--;
3729 if (chk->data != NULL) {
3730 sctp_free_bufspace(stcb, asoc, chk, 1);
3731 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3732 SCTP_NOTIFY_DATAGRAM_UNSENT, chk, so_locked);
3733 if (chk->data) {
3734 sctp_m_freem(chk->data);
3735 chk->data = NULL;
3736 }
3737 }
3738 sctp_free_a_chunk(stcb, chk);
3739 /* sa_ignore FREED_MEMORY */
3740 chk = TAILQ_FIRST(&asoc->send_queue);
3741 }
3742 }
3743 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3744 /* For each stream */
3745 outs = &stcb->asoc.strmout[i];
3746 /* clean up any sends there */
3747 stcb->asoc.locked_on_sending = NULL;
3748 sp = TAILQ_FIRST(&outs->outqueue);
3749 while (sp) {
3750 stcb->asoc.stream_queue_cnt--;
3751 TAILQ_REMOVE(&outs->outqueue, sp, next);
3752 sctp_free_spbufspace(stcb, asoc, sp);
3753 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3754 SCTP_NOTIFY_DATAGRAM_UNSENT, (void *)sp, so_locked);
3755 if (sp->data) {
3756 sctp_m_freem(sp->data);
3757 sp->data = NULL;
3758 }
3759 if (sp->net)
3760 sctp_free_remote_addr(sp->net);
3761 sp->net = NULL;
3762 /* Free the chunk */
3763 sctp_free_a_strmoq(stcb, sp);
3764 /* sa_ignore FREED_MEMORY */
3765 sp = TAILQ_FIRST(&outs->outqueue);
3766 }
3767 }
3768
3769 if (holds_lock == 0) {
3770 SCTP_TCB_SEND_UNLOCK(stcb);
3771 }
3772 }
3773
3774 void
3775 sctp_abort_notification(struct sctp_tcb *stcb, int error, int so_locked
3776 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3777 SCTP_UNUSED
3778 #endif
3779 )
3780 {
3781
3782 if (stcb == NULL) {
3783 return;
3784 }
3785 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3786 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3787 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3788 return;
3789 }
3790 /* Tell them we lost the asoc */
3791 sctp_report_all_outbound(stcb, 1, so_locked);
3792 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3793 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3794 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3795 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3796 }
3797 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL, so_locked);
3798 }
3799
/*
 * Abort an association in response to an inbound packet: notify the
 * ULP (if a TCB exists), send an ABORT chunk back to the peer using
 * the peer's verification tag (or 0 when no TCB is known), then free
 * the association.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	sctp_send_abort(m, iphlen, sh, vtag, op_err, vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Lock-order dance: take a TCB refcount, drop the TCB
		 * lock, take the socket lock, re-take the TCB lock so
		 * sctp_free_assoc() runs with both held in order.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
3838
#ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug helper: dump the in/out TSN circular logs for an association.
 * Each log is a ring buffer; when it has wrapped we print from the
 * current index to the end first, then from 0 to the current index.
 *
 * NOTE(review): the inner guard "NOSIY_PRINTS" looks like a typo of
 * NOISY_PRINTS -- nothing visible here defines either spelling, so the
 * body is effectively compiled out.  Left untouched since renaming the
 * guard could change existing build configurations; confirm intent
 * before fixing.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	if (stcb->asoc.tsn_in_wrapped) {
		/* ring has wrapped: older half is [tsn_in_at, end) */
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}

#endif
3902
/*
 * Actively abort an existing association: notify the ULP, send an
 * ABORT chunk (with optional operational-error cause 'op_err') to the
 * peer, update statistics, and free the association.  When stcb is
 * NULL only the endpoint cleanup path is taken.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    int error, struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				/* last assoc already gone -- tear down the endpoint */
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	vtag = stcb->asoc.peer_vtag;
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
		sctp_abort_notification(stcb, error, so_locked);
	/* notify the peer */
#if defined(SCTP_PANIC_ON_ABORT)
	/* debug build option: die loudly instead of aborting quietly */
	panic("aborting an association");
#endif
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Lock-order dance: take a TCB refcount, drop the TCB lock,
	 * take the socket lock, then re-take the TCB lock.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
3968
/*
 * Handle an "out of the blue" packet (one that matches no known
 * association).  Walk the chunk list and, per RFC 4960 section 8.4,
 * decide whether to respond: most chunk types elicit an ABORT, but we
 * stay silent for ABORT / SHUTDOWN COMPLETE / PACKET DROPPED / COOKIE
 * ECHO, and answer SHUTDOWN ACK with SHUTDOWN COMPLETE.
 */
void
sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
    struct sctp_inpcb *inp, struct mbuf *op_err, uint32_t vrf_id, uint16_t port)
{
	struct sctp_chunkhdr *ch, chunk_buf;
	unsigned int chk_length;

	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
	/* Generate a TO address for future reference */
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
			/* endpoint is going away with no assocs left */
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
			    SCTP_CALLED_DIRECTLY_NOCMPSET);
		}
	}
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
	    sizeof(*ch), (uint8_t *) & chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* break to abort land */
			break;
		}
		switch (ch->chunk_type) {
		case SCTP_COOKIE_ECHO:
			/* We hit here only if the assoc is being freed */
			return;
		case SCTP_PACKET_DROPPED:
			/* we don't respond to pkt-dropped */
			return;
		case SCTP_ABORT_ASSOCIATION:
			/* we don't respond with an ABORT to an ABORT */
			return;
		case SCTP_SHUTDOWN_COMPLETE:
			/*
			 * we ignore it since we are not waiting for it and
			 * peer is gone
			 */
			return;
		case SCTP_SHUTDOWN_ACK:
			sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id, port);
			return;
		default:
			break;
		}
		/* advance to the next chunk (lengths are 4-byte padded) */
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *) & chunk_buf);
	}
	/* no silent-discard chunk seen: respond with an ABORT (vtag 0) */
	sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, port);
}
4020
4021 /*
4022 * check the inbound datagram to make sure there is not an abort inside it,
4023 * if there is return 1, else return 0.
4024 */
4025 int
4026 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4027 {
4028 struct sctp_chunkhdr *ch;
4029 struct sctp_init_chunk *init_chk, chunk_buf;
4030 int offset;
4031 unsigned int chk_length;
4032
4033 offset = iphlen + sizeof(struct sctphdr);
4034 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4035 (uint8_t *) & chunk_buf);
4036 while (ch != NULL) {
4037 chk_length = ntohs(ch->chunk_length);
4038 if (chk_length < sizeof(*ch)) {
4039 /* packet is probably corrupt */
4040 break;
4041 }
4042 /* we seem to be ok, is it an abort? */
4043 if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4044 /* yep, tell them */
4045 return (1);
4046 }
4047 if (ch->chunk_type == SCTP_INITIATION) {
4048 /* need to update the Vtag */
4049 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4050 offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4051 if (init_chk != NULL) {
4052 *vtagfill = ntohl(init_chk->init.initiate_tag);
4053 }
4054 }
4055 /* Nope, move to the next chunk */
4056 offset += SCTP_SIZE32(chk_length);
4057 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4058 sizeof(*ch), (uint8_t *) & chunk_buf);
4059 }
4060 return (0);
4061 }
4062
4063 /*
4064 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4065 * set (i.e. it's 0) so, create this function to compare link local scopes
4066 */
4067 #ifdef INET6
4068 uint32_t
4069 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4070 {
4071 struct sockaddr_in6 a, b;
4072
4073 /* save copies */
4074 a = *addr1;
4075 b = *addr2;
4076
4077 if (a.sin6_scope_id == 0)
4078 if (sa6_recoverscope(&a)) {
4079 /* can't get scope, so can't match */
4080 return (0);
4081 }
4082 if (b.sin6_scope_id == 0)
4083 if (sa6_recoverscope(&b)) {
4084 /* can't get scope, so can't match */
4085 return (0);
4086 }
4087 if (a.sin6_scope_id != b.sin6_scope_id)
4088 return (0);
4089
4090 return (1);
4091 }
4092
4093 /*
4094 * returns a sockaddr_in6 with embedded scope recovered and removed
4095 */
4096 struct sockaddr_in6 *
4097 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4098 {
4099 /* check and strip embedded scope junk */
4100 if (addr->sin6_family == AF_INET6) {
4101 if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4102 if (addr->sin6_scope_id == 0) {
4103 *store = *addr;
4104 if (!sa6_recoverscope(store)) {
4105 /* use the recovered scope */
4106 addr = store;
4107 }
4108 } else {
4109 /* else, return the original "to" addr */
4110 in6_clearscope(&addr->sin6_addr);
4111 }
4112 }
4113 }
4114 return (addr);
4115 }
4116
4117 #endif
4118
4119 /*
4120 * are the two addresses the same? currently a "scopeless" check returns: 1
4121 * if same, 0 if not
4122 */
4123 int
4124 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4125 {
4126
4127 /* must be valid */
4128 if (sa1 == NULL || sa2 == NULL)
4129 return (0);
4130
4131 /* must be the same family */
4132 if (sa1->sa_family != sa2->sa_family)
4133 return (0);
4134
4135 switch (sa1->sa_family) {
4136 #ifdef INET6
4137 case AF_INET6:
4138 {
4139 /* IPv6 addresses */
4140 struct sockaddr_in6 *sin6_1, *sin6_2;
4141
4142 sin6_1 = (struct sockaddr_in6 *)sa1;
4143 sin6_2 = (struct sockaddr_in6 *)sa2;
4144 return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4145 sin6_2));
4146 }
4147 #endif
4148 case AF_INET:
4149 {
4150 /* IPv4 addresses */
4151 struct sockaddr_in *sin_1, *sin_2;
4152
4153 sin_1 = (struct sockaddr_in *)sa1;
4154 sin_2 = (struct sockaddr_in *)sa2;
4155 return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4156 }
4157 default:
4158 /* we don't do these... */
4159 return (0);
4160 }
4161 }
4162
4163 void
4164 sctp_print_address(struct sockaddr *sa)
4165 {
4166 #ifdef INET6
4167 char ip6buf[INET6_ADDRSTRLEN];
4168
4169 ip6buf[0] = 0;
4170 #endif
4171
4172 switch (sa->sa_family) {
4173 #ifdef INET6
4174 case AF_INET6:
4175 {
4176 struct sockaddr_in6 *sin6;
4177
4178 sin6 = (struct sockaddr_in6 *)sa;
4179 SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4180 ip6_sprintf(ip6buf, &sin6->sin6_addr),
4181 ntohs(sin6->sin6_port),
4182 sin6->sin6_scope_id);
4183 break;
4184 }
4185 #endif
4186 case AF_INET:
4187 {
4188 struct sockaddr_in *sin;
4189 unsigned char *p;
4190
4191 sin = (struct sockaddr_in *)sa;
4192 p = (unsigned char *)&sin->sin_addr;
4193 SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4194 p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4195 break;
4196 }
4197 default:
4198 SCTP_PRINTF("?\n");
4199 break;
4200 }
4201 }
4202
4203 void
4204 sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
4205 {
4206 switch (iph->ip_v) {
4207 case IPVERSION:
4208 {
4209 struct sockaddr_in lsa, fsa;
4210
4211 bzero(&lsa, sizeof(lsa));
4212 lsa.sin_len = sizeof(lsa);
4213 lsa.sin_family = AF_INET;
4214 lsa.sin_addr = iph->ip_src;
4215 lsa.sin_port = sh->src_port;
4216 bzero(&fsa, sizeof(fsa));
4217 fsa.sin_len = sizeof(fsa);
4218 fsa.sin_family = AF_INET;
4219 fsa.sin_addr = iph->ip_dst;
4220 fsa.sin_port = sh->dest_port;
4221 SCTP_PRINTF("src: ");
4222 sctp_print_address((struct sockaddr *)&lsa);
4223 SCTP_PRINTF("dest: ");
4224 sctp_print_address((struct sockaddr *)&fsa);
4225 break;
4226 }
4227 #ifdef INET6
4228 case IPV6_VERSION >> 4:
4229 {
4230 struct ip6_hdr *ip6;
4231 struct sockaddr_in6 lsa6, fsa6;
4232
4233 ip6 = (struct ip6_hdr *)iph;
4234 bzero(&lsa6, sizeof(lsa6));
4235 lsa6.sin6_len = sizeof(lsa6);
4236 lsa6.sin6_family = AF_INET6;
4237 lsa6.sin6_addr = ip6->ip6_src;
4238 lsa6.sin6_port = sh->src_port;
4239 bzero(&fsa6, sizeof(fsa6));
4240 fsa6.sin6_len = sizeof(fsa6);
4241 fsa6.sin6_family = AF_INET6;
4242 fsa6.sin6_addr = ip6->ip6_dst;
4243 fsa6.sin6_port = sh->dest_port;
4244 SCTP_PRINTF("src: ");
4245 sctp_print_address((struct sockaddr *)&lsa6);
4246 SCTP_PRINTF("dest: ");
4247 sctp_print_address((struct sockaddr *)&fsa6);
4248 break;
4249 }
4250 #endif
4251 default:
4252 /* TSNH */
4253 break;
4254 }
4255 }
4256
/*
 * Move every queued-to-read control structure belonging to stcb from
 * old_inp's read queue onto new_inp's read queue, transferring the
 * socket-buffer accounting for the attached mbuf chains along with them.
 * Used when an association is moved to a new endpoint (peeloff/accept
 * path, per the inline comments).  waitflags is passed through to
 * sblock() on the old socket's receive buffer.
 */
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
    /*
     * go through our old INP and pull off any control structures that
     * belong to stcb and move then to the new inp.
     */
    struct socket *old_so, *new_so;
    struct sctp_queued_to_read *control, *nctl;
    struct sctp_readhead tmp_queue;
    struct mbuf *m;
    int error = 0;

    old_so = old_inp->sctp_socket;
    new_so = new_inp->sctp_socket;
    TAILQ_INIT(&tmp_queue);
    /* sb-lock the old receive buffer so no reader races the migration */
    error = sblock(&old_so->so_rcv, waitflags);
    if (error) {
        /*
         * Gak, can't get sblock, we have a problem. data will be
         * left stranded.. and we don't dare look at it since the
         * other thread may be reading something. Oh well, its a
         * screwed up app that does a peeloff OR a accept while
         * reading from the main socket... actually its only the
         * peeloff() case, since I think read will fail on a
         * listening socket..
         */
        return;
    }
    /* lock the socket buffers */
    SCTP_INP_READ_LOCK(old_inp);
    control = TAILQ_FIRST(&old_inp->read_queue);
    /* Pull off all for out target stcb */
    while (control) {
        nctl = TAILQ_NEXT(control, next);   /* save next before unlinking */
        if (control->stcb == stcb) {
            /* remove it we want it */
            TAILQ_REMOVE(&old_inp->read_queue, control, next);
            TAILQ_INSERT_TAIL(&tmp_queue, control, next);
            m = control->data;
            /* un-account each mbuf from the old socket's rcv buffer */
            while (m) {
                if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
                    sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
                }
                sctp_sbfree(control, stcb, &old_so->so_rcv, m);
                if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
                    sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
                }
                m = SCTP_BUF_NEXT(m);
            }
        }
        control = nctl;
    }
    SCTP_INP_READ_UNLOCK(old_inp);
    /* Remove the sb-lock on the old socket */

    sbunlock(&old_so->so_rcv);
    /* Now we move them over to the new socket buffer */
    control = TAILQ_FIRST(&tmp_queue);
    SCTP_INP_READ_LOCK(new_inp);
    while (control) {
        nctl = TAILQ_NEXT(control, next);
        TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
        m = control->data;
        /* account each mbuf into the new socket's rcv buffer */
        while (m) {
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
                sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
            }
            sctp_sballoc(stcb, &new_so->so_rcv, m);
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
                sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
            }
            m = SCTP_BUF_NEXT(m);
        }
        control = nctl;
    }
    SCTP_INP_READ_UNLOCK(new_inp);
}
4338
/*
 * Append a control structure (and its mbuf chain) to the endpoint's read
 * queue, accounting the data into socket buffer sb so select()/read() see
 * it.  Zero-length mbufs are stripped from the chain on the way in.  If
 * end is set the message is marked complete (end_added).  The caller
 * indicates via inp_read_lock_held whether it already holds the INP read
 * lock; so_locked is only meaningful on platforms that take the socket
 * lock explicitly (__APPLE__ / SCTP_SO_LOCK_TESTING).
 */
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
    /*
     * Here we must place the control on the end of the socket read
     * queue AND increment sb_cc so that select will work properly on
     * read.
     */
    struct mbuf *m, *prev = NULL;

    if (inp == NULL) {
        /* Gak, TSNH!! */
#ifdef INVARIANTS
        panic("Gak, inp NULL on add_to_readq");
#endif
        return;
    }
    if (inp_read_lock_held == 0)
        SCTP_INP_READ_LOCK(inp);
    if (!(control->spec_flags & M_NOTIFICATION)) {
        /* only real data, not notifications, counts in recv stats */
        atomic_add_int(&inp->total_recvs, 1);
        if (!control->do_not_ref_stcb) {
            atomic_add_int(&stcb->total_recvs, 1);
        }
    }
    m = control->data;
    control->held_length = 0;
    control->length = 0;
    /* walk the chain: free zero-length mbufs, account the rest into sb */
    while (m) {
        if (SCTP_BUF_LEN(m) == 0) {
            /* Skip mbufs with NO length */
            if (prev == NULL) {
                /* First one */
                control->data = sctp_m_free(m);
                m = control->data;
            } else {
                SCTP_BUF_NEXT(prev) = sctp_m_free(m);
                m = SCTP_BUF_NEXT(prev);
            }
            if (m == NULL) {
                control->tail_mbuf = prev;
            }
            continue;
        }
        prev = m;
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
            sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
        }
        sctp_sballoc(stcb, sb, m);
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
            sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
        }
        atomic_add_int(&control->length, SCTP_BUF_LEN(m));
        m = SCTP_BUF_NEXT(m);
    }
    if (prev != NULL) {
        control->tail_mbuf = prev;
    } else {
        /* Everything got collapsed out?? */
        if (inp_read_lock_held == 0)
            SCTP_INP_READ_UNLOCK(inp);
        return;
    }
    if (end) {
        control->end_added = 1;
    }
    TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
    if (inp_read_lock_held == 0)
        SCTP_INP_READ_UNLOCK(inp);
    /* wake any reader sleeping on the socket */
    if (inp && inp->sctp_socket) {
        if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
            SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
        } else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
            struct socket *so;

            so = SCTP_INP_SO(inp);
            if (!so_locked) {
                /* hold a ref so the assoc can't vanish while unlocked */
                atomic_add_int(&stcb->asoc.refcnt, 1);
                SCTP_TCB_UNLOCK(stcb);
                SCTP_SOCKET_LOCK(so, 1);
                SCTP_TCB_LOCK(stcb);
                atomic_subtract_int(&stcb->asoc.refcnt, 1);
                if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
                    /* socket went away while we juggled locks */
                    SCTP_SOCKET_UNLOCK(so, 1);
                    return;
                }
            }
#endif
            sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
            if (!so_locked) {
                SCTP_SOCKET_UNLOCK(so, 1);
            }
#endif
        }
    }
}
4447
4448
/*
 * Append mbuf chain m to an existing read-queue control structure
 * (partial-delivery API, or appending on the reassembly queue).  When sb
 * is non-NULL the appended bytes are also accounted into that socket
 * buffer.  ctls_cumack becomes the control's sinfo_tsn/sinfo_cumtsn (the
 * PD-API highest TSN on this mbuf — see comment below).  Returns 0 on
 * success, -1 if the control is missing, already complete, or m is NULL.
 */
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
    /*
     * A partial delivery API event is underway. OR we are appending on
     * the reassembly queue.
     *
     * If PDAPI this means we need to add m to the end of the data.
     * Increase the length in the control AND increment the sb_cc.
     * Otherwise sb is NULL and all we need to do is put it at the end
     * of the mbuf chain.
     */
    int len = 0;
    struct mbuf *mm, *tail = NULL, *prev = NULL;

    if (inp) {
        SCTP_INP_READ_LOCK(inp);
    }
    if (control == NULL) {
get_out:
        if (inp) {
            SCTP_INP_READ_UNLOCK(inp);
        }
        return (-1);
    }
    if (control->end_added) {
        /* huh this one is complete? */
        goto get_out;
    }
    mm = m;
    if (mm == NULL) {
        goto get_out;
    }
    /* strip zero-length mbufs; tally and (optionally) sb-account the rest */
    while (mm) {
        if (SCTP_BUF_LEN(mm) == 0) {
            /* Skip mbufs with NO length */
            if (prev == NULL) {
                /* First one */
                m = sctp_m_free(mm);
                mm = m;
            } else {
                SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
                mm = SCTP_BUF_NEXT(prev);
            }
            continue;
        }
        prev = mm;
        len += SCTP_BUF_LEN(mm);
        if (sb) {
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
                sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
            }
            sctp_sballoc(stcb, sb, mm);
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
                sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
            }
        }
        mm = SCTP_BUF_NEXT(mm);
    }
    if (prev) {
        tail = prev;
    } else {
        /* Really there should always be a prev */
        if (m == NULL) {
            /* Huh nothing left? */
#ifdef INVARIANTS
            panic("Nothing left to add?");
#else
            goto get_out;
#endif
        }
        tail = m;
    }
    if (control->tail_mbuf) {
        /* append */
        SCTP_BUF_NEXT(control->tail_mbuf) = m;
        control->tail_mbuf = tail;
    } else {
        /* nothing there */
#ifdef INVARIANTS
        if (control->data != NULL) {
            panic("This should NOT happen");
        }
#endif
        control->data = m;
        control->tail_mbuf = tail;
    }
    atomic_add_int(&control->length, len);
    if (end) {
        /* message is complete */
        if (stcb && (control == stcb->asoc.control_pdapi)) {
            stcb->asoc.control_pdapi = NULL;
        }
        control->held_length = 0;
        control->end_added = 1;
    }
    if (stcb == NULL) {
        control->do_not_ref_stcb = 1;
    }
    /*
     * When we are appending in partial delivery, the cum-ack is used
     * for the actual pd-api highest tsn on this mbuf. The true cum-ack
     * is populated in the outbound sinfo structure from the true cumack
     * if the association exists...
     */
    control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
    if (inp) {
        SCTP_INP_READ_UNLOCK(inp);
    }
    /* wake any reader sleeping on the socket */
    if (inp && inp->sctp_socket) {
        if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
            SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
        } else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
            struct socket *so;

            so = SCTP_INP_SO(inp);
            /* hold a ref so the assoc can't vanish while unlocked */
            atomic_add_int(&stcb->asoc.refcnt, 1);
            SCTP_TCB_UNLOCK(stcb);
            SCTP_SOCKET_LOCK(so, 1);
            SCTP_TCB_LOCK(stcb);
            atomic_subtract_int(&stcb->asoc.refcnt, 1);
            if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
                /* socket went away while we juggled locks */
                SCTP_SOCKET_UNLOCK(so, 1);
                return (0);
            }
#endif
            sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
            SCTP_SOCKET_UNLOCK(so, 1);
#endif
        }
    }
    return (0);
}
4590
4591
4592
4593 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4594 *************ALTERNATE ROUTING CODE
4595 */
4596
4597 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4598 *************ALTERNATE ROUTING CODE
4599 */
4600
4601 struct mbuf *
4602 sctp_generate_invmanparam(int err)
4603 {
4604 /* Return a MBUF with a invalid mandatory parameter */
4605 struct mbuf *m;
4606
4607 m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
4608 if (m) {
4609 struct sctp_paramhdr *ph;
4610
4611 SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
4612 ph = mtod(m, struct sctp_paramhdr *);
4613 ph->param_length = htons(sizeof(struct sctp_paramhdr));
4614 ph->param_type = htons(err);
4615 }
4616 return (m);
4617 }
4618
4619 #ifdef SCTP_MBCNT_LOGGING
/*
 * Release the output-queue accounting held by chunk tp1: drop chk_cnt from
 * the association's chunks_on_out_queue, subtract tp1's book_size from
 * total_output_queue_size, and (for TCP-model sockets) from the socket
 * send buffer's sb_cc.  Only compiled when SCTP_MBCNT_LOGGING is defined;
 * a non-logging variant presumably exists elsewhere — see the enclosing
 * #ifdef.
 */
void
sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *tp1, int chk_cnt)
{
    if (tp1->data == NULL) {
        /* nothing queued for this chunk, nothing to un-account */
        return;
    }
    asoc->chunks_on_out_queue -= chk_cnt;
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
        sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
            asoc->total_output_queue_size,
            tp1->book_size,
            0,
            tp1->mbcnt);
    }
    /* clamp at zero rather than letting the counter go below it */
    if (asoc->total_output_queue_size >= tp1->book_size) {
        atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
    } else {
        asoc->total_output_queue_size = 0;
    }

    /* TCP-model sockets also carry these bytes in the send buffer */
    if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
        ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
        if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
            stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
        } else {
            stcb->sctp_socket->so_snd.sb_cc = 0;

        }
    }
}
4651
4652 #endif
4653
4654 int
4655 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4656 int reason, int so_locked
4657 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4658 SCTP_UNUSED
4659 #endif
4660 )
4661 {
4662 struct sctp_stream_out *strq;
4663 struct sctp_tmit_chunk *chk = NULL;
4664 struct sctp_stream_queue_pending *sp;
4665 uint16_t stream = 0, seq = 0;
4666 uint8_t foundeom = 0;
4667 int ret_sz = 0;
4668 int notdone;
4669 int do_wakeup_routine = 0;
4670
4671 stream = tp1->rec.data.stream_number;
4672 seq = tp1->rec.data.stream_seq;
4673 do {
4674 ret_sz += tp1->book_size;
4675 if (tp1->data != NULL) {
4676 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4677 sctp_flight_size_decrease(tp1);
4678 sctp_total_flight_decrease(stcb, tp1);
4679 }
4680 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4681 stcb->asoc.peers_rwnd += tp1->send_size;
4682 stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4683 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
4684 if (tp1->data) {
4685 sctp_m_freem(tp1->data);
4686 tp1->data = NULL;
4687 }
4688 do_wakeup_routine = 1;
4689 if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4690 stcb->asoc.sent_queue_cnt_removeable--;
4691 }
4692 }
4693 tp1->sent = SCTP_FORWARD_TSN_SKIP;
4694 if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4695 SCTP_DATA_NOT_FRAG) {
4696 /* not frag'ed we ae done */
4697 notdone = 0;
4698 foundeom = 1;
4699 } else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4700 /* end of frag, we are done */
4701 notdone = 0;
4702 foundeom = 1;
4703 } else {
4704 /*
4705 * Its a begin or middle piece, we must mark all of
4706 * it
4707 */
4708 notdone = 1;
4709 tp1 = TAILQ_NEXT(tp1, sctp_next);
4710 }
4711 } while (tp1 && notdone);
4712 if (foundeom == 0) {
4713 /*
4714 * The multi-part message was scattered across the send and
4715 * sent queue.
4716 */
4717 next_on_sent:
4718 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
4719 /*
4720 * recurse throught the send_queue too, starting at the
4721 * beginning.
4722 */
4723 if ((tp1) &&
4724 (tp1->rec.data.stream_number == stream) &&
4725 (tp1->rec.data.stream_seq == seq)) {
4726 /*
4727 * save to chk in case we have some on stream out
4728 * queue. If so and we have an un-transmitted one we
4729 * don't have to fudge the TSN.
4730 */
4731 chk = tp1;
4732 ret_sz += tp1->book_size;
4733 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4734 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
4735 if (tp1->data) {
4736 sctp_m_freem(tp1->data);
4737 tp1->data = NULL;
4738 }
4739 /* No flight involved here book the size to 0 */
4740 tp1->book_size = 0;
4741 if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4742 foundeom = 1;
4743 }
4744 do_wakeup_routine = 1;
4745 tp1->sent = SCTP_FORWARD_TSN_SKIP;
4746 TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4747 /*
4748 * on to the sent queue so we can wait for it to be
4749 * passed by.
4750 */
4751 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4752 sctp_next);
4753 stcb->asoc.send_queue_cnt--;
4754 stcb->asoc.sent_queue_cnt++;
4755 goto next_on_sent;
4756 }
4757 }
4758 if (foundeom == 0) {
4759 /*
4760 * Still no eom found. That means there is stuff left on the
4761 * stream out queue.. yuck.
4762 */
4763 strq = &stcb->asoc.strmout[stream];
4764 SCTP_TCB_SEND_LOCK(stcb);
4765 sp = TAILQ_FIRST(&strq->outqueue);
4766 while (sp->strseq <= seq) {
4767 /* Check if its our SEQ */
4768 if (sp->strseq == seq) {
4769 sp->discard_rest = 1;
4770 /*
4771 * We may need to put a chunk on the queue
4772 * that holds the TSN that would have been
4773 * sent with the LAST bit.
4774 */
4775 if (chk == NULL) {
4776 /* Yep, we have to */
4777 sctp_alloc_a_chunk(stcb, chk);
4778 if (chk == NULL) {
4779 /*
4780 * we are hosed. All we can
4781 * do is nothing.. which
4782 * will cause an abort if
4783 * the peer is paying
4784 * attention.
4785 */
4786 goto oh_well;
4787 }
4788 memset(chk, 0, sizeof(*chk));
4789 chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
4790 chk->sent = SCTP_FORWARD_TSN_SKIP;
4791 chk->asoc = &stcb->asoc;
4792 chk->rec.data.stream_seq = sp->strseq;
4793 chk->rec.data.stream_number = sp->stream;
4794 chk->rec.data.payloadtype = sp->ppid;
4795 chk->rec.data.context = sp->context;
4796 chk->flags = sp->act_flags;
4797 chk->whoTo = sp->net;
4798 atomic_add_int(&chk->whoTo->ref_count, 1);
4799 chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4800 stcb->asoc.pr_sctp_cnt++;
4801 chk->pr_sctp_on = 1;
4802 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4803 stcb->asoc.sent_queue_cnt++;
4804 stcb->asoc.pr_sctp_cnt++;
4805 } else {
4806 chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4807 }
4808 oh_well:
4809 if (sp->data) {
4810 /*
4811 * Pull any data to free up the SB
4812 * and allow sender to "add more"
4813 * whilc we will throw away :-)
4814 */
4815 sctp_free_spbufspace(stcb, &stcb->asoc,
4816 sp);
4817 ret_sz += sp->length;
4818 do_wakeup_routine = 1;
4819 sp->some_taken = 1;
4820 sctp_m_freem(sp->data);
4821 sp->length = 0;
4822 sp->data = NULL;
4823 sp->tail_mbuf = NULL;
4824 }
4825 break;
4826 } else {
4827 /* Next one please */
4828 sp = TAILQ_NEXT(sp, next);
4829 }
4830 } /* End while */
4831 SCTP_TCB_SEND_UNLOCK(stcb);
4832 }
4833 if (do_wakeup_routine) {
4834 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4835 struct socket *so;
4836
4837 so = SCTP_INP_SO(stcb->sctp_ep);
4838 if (!so_locked) {
4839 atomic_add_int(&stcb->asoc.refcnt, 1);
4840 SCTP_TCB_UNLOCK(stcb);
4841 SCTP_SOCKET_LOCK(so, 1);
4842 SCTP_TCB_LOCK(stcb);
4843 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4844 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4845 /* assoc was freed while we were unlocked */
4846 SCTP_SOCKET_UNLOCK(so, 1);
4847 return (ret_sz);
4848 }
4849 }
4850 #endif
4851 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4852 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4853 if (!so_locked) {
4854 SCTP_SOCKET_UNLOCK(so, 1);
4855 }
4856 #endif
4857 }
4858 return (ret_sz);
4859 }
4860
4861 /*
4862 * checks to see if the given address, sa, is one that is currently known by
4863 * the kernel note: can't distinguish the same address on multiple interfaces
4864 * and doesn't handle multiple addresses with different zone/scope id's note:
4865 * ifa_ifwithaddr() compares the entire sockaddr struct
4866 */
4867 struct sctp_ifa *
4868 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4869 int holds_lock)
4870 {
4871 struct sctp_laddr *laddr;
4872
4873 if (holds_lock == 0) {
4874 SCTP_INP_RLOCK(inp);
4875 }
4876 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4877 if (laddr->ifa == NULL)
4878 continue;
4879 if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4880 continue;
4881 if (addr->sa_family == AF_INET) {
4882 if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4883 laddr->ifa->address.sin.sin_addr.s_addr) {
4884 /* found him. */
4885 if (holds_lock == 0) {
4886 SCTP_INP_RUNLOCK(inp);
4887 }
4888 return (laddr->ifa);
4889 break;
4890 }
4891 }
4892 #ifdef INET6
4893 if (addr->sa_family == AF_INET6) {
4894 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4895 &laddr->ifa->address.sin6)) {
4896 /* found him. */
4897 if (holds_lock == 0) {
4898 SCTP_INP_RUNLOCK(inp);
4899 }
4900 return (laddr->ifa);
4901 break;
4902 }
4903 }
4904 #endif
4905 }
4906 if (holds_lock == 0) {
4907 SCTP_INP_RUNLOCK(inp);
4908 }
4909 return (NULL);
4910 }
4911
4912 uint32_t
4913 sctp_get_ifa_hash_val(struct sockaddr *addr)
4914 {
4915 if (addr->sa_family == AF_INET) {
4916 struct sockaddr_in *sin;
4917
4918 sin = (struct sockaddr_in *)addr;
4919 return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
4920 } else if (addr->sa_family == AF_INET6) {
4921 struct sockaddr_in6 *sin6;
4922 uint32_t hash_of_addr;
4923
4924 sin6 = (struct sockaddr_in6 *)addr;
4925 hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
4926 sin6->sin6_addr.s6_addr32[1] +
4927 sin6->sin6_addr.s6_addr32[2] +
4928 sin6->sin6_addr.s6_addr32[3]);
4929 hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
4930 return (hash_of_addr);
4931 }
4932 return (0);
4933 }
4934
4935 struct sctp_ifa *
4936 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
4937 {
4938 struct sctp_ifa *sctp_ifap;
4939 struct sctp_vrf *vrf;
4940 struct sctp_ifalist *hash_head;
4941 uint32_t hash_of_addr;
4942
4943 if (holds_lock == 0)
4944 SCTP_IPI_ADDR_RLOCK();
4945
4946 vrf = sctp_find_vrf(vrf_id);
4947 if (vrf == NULL) {
4948 stage_right:
4949 if (holds_lock == 0)
4950 SCTP_IPI_ADDR_RUNLOCK();
4951 return (NULL);
4952 }
4953 hash_of_addr = sctp_get_ifa_hash_val(addr);
4954
4955 hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
4956 if (hash_head == NULL) {
4957 SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
4958 hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
4959 (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
4960 sctp_print_address(addr);
4961 SCTP_PRINTF("No such bucket for address\n");
4962 if (holds_lock == 0)
4963 SCTP_IPI_ADDR_RUNLOCK();
4964
4965 return (NULL);
4966 }
4967 LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
4968 if (sctp_ifap == NULL) {
4969 #ifdef INVARIANTS
4970 panic("Huh LIST_FOREACH corrupt");
4971 goto stage_right;
4972 #else
4973 SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n");
4974 goto stage_right;
4975 #endif
4976 }
4977 if (addr->sa_family != sctp_ifap->address.sa.sa_family)
4978 continue;
4979 if (addr->sa_family == AF_INET) {
4980 if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4981 sctp_ifap->address.sin.sin_addr.s_addr) {
4982 /* found him. */
4983 if (holds_lock == 0)
4984 SCTP_IPI_ADDR_RUNLOCK();
4985 return (sctp_ifap);
4986 break;
4987 }
4988 }
4989 #ifdef INET6
4990 if (addr->sa_family == AF_INET6) {
4991 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4992 &sctp_ifap->address.sin6)) {
4993 /* found him. */
4994 if (holds_lock == 0)
4995 SCTP_IPI_ADDR_RUNLOCK();
4996 return (sctp_ifap);
4997 break;
4998 }
4999 }
5000 #endif
5001 }
5002 if (holds_lock == 0)
5003 SCTP_IPI_ADDR_RUNLOCK();
5004 return (NULL);
5005 }
5006
/*
 * Called after the user has pulled *freed_so_far bytes off the socket:
 * decide whether the receive window has opened by at least rwnd_req and,
 * if so, send an immediate window-update SACK (and kick chunk output).
 * hold_rlock says the caller holds the INP read lock, which we must drop
 * around the SACK send and re-take before returning.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
    /* User pulled some data, do we need a rwnd update? */
    int r_unlocked = 0;
    uint32_t dif, rwnd;
    struct socket *so = NULL;

    if (stcb == NULL)
        return;

    /* hold a reference so the assoc cannot be freed under us */
    atomic_add_int(&stcb->asoc.refcnt, 1);

    if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
        SCTP_STATE_SHUTDOWN_RECEIVED |
        SCTP_STATE_SHUTDOWN_ACK_SENT)) {
        /* Pre-check If we are freeing no update */
        goto no_lock;
    }
    SCTP_INP_INCR_REF(stcb->sctp_ep);
    if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
        (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
        goto out;
    }
    so = stcb->sctp_socket;
    if (so == NULL) {
        goto out;
    }
    /* bank the freed bytes and reset the caller's running total */
    atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
    /* Have you have freed enough to look */
    *freed_so_far = 0;
    /* Yep, its worth a look and the lock overhead */

    /* Figure out what the rwnd would be */
    rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
    if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
        dif = rwnd - stcb->asoc.my_last_reported_rwnd;
    } else {
        dif = 0;
    }
    if (dif >= rwnd_req) {
        /* window opened enough — send an update SACK now */
        if (hold_rlock) {
            /* drop the caller's read lock while we send */
            SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
            r_unlocked = 1;
        }
        if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
            /*
             * One last check before we allow the guy possibly
             * to get in. There is a race, where the guy has not
             * reached the gate. In that case
             */
            goto out;
        }
        SCTP_TCB_LOCK(stcb);
        if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
            /* No reports here */
            SCTP_TCB_UNLOCK(stcb);
            goto out;
        }
        SCTP_STAT_INCR(sctps_wu_sacks_sent);
        sctp_send_sack(stcb);

        sctp_chunk_output(stcb->sctp_ep, stcb,
            SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
        /* make sure no timer is running */
        sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
        SCTP_TCB_UNLOCK(stcb);
    } else {
        /* Update how much we have pending */
        stcb->freed_by_sorcv_sincelast = dif;
    }
out:
    /* re-take the caller's read lock if we dropped it above */
    if (so && r_unlocked && hold_rlock) {
        SCTP_INP_READ_LOCK(stcb->sctp_ep);
    }
    SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
    atomic_add_int(&stcb->asoc.refcnt, -1);
    return;
}
5088
5089 int
5090 sctp_sorecvmsg(struct socket *so,
5091 struct uio *uio,
5092 struct mbuf **mp,
5093 struct sockaddr *from,
5094 int fromlen,
5095 int *msg_flags,
5096 struct sctp_sndrcvinfo *sinfo,
5097 int filling_sinfo)
5098 {
5099 /*
5100 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5101 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5102 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5103 * On the way out we may send out any combination of:
5104 * MSG_NOTIFICATION MSG_EOR
5105 *
5106 */
5107 struct sctp_inpcb *inp = NULL;
5108 int my_len = 0;
5109 int cp_len = 0, error = 0;
5110 struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5111 struct mbuf *m = NULL, *embuf = NULL;
5112 struct sctp_tcb *stcb = NULL;
5113 int wakeup_read_socket = 0;
5114 int freecnt_applied = 0;
5115 int out_flags = 0, in_flags = 0;
5116 int block_allowed = 1;
5117 uint32_t freed_so_far = 0;
5118 uint32_t copied_so_far = 0;
5119 int in_eeor_mode = 0;
5120 int no_rcv_needed = 0;
5121 uint32_t rwnd_req = 0;
5122 int hold_sblock = 0;
5123 int hold_rlock = 0;
5124 int slen = 0;
5125 uint32_t held_length = 0;
5126 int sockbuf_lock = 0;
5127
5128 if (uio == NULL) {
5129 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5130 return (EINVAL);
5131 }
5132 if (msg_flags) {
5133 in_flags = *msg_flags;
5134 if (in_flags & MSG_PEEK)
5135 SCTP_STAT_INCR(sctps_read_peeks);
5136 } else {
5137 in_flags = 0;
5138 }
5139 slen = uio->uio_resid;
5140
5141 /* Pull in and set up our int flags */
5142 if (in_flags & MSG_OOB) {
5143 /* Out of band's NOT supported */
5144 return (EOPNOTSUPP);
5145 }
5146 if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5147 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5148 return (EINVAL);
5149 }
5150 if ((in_flags & (MSG_DONTWAIT
5151 | MSG_NBIO
5152 )) ||
5153 SCTP_SO_IS_NBIO(so)) {
5154 block_allowed = 0;
5155 }
5156 /* setup the endpoint */
5157 inp = (struct sctp_inpcb *)so->so_pcb;
5158 if (inp == NULL) {
5159 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5160 return (EFAULT);
5161 }
5162 rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5163 /* Must be at least a MTU's worth */
5164 if (rwnd_req < SCTP_MIN_RWND)
5165 rwnd_req = SCTP_MIN_RWND;
5166 in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5167 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5168 sctp_misc_ints(SCTP_SORECV_ENTER,
5169 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5170 }
5171 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5172 sctp_misc_ints(SCTP_SORECV_ENTERPL,
5173 rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5174 }
5175 error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5176 sockbuf_lock = 1;
5177 if (error) {
5178 goto release_unlocked;
5179 }
5180 restart:
5181
5182
5183 restart_nosblocks:
5184 if (hold_sblock == 0) {
5185 SOCKBUF_LOCK(&so->so_rcv);
5186 hold_sblock = 1;
5187 }
5188 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5189 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5190 goto out;
5191 }
5192 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5193 if (so->so_error) {
5194 error = so->so_error;
5195 if ((in_flags & MSG_PEEK) == 0)
5196 so->so_error = 0;
5197 goto out;
5198 } else {
5199 if (so->so_rcv.sb_cc == 0) {
5200 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5201 /* indicate EOF */
5202 error = 0;
5203 goto out;
5204 }
5205 }
5206 }
5207 if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5208 /* we need to wait for data */
5209 if ((so->so_rcv.sb_cc == 0) &&
5210 ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5211 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5212 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5213 /*
5214 * For active open side clear flags for
5215 * re-use passive open is blocked by
5216 * connect.
5217 */
5218 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5219 /*
5220 * You were aborted, passive side
5221 * always hits here
5222 */
5223 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5224 error = ECONNRESET;
5225 /*
5226 * You get this once if you are
5227 * active open side
5228 */
5229 if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5230 /*
5231 * Remove flag if on the
5232 * active open side
5233 */
5234 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
5235 }
5236 }
5237 so->so_state &= ~(SS_ISCONNECTING |
5238 SS_ISDISCONNECTING |
5239 SS_ISCONFIRMING |
5240 SS_ISCONNECTED);
5241 if (error == 0) {
5242 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5243 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5244 error = ENOTCONN;
5245 } else {
5246 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
5247 }
5248 }
5249 goto out;
5250 }
5251 }
5252 error = sbwait(&so->so_rcv);
5253 if (error) {
5254 goto out;
5255 }
5256 held_length = 0;
5257 goto restart_nosblocks;
5258 } else if (so->so_rcv.sb_cc == 0) {
5259 if (so->so_error) {
5260 error = so->so_error;
5261 if ((in_flags & MSG_PEEK) == 0)
5262 so->so_error = 0;
5263 } else {
5264 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5265 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5266 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5267 /*
5268 * For active open side clear flags
5269 * for re-use passive open is
5270 * blocked by connect.
5271 */
5272 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5273 /*
5274 * You were aborted, passive
5275 * side always hits here
5276 */
5277 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5278 error = ECONNRESET;
5279 /*
5280 * You get this once if you
5281 * are active open side
5282 */
5283 if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5284 /*
5285 * Remove flag if on
5286 * the active open
5287 * side
5288 */
5289 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
5290 }
5291 }
5292 so->so_state &= ~(SS_ISCONNECTING |
5293 SS_ISDISCONNECTING |
5294 SS_ISCONFIRMING |
5295 SS_ISCONNECTED);
5296 if (error == 0) {
5297 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5298 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5299 error = ENOTCONN;
5300 } else {
5301 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
5302 }
5303 }
5304 goto out;
5305 }
5306 }
5307 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5308 error = EWOULDBLOCK;
5309 }
5310 goto out;
5311 }
5312 if (hold_sblock == 1) {
5313 SOCKBUF_UNLOCK(&so->so_rcv);
5314 hold_sblock = 0;
5315 }
5316 /* we possibly have data we can read */
5317 /* sa_ignore FREED_MEMORY */
5318 control = TAILQ_FIRST(&inp->read_queue);
5319 if (control == NULL) {
5320 /*
5321 * This could be happening since the appender did the
5322 * increment but as not yet did the tailq insert onto the
5323 * read_queue
5324 */
5325 if (hold_rlock == 0) {
5326 SCTP_INP_READ_LOCK(inp);
5327 hold_rlock = 1;
5328 }
5329 control = TAILQ_FIRST(&inp->read_queue);
5330 if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5331 #ifdef INVARIANTS
5332 panic("Huh, its non zero and nothing on control?");
5333 #endif
5334 so->so_rcv.sb_cc = 0;
5335 }
5336 SCTP_INP_READ_UNLOCK(inp);
5337 hold_rlock = 0;
5338 goto restart;
5339 }
5340 if ((control->length == 0) &&
5341 (control->do_not_ref_stcb)) {
5342 /*
5343 * Clean up code for freeing assoc that left behind a
5344 * pdapi.. maybe a peer in EEOR that just closed after
5345 * sending and never indicated a EOR.
5346 */
5347 if (hold_rlock == 0) {
5348 hold_rlock = 1;
5349 SCTP_INP_READ_LOCK(inp);
5350 }
5351 control->held_length = 0;
5352 if (control->data) {
5353 /* Hmm there is data here .. fix */
5354 struct mbuf *m_tmp;
5355 int cnt = 0;
5356
5357 m_tmp = control->data;
5358 while (m_tmp) {
5359 cnt += SCTP_BUF_LEN(m_tmp);
5360 if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5361 control->tail_mbuf = m_tmp;
5362 control->end_added = 1;
5363 }
5364 m_tmp = SCTP_BUF_NEXT(m_tmp);
5365 }
5366 control->length = cnt;
5367 } else {
5368 /* remove it */
5369 TAILQ_REMOVE(&inp->read_queue, control, next);
5370 /* Add back any hiddend data */
5371 sctp_free_remote_addr(control->whoFrom);
5372 sctp_free_a_readq(stcb, control);
5373 }
5374 if (hold_rlock) {
5375 hold_rlock = 0;
5376 SCTP_INP_READ_UNLOCK(inp);
5377 }
5378 goto restart;
5379 }
5380 if ((control->length == 0) &&
5381 (control->end_added == 1)) {
5382 /*
5383 * Do we also need to check for (control->pdapi_aborted ==
5384 * 1)?
5385 */
5386 if (hold_rlock == 0) {
5387 hold_rlock = 1;
5388 SCTP_INP_READ_LOCK(inp);
5389 }
5390 TAILQ_REMOVE(&inp->read_queue, control, next);
5391 if (control->data) {
5392 #ifdef INVARIANTS
5393 panic("control->data not null but control->length == 0");
5394 #else
5395 SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5396 sctp_m_freem(control->data);
5397 control->data = NULL;
5398 #endif
5399 }
5400 if (control->aux_data) {
5401 sctp_m_free(control->aux_data);
5402 control->aux_data = NULL;
5403 }
5404 sctp_free_remote_addr(control->whoFrom);
5405 sctp_free_a_readq(stcb, control);
5406 if (hold_rlock) {
5407 hold_rlock = 0;
5408 SCTP_INP_READ_UNLOCK(inp);
5409 }
5410 goto restart;
5411 }
5412 if (control->length == 0) {
5413 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5414 (filling_sinfo)) {
5415 /* find a more suitable one then this */
5416 ctl = TAILQ_NEXT(control, next);
5417 while (ctl) {
5418 if ((ctl->stcb != control->stcb) && (ctl->length) &&
5419 (ctl->some_taken ||
5420 (ctl->spec_flags & M_NOTIFICATION) ||
5421 ((ctl->do_not_ref_stcb == 0) &&
5422 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5423 ) {
5424 /*-
5425 * If we have a different TCB next, and there is data
5426 * present. If we have already taken some (pdapi), OR we can
5427 * ref the tcb and no delivery as started on this stream, we
5428 * take it. Note we allow a notification on a different
5429 * assoc to be delivered..
5430 */
5431 control = ctl;
5432 goto found_one;
5433 } else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5434 (ctl->length) &&
5435 ((ctl->some_taken) ||
5436 ((ctl->do_not_ref_stcb == 0) &&
5437 ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5438 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5439 /*-
5440 * If we have the same tcb, and there is data present, and we
5441 * have the strm interleave feature present. Then if we have
5442 * taken some (pdapi) or we can refer to tht tcb AND we have
5443 * not started a delivery for this stream, we can take it.
5444 * Note we do NOT allow a notificaiton on the same assoc to
5445 * be delivered.
5446 */
5447 control = ctl;
5448 goto found_one;
5449 }
5450 ctl = TAILQ_NEXT(ctl, next);
5451 }
5452 }
5453 /*
5454 * if we reach here, not suitable replacement is available
5455 * <or> fragment interleave is NOT on. So stuff the sb_cc
5456 * into the our held count, and its time to sleep again.
5457 */
5458 held_length = so->so_rcv.sb_cc;
5459 control->held_length = so->so_rcv.sb_cc;
5460 goto restart;
5461 }
5462 /* Clear the held length since there is something to read */
5463 control->held_length = 0;
5464 if (hold_rlock) {
5465 SCTP_INP_READ_UNLOCK(inp);
5466 hold_rlock = 0;
5467 }
5468 found_one:
5469 /*
5470 * If we reach here, control has a some data for us to read off.
5471 * Note that stcb COULD be NULL.
5472 */
5473 control->some_taken++;
5474 if (hold_sblock) {
5475 SOCKBUF_UNLOCK(&so->so_rcv);
5476 hold_sblock = 0;
5477 }
5478 stcb = control->stcb;
5479 if (stcb) {
5480 if ((control->do_not_ref_stcb == 0) &&
5481 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5482 if (freecnt_applied == 0)
5483 stcb = NULL;
5484 } else if (control->do_not_ref_stcb == 0) {
5485 /* you can't free it on me please */
5486 /*
5487 * The lock on the socket buffer protects us so the
5488 * free code will stop. But since we used the
5489 * socketbuf lock and the sender uses the tcb_lock
5490 * to increment, we need to use the atomic add to
5491 * the refcnt
5492 */
5493 if (freecnt_applied) {
5494 #ifdef INVARIANTS
5495 panic("refcnt already incremented");
5496 #else
5497 printf("refcnt already incremented?\n");
5498 #endif
5499 } else {
5500 atomic_add_int(&stcb->asoc.refcnt, 1);
5501 freecnt_applied = 1;
5502 }
5503 /*
5504 * Setup to remember how much we have not yet told
5505 * the peer our rwnd has opened up. Note we grab the
5506 * value from the tcb from last time. Note too that
5507 * sack sending clears this when a sack is sent,
5508 * which is fine. Once we hit the rwnd_req, we then
5509 * will go to the sctp_user_rcvd() that will not
5510 * lock until it KNOWs it MUST send a WUP-SACK.
5511 */
5512 freed_so_far = stcb->freed_by_sorcv_sincelast;
5513 stcb->freed_by_sorcv_sincelast = 0;
5514 }
5515 }
5516 if (stcb &&
5517 ((control->spec_flags & M_NOTIFICATION) == 0) &&
5518 control->do_not_ref_stcb == 0) {
5519 stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5520 }
5521 /* First lets get off the sinfo and sockaddr info */
5522 if ((sinfo) && filling_sinfo) {
5523 memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
5524 nxt = TAILQ_NEXT(control, next);
5525 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
5526 struct sctp_extrcvinfo *s_extra;
5527
5528 s_extra = (struct sctp_extrcvinfo *)sinfo;
5529 if ((nxt) &&
5530 (nxt->length)) {
5531 s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5532 if (nxt->sinfo_flags & SCTP_UNORDERED) {
5533 s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5534 }
5535 if (nxt->spec_flags & M_NOTIFICATION) {
5536 s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5537 }
5538 s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
5539 s_extra->sreinfo_next_length = nxt->length;
5540 s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
5541 s_extra->sreinfo_next_stream = nxt->sinfo_stream;
5542 if (nxt->tail_mbuf != NULL) {
5543 if (nxt->end_added) {
5544 s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5545 }
5546 }
5547 } else {
5548 /*
5549 * we explicitly 0 this, since the memcpy
5550 * got some other things beyond the older
5551 * sinfo_ that is on the control's structure
5552 * :-D
5553 */
5554 nxt = NULL;
5555 s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5556 s_extra->sreinfo_next_aid = 0;
5557 s_extra->sreinfo_next_length = 0;
5558 s_extra->sreinfo_next_ppid = 0;
5559 s_extra->sreinfo_next_stream = 0;
5560 }
5561 }
5562 /*
5563 * update off the real current cum-ack, if we have an stcb.
5564 */
5565 if ((control->do_not_ref_stcb == 0) && stcb)
5566 sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5567 /*
5568 * mask off the high bits, we keep the actual chunk bits in
5569 * there.
5570 */
5571 sinfo->sinfo_flags &= 0x00ff;
5572 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5573 sinfo->sinfo_flags |= SCTP_UNORDERED;
5574 }
5575 }
5576 #ifdef SCTP_ASOCLOG_OF_TSNS
5577 {
5578 int index, newindex;
5579 struct sctp_pcbtsn_rlog *entry;
5580
5581 do {
5582 index = inp->readlog_index;
5583 newindex = index + 1;
5584 if (newindex >= SCTP_READ_LOG_SIZE) {
5585 newindex = 0;
5586 }
5587 } while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5588 entry = &inp->readlog[index];
5589 entry->vtag = control->sinfo_assoc_id;
5590 entry->strm = control->sinfo_stream;
5591 entry->seq = control->sinfo_ssn;
5592 entry->sz = control->length;
5593 entry->flgs = control->sinfo_flags;
5594 }
5595 #endif
5596 if (fromlen && from) {
5597 struct sockaddr *to;
5598
5599 #ifdef INET
5600 cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin.sin_len);
5601 memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5602 ((struct sockaddr_in *)from)->sin_port = control->port_from;
5603 #else
5604 /* No AF_INET use AF_INET6 */
5605 cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin6.sin6_len);
5606 memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5607 ((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
5608 #endif
5609
5610 to = from;
5611 #if defined(INET) && defined(INET6)
5612 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
5613 (to->sa_family == AF_INET) &&
5614 ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
5615 struct sockaddr_in *sin;
5616 struct sockaddr_in6 sin6;
5617
5618 sin = (struct sockaddr_in *)to;
5619 bzero(&sin6, sizeof(sin6));
5620 sin6.sin6_family = AF_INET6;
5621 sin6.sin6_len = sizeof(struct sockaddr_in6);
5622 sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
5623 bcopy(&sin->sin_addr,
5624 &sin6.sin6_addr.s6_addr32[3],
5625 sizeof(sin6.sin6_addr.s6_addr32[3]));
5626 sin6.sin6_port = sin->sin_port;
5627 memcpy(from, (caddr_t)&sin6, sizeof(sin6));
5628 }
5629 #endif
5630 #if defined(INET6)
5631 {
5632 struct sockaddr_in6 lsa6, *to6;
5633
5634 to6 = (struct sockaddr_in6 *)to;
5635 sctp_recover_scope_mac(to6, (&lsa6));
5636 }
5637 #endif
5638 }
5639 /* now copy out what data we can */
5640 if (mp == NULL) {
5641 /* copy out each mbuf in the chain up to length */
5642 get_more_data:
5643 m = control->data;
5644 while (m) {
5645 /* Move out all we can */
5646 cp_len = (int)uio->uio_resid;
5647 my_len = (int)SCTP_BUF_LEN(m);
5648 if (cp_len > my_len) {
5649 /* not enough in this buf */
5650 cp_len = my_len;
5651 }
5652 if (hold_rlock) {
5653 SCTP_INP_READ_UNLOCK(inp);
5654 hold_rlock = 0;
5655 }
5656 if (cp_len > 0)
5657 error = uiomove(mtod(m, char *), cp_len, uio);
5658 /* re-read */
5659 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5660 goto release;
5661 }
5662 if ((control->do_not_ref_stcb == 0) && stcb &&
5663 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5664 no_rcv_needed = 1;
5665 }
5666 if (error) {
5667 /* error we are out of here */
5668 goto release;
5669 }
5670 if ((SCTP_BUF_NEXT(m) == NULL) &&
5671 (cp_len >= SCTP_BUF_LEN(m)) &&
5672 ((control->end_added == 0) ||
5673 (control->end_added &&
5674 (TAILQ_NEXT(control, next) == NULL)))
5675 ) {
5676 SCTP_INP_READ_LOCK(inp);
5677 hold_rlock = 1;
5678 }
5679 if (cp_len == SCTP_BUF_LEN(m)) {
5680 if ((SCTP_BUF_NEXT(m) == NULL) &&
5681 (control->end_added)) {
5682 out_flags |= MSG_EOR;
5683 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5684 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5685 }
5686 if (control->spec_flags & M_NOTIFICATION) {
5687 out_flags |= MSG_NOTIFICATION;
5688 }
5689 /* we ate up the mbuf */
5690 if (in_flags & MSG_PEEK) {
5691 /* just looking */
5692 m = SCTP_BUF_NEXT(m);
5693 copied_so_far += cp_len;
5694 } else {
5695 /* dispose of the mbuf */
5696 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5697 sctp_sblog(&so->so_rcv,
5698 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5699 }
5700 sctp_sbfree(control, stcb, &so->so_rcv, m);
5701 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5702 sctp_sblog(&so->so_rcv,
5703 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5704 }
5705 embuf = m;
5706 copied_so_far += cp_len;
5707 freed_so_far += cp_len;
5708 freed_so_far += MSIZE;
5709 atomic_subtract_int(&control->length, cp_len);
5710 control->data = sctp_m_free(m);
5711 m = control->data;
5712 /*
5713 * been through it all, must hold sb
5714 * lock ok to null tail
5715 */
5716 if (control->data == NULL) {
5717 #ifdef INVARIANTS
5718 if ((control->end_added == 0) ||
5719 (TAILQ_NEXT(control, next) == NULL)) {
5720 /*
5721 * If the end is not
5722 * added, OR the
5723 * next is NOT null
5724 * we MUST have the
5725 * lock.
5726 */
5727 if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5728 panic("Hmm we don't own the lock?");
5729 }
5730 }
5731 #endif
5732 control->tail_mbuf = NULL;
5733 #ifdef INVARIANTS
5734 if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5735 panic("end_added, nothing left and no MSG_EOR");
5736 }
5737 #endif
5738 }
5739 }
5740 } else {
5741 /* Do we need to trim the mbuf? */
5742 if (control->spec_flags & M_NOTIFICATION) {
5743 out_flags |= MSG_NOTIFICATION;
5744 }
5745 if ((in_flags & MSG_PEEK) == 0) {
5746 SCTP_BUF_RESV_UF(m, cp_len);
5747 SCTP_BUF_LEN(m) -= cp_len;
5748 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5749 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5750 }
5751 atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5752 if ((control->do_not_ref_stcb == 0) &&
5753 stcb) {
5754 atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5755 }
5756 copied_so_far += cp_len;
5757 embuf = m;
5758 freed_so_far += cp_len;
5759 freed_so_far += MSIZE;
5760 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5761 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5762 SCTP_LOG_SBRESULT, 0);
5763 }
5764 atomic_subtract_int(&control->length, cp_len);
5765 } else {
5766 copied_so_far += cp_len;
5767 }
5768 }
5769 if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5770 break;
5771 }
5772 if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5773 (control->do_not_ref_stcb == 0) &&
5774 (freed_so_far >= rwnd_req)) {
5775 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5776 }
5777 } /* end while(m) */
5778 /*
5779 * At this point we have looked at it all and we either have
5780 * a MSG_EOR/or read all the user wants... <OR>
5781 * control->length == 0.
5782 */
5783 if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5784 /* we are done with this control */
5785 if (control->length == 0) {
5786 if (control->data) {
5787 #ifdef INVARIANTS
5788 panic("control->data not null at read eor?");
5789 #else
5790 SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5791 sctp_m_freem(control->data);
5792 control->data = NULL;
5793 #endif
5794 }
5795 done_with_control:
5796 if (TAILQ_NEXT(control, next) == NULL) {
5797 /*
5798 * If we don't have a next we need a
5799 * lock, if there is a next
5800 * interrupt is filling ahead of us
5801 * and we don't need a lock to
5802 * remove this guy (which is the
5803 * head of the queue).
5804 */
5805 if (hold_rlock == 0) {
5806 SCTP_INP_READ_LOCK(inp);
5807 hold_rlock = 1;
5808 }
5809 }
5810 TAILQ_REMOVE(&inp->read_queue, control, next);
5811 /* Add back any hiddend data */
5812 if (control->held_length) {
5813 held_length = 0;
5814 control->held_length = 0;
5815 wakeup_read_socket = 1;
5816 }
5817 if (control->aux_data) {
5818 sctp_m_free(control->aux_data);
5819 control->aux_data = NULL;
5820 }
5821 no_rcv_needed = control->do_not_ref_stcb;
5822 sctp_free_remote_addr(control->whoFrom);
5823 control->data = NULL;
5824 sctp_free_a_readq(stcb, control);
5825 control = NULL;
5826 if ((freed_so_far >= rwnd_req) &&
5827 (no_rcv_needed == 0))
5828 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5829
5830 } else {
5831 /*
5832 * The user did not read all of this
5833 * message, turn off the returned MSG_EOR
5834 * since we are leaving more behind on the
5835 * control to read.
5836 */
5837 #ifdef INVARIANTS
5838 if (control->end_added &&
5839 (control->data == NULL) &&
5840 (control->tail_mbuf == NULL)) {
5841 panic("Gak, control->length is corrupt?");
5842 }
5843 #endif
5844 no_rcv_needed = control->do_not_ref_stcb;
5845 out_flags &= ~MSG_EOR;
5846 }
5847 }
5848 if (out_flags & MSG_EOR) {
5849 goto release;
5850 }
5851 if ((uio->uio_resid == 0) ||
5852 ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))
5853 ) {
5854 goto release;
5855 }
5856 /*
5857 * If I hit here the receiver wants more and this message is
5858 * NOT done (pd-api). So two questions. Can we block? if not
5859 * we are done. Did the user NOT set MSG_WAITALL?
5860 */
5861 if (block_allowed == 0) {
5862 goto release;
5863 }
5864 /*
5865 * We need to wait for more data a few things: - We don't
5866 * sbunlock() so we don't get someone else reading. - We
5867 * must be sure to account for the case where what is added
5868 * is NOT to our control when we wakeup.
5869 */
5870
5871 /*
5872 * Do we need to tell the transport a rwnd update might be
5873 * needed before we go to sleep?
5874 */
5875 if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5876 ((freed_so_far >= rwnd_req) &&
5877 (control->do_not_ref_stcb == 0) &&
5878 (no_rcv_needed == 0))) {
5879 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5880 }
5881 wait_some_more:
5882 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5883 goto release;
5884 }
5885 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5886 goto release;
5887
5888 if (hold_rlock == 1) {
5889 SCTP_INP_READ_UNLOCK(inp);
5890 hold_rlock = 0;
5891 }
5892 if (hold_sblock == 0) {
5893 SOCKBUF_LOCK(&so->so_rcv);
5894 hold_sblock = 1;
5895 }
5896 if ((copied_so_far) && (control->length == 0) &&
5897 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5898 goto release;
5899 }
5900 if (so->so_rcv.sb_cc <= control->held_length) {
5901 error = sbwait(&so->so_rcv);
5902 if (error) {
5903 goto release;
5904 }
5905 control->held_length = 0;
5906 }
5907 if (hold_sblock) {
5908 SOCKBUF_UNLOCK(&so->so_rcv);
5909 hold_sblock = 0;
5910 }
5911 if (control->length == 0) {
5912 /* still nothing here */
5913 if (control->end_added == 1) {
5914 /* he aborted, or is done i.e.did a shutdown */
5915 out_flags |= MSG_EOR;
5916 if (control->pdapi_aborted) {
5917 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5918 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5919
5920 out_flags |= MSG_TRUNC;
5921 } else {
5922 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5923 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5924 }
5925 goto done_with_control;
5926 }
5927 if (so->so_rcv.sb_cc > held_length) {
5928 control->held_length = so->so_rcv.sb_cc;
5929 held_length = 0;
5930 }
5931 goto wait_some_more;
5932 } else if (control->data == NULL) {
5933 /*
5934 * we must re-sync since data is probably being
5935 * added
5936 */
5937 SCTP_INP_READ_LOCK(inp);
5938 if ((control->length > 0) && (control->data == NULL)) {
5939 /*
5940 * big trouble.. we have the lock and its
5941 * corrupt?
5942 */
5943 #ifdef INVARIANTS
5944 panic("Impossible data==NULL length !=0");
5945 #endif
5946 out_flags |= MSG_EOR;
5947 out_flags |= MSG_TRUNC;
5948 control->length = 0;
5949 SCTP_INP_READ_UNLOCK(inp);
5950 goto done_with_control;
5951 }
5952 SCTP_INP_READ_UNLOCK(inp);
5953 /* We will fall around to get more data */
5954 }
5955 goto get_more_data;
5956 } else {
5957 /*-
5958 * Give caller back the mbuf chain,
5959 * store in uio_resid the length
5960 */
5961 wakeup_read_socket = 0;
5962 if ((control->end_added == 0) ||
5963 (TAILQ_NEXT(control, next) == NULL)) {
5964 /* Need to get rlock */
5965 if (hold_rlock == 0) {
5966 SCTP_INP_READ_LOCK(inp);
5967 hold_rlock = 1;
5968 }
5969 }
5970 if (control->end_added) {
5971 out_flags |= MSG_EOR;
5972 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5973 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5974 }
5975 if (control->spec_flags & M_NOTIFICATION) {
5976 out_flags |= MSG_NOTIFICATION;
5977 }
5978 uio->uio_resid = control->length;
5979 *mp = control->data;
5980 m = control->data;
5981 while (m) {
5982 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5983 sctp_sblog(&so->so_rcv,
5984 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5985 }
5986 sctp_sbfree(control, stcb, &so->so_rcv, m);
5987 freed_so_far += SCTP_BUF_LEN(m);
5988 freed_so_far += MSIZE;
5989 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5990 sctp_sblog(&so->so_rcv,
5991 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5992 }
5993 m = SCTP_BUF_NEXT(m);
5994 }
5995 control->data = control->tail_mbuf = NULL;
5996 control->length = 0;
5997 if (out_flags & MSG_EOR) {
5998 /* Done with this control */
5999 goto done_with_control;
6000 }
6001 }
6002 release:
6003 if (hold_rlock == 1) {
6004 SCTP_INP_READ_UNLOCK(inp);
6005 hold_rlock = 0;
6006 }
6007 if (hold_sblock == 1) {
6008 SOCKBUF_UNLOCK(&so->so_rcv);
6009 hold_sblock = 0;
6010 }
6011 sbunlock(&so->so_rcv);
6012 sockbuf_lock = 0;
6013
6014 release_unlocked:
6015 if (hold_sblock) {
6016 SOCKBUF_UNLOCK(&so->so_rcv);
6017 hold_sblock = 0;
6018 }
6019 if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6020 if ((freed_so_far >= rwnd_req) &&
6021 (control && (control->do_not_ref_stcb == 0)) &&
6022 (no_rcv_needed == 0))
6023 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6024 }
6025 out:
6026 if (msg_flags) {
6027 *msg_flags = out_flags;
6028 }
6029 if (((out_flags & MSG_EOR) == 0) &&
6030 ((in_flags & MSG_PEEK) == 0) &&
6031 (sinfo) &&
6032 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO))) {
6033 struct sctp_extrcvinfo *s_extra;
6034
6035 s_extra = (struct sctp_extrcvinfo *)sinfo;
6036 s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
6037 }
6038 if (hold_rlock == 1) {
6039 SCTP_INP_READ_UNLOCK(inp);
6040 hold_rlock = 0;
6041 }
6042 if (hold_sblock) {
6043 SOCKBUF_UNLOCK(&so->so_rcv);
6044 hold_sblock = 0;
6045 }
6046 if (sockbuf_lock) {
6047 sbunlock(&so->so_rcv);
6048 }
6049 if (freecnt_applied) {
6050 /*
6051 * The lock on the socket buffer protects us so the free
6052 * code will stop. But since we used the socketbuf lock and
6053 * the sender uses the tcb_lock to increment, we need to use
6054 * the atomic add to the refcnt.
6055 */
6056 if (stcb == NULL) {
6057 #ifdef INVARIANTS
6058 panic("stcb for refcnt has gone NULL?");
6059 goto stage_left;
6060 #else
6061 goto stage_left;
6062 #endif
6063 }
6064 atomic_add_int(&stcb->asoc.refcnt, -1);
6065 freecnt_applied = 0;
6066 /* Save the value back for next time */
6067 stcb->freed_by_sorcv_sincelast = freed_so_far;
6068 }
6069 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6070 if (stcb) {
6071 sctp_misc_ints(SCTP_SORECV_DONE,
6072 freed_so_far,
6073 ((uio) ? (slen - uio->uio_resid) : slen),
6074 stcb->asoc.my_rwnd,
6075 so->so_rcv.sb_cc);
6076 } else {
6077 sctp_misc_ints(SCTP_SORECV_DONE,
6078 freed_so_far,
6079 ((uio) ? (slen - uio->uio_resid) : slen),
6080 0,
6081 so->so_rcv.sb_cc);
6082 }
6083 }
6084 stage_left:
6085 if (wakeup_read_socket) {
6086 sctp_sorwakeup(inp, so);
6087 }
6088 return (error);
6089 }
6090
6091
6092 #ifdef SCTP_MBUF_LOGGING
6093 struct mbuf *
6094 sctp_m_free(struct mbuf *m)
6095 {
6096 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6097 if (SCTP_BUF_IS_EXTENDED(m)) {
6098 sctp_log_mb(m, SCTP_MBUF_IFREE);
6099 }
6100 }
6101 return (m_free(m));
6102 }
6103
6104 void
6105 sctp_m_freem(struct mbuf *mb)
6106 {
6107 while (mb != NULL)
6108 mb = sctp_m_free(mb);
6109 }
6110
6111 #endif
6112
6113 int
6114 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6115 {
6116 /*
6117 * Given a local address. For all associations that holds the
6118 * address, request a peer-set-primary.
6119 */
6120 struct sctp_ifa *ifa;
6121 struct sctp_laddr *wi;
6122
6123 ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6124 if (ifa == NULL) {
6125 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6126 return (EADDRNOTAVAIL);
6127 }
6128 /*
6129 * Now that we have the ifa we must awaken the iterator with this
6130 * message.
6131 */
6132 wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6133 if (wi == NULL) {
6134 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6135 return (ENOMEM);
6136 }
6137 /* Now incr the count and int wi structure */
6138 SCTP_INCR_LADDR_COUNT();
6139 bzero(wi, sizeof(*wi));
6140 (void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6141 wi->ifa = ifa;
6142 wi->action = SCTP_SET_PRIM_ADDR;
6143 atomic_add_int(&ifa->refcount, 1);
6144
6145 /* Now add it to the work queue */
6146 SCTP_WQ_ADDR_LOCK();
6147 /*
6148 * Should this really be a tailq? As it is we will process the
6149 * newest first :-0
6150 */
6151 LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6152 SCTP_WQ_ADDR_UNLOCK();
6153 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6154 (struct sctp_inpcb *)NULL,
6155 (struct sctp_tcb *)NULL,
6156 (struct sctp_nets *)NULL);
6157 return (0);
6158 }
6159
6160
6161 int
6162 sctp_soreceive(struct socket *so,
6163 struct sockaddr **psa,
6164 struct uio *uio,
6165 struct mbuf **mp0,
6166 struct mbuf **controlp,
6167 int *flagsp)
6168 {
6169 int error, fromlen;
6170 uint8_t sockbuf[256];
6171 struct sockaddr *from;
6172 struct sctp_extrcvinfo sinfo;
6173 int filling_sinfo = 1;
6174 struct sctp_inpcb *inp;
6175
6176 inp = (struct sctp_inpcb *)so->so_pcb;
6177 /* pickup the assoc we are reading from */
6178 if (inp == NULL) {
6179 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6180 return (EINVAL);
6181 }
6182 if ((sctp_is_feature_off(inp,
6183 SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6184 (controlp == NULL)) {
6185 /* user does not want the sndrcv ctl */
6186 filling_sinfo = 0;
6187 }
6188 if (psa) {
6189 from = (struct sockaddr *)sockbuf;
6190 fromlen = sizeof(sockbuf);
6191 from->sa_len = 0;
6192 } else {
6193 from = NULL;
6194 fromlen = 0;
6195 }
6196
6197 error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6198 (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6199 if ((controlp) && (filling_sinfo)) {
6200 /* copy back the sinfo in a CMSG format */
6201 if (filling_sinfo)
6202 *controlp = sctp_build_ctl_nchunk(inp,
6203 (struct sctp_sndrcvinfo *)&sinfo);
6204 else
6205 *controlp = NULL;
6206 }
6207 if (psa) {
6208 /* copy back the address info */
6209 if (from && from->sa_len) {
6210 *psa = sodupsockaddr(from, M_NOWAIT);
6211 } else {
6212 *psa = NULL;
6213 }
6214 }
6215 return (error);
6216 }
6217
6218
6219 int
6220 sctp_l_soreceive(struct socket *so,
6221 struct sockaddr **name,
6222 struct uio *uio,
6223 char **controlp,
6224 int *controllen,
6225 int *flag)
6226 {
6227 int error, fromlen;
6228 uint8_t sockbuf[256];
6229 struct sockaddr *from;
6230 struct sctp_extrcvinfo sinfo;
6231 int filling_sinfo = 1;
6232 struct sctp_inpcb *inp;
6233
6234 inp = (struct sctp_inpcb *)so->so_pcb;
6235 /* pickup the assoc we are reading from */
6236 if (inp == NULL) {
6237 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6238 return (EINVAL);
6239 }
6240 if ((sctp_is_feature_off(inp,
6241 SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6242 (controlp == NULL)) {
6243 /* user does not want the sndrcv ctl */
6244 filling_sinfo = 0;
6245 }
6246 if (name) {
6247 from = (struct sockaddr *)sockbuf;
6248 fromlen = sizeof(sockbuf);
6249 from->sa_len = 0;
6250 } else {
6251 from = NULL;
6252 fromlen = 0;
6253 }
6254
6255 error = sctp_sorecvmsg(so, uio,
6256 (struct mbuf **)NULL,
6257 from, fromlen, flag,
6258 (struct sctp_sndrcvinfo *)&sinfo,
6259 filling_sinfo);
6260 if ((controlp) && (filling_sinfo)) {
6261 /*
6262 * copy back the sinfo in a CMSG format note that the caller
6263 * has reponsibility for freeing the memory.
6264 */
6265 if (filling_sinfo)
6266 *controlp = sctp_build_ctl_cchunk(inp,
6267 controllen,
6268 (struct sctp_sndrcvinfo *)&sinfo);
6269 }
6270 if (name) {
6271 /* copy back the address info */
6272 if (from && from->sa_len) {
6273 *name = sodupsockaddr(from, M_WAIT);
6274 } else {
6275 *name = NULL;
6276 }
6277 }
6278 return (error);
6279 }
6280
6281
6282
6283
6284
6285
6286
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	/*
	 * Add each of the 'totaddr' packed sockaddrs starting at 'addr'
	 * to the association as confirmed remote addresses.  Returns the
	 * number of addresses successfully added.  On failure, *error is
	 * set to ENOBUFS and the association has been FREED via
	 * sctp_free_assoc() — the caller must not touch stcb afterwards.
	 *
	 * NOTE(review): for an sa_family that is neither AF_INET nor
	 * AF_INET6, 'incr' keeps its previous value (0 on the first
	 * iteration), so the walk would not advance — presumably the
	 * caller has already validated/truncated the list via
	 * sctp_connectx_helper_find(); confirm before relying on this.
	 */
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		if (sa->sa_family == AF_INET) {
			incr = sizeof(struct sockaddr_in);
			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
		} else if (sa->sa_family == AF_INET6) {
			incr = sizeof(struct sockaddr_in6);
			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
		}
		/* step to the next packed sockaddr */
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6327
6328 struct sctp_tcb *
6329 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6330 int *totaddr, int *num_v4, int *num_v6, int *error,
6331 int limit, int *bad_addr)
6332 {
6333 struct sockaddr *sa;
6334 struct sctp_tcb *stcb = NULL;
6335 size_t incr, at, i;
6336
6337 at = incr = 0;
6338 sa = addr;
6339 *error = *num_v6 = *num_v4 = 0;
6340 /* account and validate addresses */
6341 for (i = 0; i < (size_t)*totaddr; i++) {
6342 if (sa->sa_family == AF_INET) {
6343 (*num_v4) += 1;
6344 incr = sizeof(struct sockaddr_in);
6345 if (sa->sa_len != incr) {
6346 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6347 *error = EINVAL;
6348 *bad_addr = 1;
6349 return (NULL);
6350 }
6351 } else if (sa->sa_family == AF_INET6) {
6352 struct sockaddr_in6 *sin6;
6353
6354 sin6 = (struct sockaddr_in6 *)sa;
6355 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6356 /* Must be non-mapped for connectx */
6357 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6358 *error = EINVAL;
6359 *bad_addr = 1;
6360 return (NULL);
6361 }
6362 (*num_v6) += 1;
6363 incr = sizeof(struct sockaddr_in6);
6364 if (sa->sa_len != incr) {
6365 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6366 *error = EINVAL;
6367 *bad_addr = 1;
6368 return (NULL);
6369 }
6370 } else {
6371 *totaddr = i;
6372 /* we are done */
6373 break;
6374 }
6375 SCTP_INP_INCR_REF(inp);
6376 stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6377 if (stcb != NULL) {
6378 /* Already have or am bring up an association */
6379 return (stcb);
6380 } else {
6381 SCTP_INP_DECR_REF(inp);
6382 }
6383 if ((at + incr) > (size_t)limit) {
6384 *totaddr = i;
6385 break;
6386 }
6387 sa = (struct sockaddr *)((caddr_t)sa + incr);
6388 }
6389 return ((struct sctp_tcb *)NULL);
6390 }
6391
6392 /*
6393 * sctp_bindx(ADD) for one address.
6394 * assumes all arguments are valid/checked by caller.
6395 */
/*
 * Bind one additional address to the endpoint 'inp' (sctp_bindx ADD).
 * All arguments are assumed pre-validated by the caller.  Errors are
 * reported via *error (EINVAL / EADDRINUSE / whatever the underlying
 * bind returns); on success *error is left at 0.
 */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	struct sockaddr *addr_touse;

#ifdef INET6
	/* storage for a v4-mapped v6 address converted to plain v4 */
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#if defined(INET6) && !defined(__Userspace__)	/* TODO port in6_sin6_2_sin */
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		/* reject a short/garbage sockaddr_in6 */
		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* fall through the rest of the code as plain v4 */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
	if (sa->sa_family == AF_INET) {
		/* reject a short/garbage sockaddr_in */
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		/* endpoint not yet bound at all: this is a normal bind */
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		/* is some other endpoint already bound to this addr/port? */
		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* clear the port before handing off to addr mgmt */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6517
6518 /*
6519 * sctp_bindx(DELETE) for one address.
6520 * assumes all arguments are valid/checked by caller.
6521 */
6522 void
6523 sctp_bindx_delete_address(struct socket *so, struct sctp_inpcb *inp,
6524 struct sockaddr *sa, sctp_assoc_t assoc_id,
6525 uint32_t vrf_id, int *error)
6526 {
6527 struct sockaddr *addr_touse;
6528
6529 #ifdef INET6
6530 struct sockaddr_in sin;
6531
6532 #endif
6533
6534 /* see if we're bound all already! */
6535 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6536 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6537 *error = EINVAL;
6538 return;
6539 }
6540 addr_touse = sa;
6541 #if defined(INET6) && !defined(__Userspace__) /* TODO port in6_sin6_2_sin */
6542 if (sa->sa_family == AF_INET6) {
6543 struct sockaddr_in6 *sin6;
6544
6545 if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6546 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6547 *error = EINVAL;
6548 return;
6549 }
6550 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6551 /* can only bind v6 on PF_INET6 sockets */
6552 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6553 *error = EINVAL;
6554 return;
6555 }
6556 sin6 = (struct sockaddr_in6 *)addr_touse;
6557 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6558 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6559 SCTP_IPV6_V6ONLY(inp)) {
6560 /* can't bind mapped-v4 on PF_INET sockets */
6561 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6562 *error = EINVAL;
6563 return;
6564 }
6565 in6_sin6_2_sin(&sin, sin6);
6566 addr_touse = (struct sockaddr *)&sin;
6567 }
6568 }
6569 #endif
6570 if (sa->sa_family == AF_INET) {
6571 if (sa->sa_len != sizeof(struct sockaddr_in)) {
6572 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6573 *error = EINVAL;
6574 return;
6575 }
6576 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6577 SCTP_IPV6_V6ONLY(inp)) {
6578 /* can't bind v4 on PF_INET sockets */
6579 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6580 *error = EINVAL;
6581 return;
6582 }
6583 }
6584 /*
6585 * No lock required mgmt_ep_sa does its own locking. If the FIX:
6586 * below is ever changed we may need to lock before calling
6587 * association level binding.
6588 */
6589 if (assoc_id == 0) {
6590 /* delete the address */
6591 *error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6592 SCTP_DEL_IP_ADDRESS,
6593 vrf_id, NULL);
6594 } else {
6595 /*
6596 * FIX: decide whether we allow assoc based bindx
6597 */
6598 }
6599 }
6600
6601 /*
6602 * returns the valid local address count for an assoc, taking into account
6603 * all scoping rules
6604 */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
	int ipv4_addr_legal, ipv6_addr_legal;
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.loopback_scope;
	ipv4_local_scope = stcb->asoc.ipv4_local_scope;
	local_scope = stcb->asoc.local_scope;
	site_scope = stcb->asoc.site_scope;
	/* determine which families this endpoint may use */
	ipv4_addr_legal = ipv6_addr_legal = 0;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		ipv6_addr_legal = 1;
		/* a dual-stack v6 socket may also use v4 addresses */
		if (SCTP_IPV6_V6ONLY(stcb->sctp_ep) == 0) {
			ipv4_addr_legal = 1;
		}
	} else {
		ipv4_addr_legal = 1;
	}

	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			/* skip loopback interfaces if out of scope */
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				/* addresses restricted for this assoc don't count */
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						/* RFC1918 addrs only count in local scope */
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/* bad link-local address */
									continue;
							}
						}
						/* site-local only counts in site scope */
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6736
6737 #if defined(SCTP_LOCAL_TRACE_BUF)
6738
/*
 * Record one entry in the global lock-free trace ring buffer.  A slot
 * index is claimed with a CAS loop; the entry itself is then filled in
 * without further synchronization, so a torn entry is possible if the
 * ring wraps while a writer is mid-update (acceptable for debug traces).
 */
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	uint32_t saveindex, newindex;

	/* atomically claim the next ring slot (wrap back to 1 at the end) */
	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	/* on wrap, the claimed slot is entry 0 */
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		saveindex = 0;
	}
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}
6764
6765 #endif
/*
 * We will need to add support for binding the ports and such here so
 * that we can do UDP tunneling.  In the meantime, we return an error.
 */
6771 #include <netinet/udp.h>
6772 #include <netinet/udp_var.h>
6773 #include <sys/proc.h>
6774 #ifdef INET6
6775 #include <netinet6/sctp6_var.h>
6776 #endif
6777
6778 static void
6779 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored)
6780 {
6781 struct ip *iph;
6782 struct mbuf *sp, *last;
6783 struct udphdr *uhdr;
6784 uint16_t port = 0, len;
6785 int header_size = sizeof(struct udphdr) + sizeof(struct sctphdr);
6786
6787 /*
6788 * Split out the mbuf chain. Leave the IP header in m, place the
6789 * rest in the sp.
6790 */
6791 if ((m->m_flags & M_PKTHDR) == 0) {
6792 /* Can't handle one that is not a pkt hdr */
6793 goto out;
6794 }
6795 /* pull the src port */
6796 iph = mtod(m, struct ip *);
6797 uhdr = (struct udphdr *)((caddr_t)iph + off);
6798
6799 port = uhdr->uh_sport;
6800 sp = m_split(m, off, M_DONTWAIT);
6801 if (sp == NULL) {
6802 /* Gak, drop packet, we can't do a split */
6803 goto out;
6804 }
6805 if (sp->m_pkthdr.len < header_size) {
6806 /* Gak, packet can't have an SCTP header in it - to small */
6807 m_freem(sp);
6808 goto out;
6809 }
6810 /* ok now pull up the UDP header and SCTP header together */
6811 sp = m_pullup(sp, header_size);
6812 if (sp == NULL) {
6813 /* Gak pullup failed */
6814 goto out;
6815 }
6816 /* trim out the UDP header */
6817 m_adj(sp, sizeof(struct udphdr));
6818
6819 /* Now reconstruct the mbuf chain */
6820 /* 1) find last one */
6821 last = m;
6822 while (last->m_next != NULL) {
6823 last = last->m_next;
6824 }
6825 last->m_next = sp;
6826 m->m_pkthdr.len += sp->m_pkthdr.len;
6827 last = m;
6828 while (last != NULL) {
6829 last = last->m_next;
6830 }
6831 /* Now its ready for sctp_input or sctp6_input */
6832 iph = mtod(m, struct ip *);
6833 switch (iph->ip_v) {
6834 case IPVERSION:
6835 {
6836 /* its IPv4 */
6837 len = SCTP_GET_IPV4_LENGTH(iph);
6838 len -= sizeof(struct udphdr);
6839 SCTP_GET_IPV4_LENGTH(iph) = len;
6840 sctp_input_with_port(m, off, port);
6841 break;
6842 }
6843 #ifdef INET6
6844 case IPV6_VERSION >> 4:
6845 {
6846 /* its IPv6 - NOT supported */
6847 goto out;
6848 break;
6849
6850 }
6851 #endif
6852 default:
6853 {
6854 m_freem(m);
6855 break;
6856 }
6857 }
6858 return;
6859 out:
6860 m_freem(m);
6861 }
6862
6863 void
6864 sctp_over_udp_stop(void)
6865 {
6866 struct socket *sop;
6867
6868 /*
6869 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6870 * for writting!
6871 */
6872 if (SCTP_BASE_INFO(udp_tun_socket) == NULL) {
6873 /* Nothing to do */
6874 return;
6875 }
6876 sop = SCTP_BASE_INFO(udp_tun_socket);
6877 soclose(sop);
6878 SCTP_BASE_INFO(udp_tun_socket) = NULL;
6879 }
/*
 * Create and bind the UDP socket used for SCTP-over-UDP tunneling and
 * install sctp_recv_udp_tunneled_packet() as its kernel input hook.
 * Returns 0 on success or an errno (EINVAL if no tunneling port is
 * configured, EALREADY if already running, or the socreate/sobind/
 * udp_set_kernel_tunneling failure code).
 */
int
sctp_over_udp_start(void)
{
	uint16_t port;
	int ret;
	struct sockaddr_in sin;
	struct socket *sop = NULL;
	struct thread *th;
	struct ucred *cred;

	/*
	 * This function assumes the sysctl caller holds
	 * sctp_sysctl_info_lock() for writing!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (port == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
	if (SCTP_BASE_INFO(udp_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
	th = curthread;
	cred = th->td_ucred;
	if ((ret = socreate(PF_INET, &sop,
	    SOCK_DGRAM, IPPROTO_UDP, cred, th))) {
		return (ret);
	}
	SCTP_BASE_INFO(udp_tun_socket) = sop;
	/* call the special UDP hook */
	ret = udp_set_kernel_tunneling(sop, sctp_recv_udp_tunneled_packet);
	if (ret) {
		goto exit_stage_left;
	}
	/* Ok we have a socket, bind it to the port */
	memset(&sin, 0, sizeof(sin));
	sin.sin_len = sizeof(sin);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	ret = sobind(sop, (struct sockaddr *)&sin, th);
	if (ret) {
		/* Close up, we can't get the port */
exit_stage_left:
		/* sctp_over_udp_stop() closes the socket and clears the global */
		sctp_over_udp_stop();
		return (ret);
	}
	/*
	 * Ok we should now get UDP packets directly to our input routine
	 * sctp_recv_udp_tunneled_packet().
	 */
	return (0);
}
Cache object: 9307cf0c19b41097c3e1ea3a76cb2fd5
|