1 /*-
2 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * a) Redistributions of source code must retain the above copyright notice,
8 * this list of conditions and the following disclaimer.
9 *
10 * b) Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the distribution.
13 *
14 * c) Neither the name of Cisco Systems, Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived
16 * from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31 /* $KAME: sctputil.c,v 1.37 2005/03/07 23:26:09 itojun Exp $ */
32
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #endif
43 #include <netinet/sctp_header.h>
44 #include <netinet/sctp_output.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
47 #include <netinet/sctp_crc32.h>
48 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
49 #include <netinet/sctp_auth.h>
50 #include <netinet/sctp_asconf.h>
51 #include <netinet/sctp_cc_functions.h>
52
53 #define NUMBER_OF_MTU_SIZES 18
54
55
56 #ifndef KTR_SCTP
57 #define KTR_SCTP KTR_SUBSYS
58 #endif
59
60 void
61 sctp_sblog(struct sockbuf *sb,
62 struct sctp_tcb *stcb, int from, int incr)
63 {
64 struct sctp_cwnd_log sctp_clog;
65
66 sctp_clog.x.sb.stcb = stcb;
67 sctp_clog.x.sb.so_sbcc = sb->sb_cc;
68 if (stcb)
69 sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
70 else
71 sctp_clog.x.sb.stcb_sbcc = 0;
72 sctp_clog.x.sb.incr = incr;
73 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
74 SCTP_LOG_EVENT_SB,
75 from,
76 sctp_clog.x.misc.log1,
77 sctp_clog.x.misc.log2,
78 sctp_clog.x.misc.log3,
79 sctp_clog.x.misc.log4);
80 }
81
82 void
83 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
84 {
85 struct sctp_cwnd_log sctp_clog;
86
87 sctp_clog.x.close.inp = (void *)inp;
88 sctp_clog.x.close.sctp_flags = inp->sctp_flags;
89 if (stcb) {
90 sctp_clog.x.close.stcb = (void *)stcb;
91 sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
92 } else {
93 sctp_clog.x.close.stcb = 0;
94 sctp_clog.x.close.state = 0;
95 }
96 sctp_clog.x.close.loc = loc;
97 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
98 SCTP_LOG_EVENT_CLOSE,
99 0,
100 sctp_clog.x.misc.log1,
101 sctp_clog.x.misc.log2,
102 sctp_clog.x.misc.log3,
103 sctp_clog.x.misc.log4);
104 }
105
106
107 void
108 rto_logging(struct sctp_nets *net, int from)
109 {
110 struct sctp_cwnd_log sctp_clog;
111
112 memset(&sctp_clog, 0, sizeof(sctp_clog));
113 sctp_clog.x.rto.net = (void *)net;
114 sctp_clog.x.rto.rtt = net->prev_rtt;
115 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
116 SCTP_LOG_EVENT_RTT,
117 from,
118 sctp_clog.x.misc.log1,
119 sctp_clog.x.misc.log2,
120 sctp_clog.x.misc.log3,
121 sctp_clog.x.misc.log4);
122
123 }
124
125 void
126 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
127 {
128 struct sctp_cwnd_log sctp_clog;
129
130 sctp_clog.x.strlog.stcb = stcb;
131 sctp_clog.x.strlog.n_tsn = tsn;
132 sctp_clog.x.strlog.n_sseq = sseq;
133 sctp_clog.x.strlog.e_tsn = 0;
134 sctp_clog.x.strlog.e_sseq = 0;
135 sctp_clog.x.strlog.strm = stream;
136 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
137 SCTP_LOG_EVENT_STRM,
138 from,
139 sctp_clog.x.misc.log1,
140 sctp_clog.x.misc.log2,
141 sctp_clog.x.misc.log3,
142 sctp_clog.x.misc.log4);
143
144 }
145
146 void
147 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
148 {
149 struct sctp_cwnd_log sctp_clog;
150
151 sctp_clog.x.nagle.stcb = (void *)stcb;
152 sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
153 sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
154 sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
155 sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
156 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
157 SCTP_LOG_EVENT_NAGLE,
158 action,
159 sctp_clog.x.misc.log1,
160 sctp_clog.x.misc.log2,
161 sctp_clog.x.misc.log3,
162 sctp_clog.x.misc.log4);
163 }
164
165
166 void
167 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
168 {
169 struct sctp_cwnd_log sctp_clog;
170
171 sctp_clog.x.sack.cumack = cumack;
172 sctp_clog.x.sack.oldcumack = old_cumack;
173 sctp_clog.x.sack.tsn = tsn;
174 sctp_clog.x.sack.numGaps = gaps;
175 sctp_clog.x.sack.numDups = dups;
176 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
177 SCTP_LOG_EVENT_SACK,
178 from,
179 sctp_clog.x.misc.log1,
180 sctp_clog.x.misc.log2,
181 sctp_clog.x.misc.log3,
182 sctp_clog.x.misc.log4);
183 }
184
185 void
186 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
187 {
188 struct sctp_cwnd_log sctp_clog;
189
190 memset(&sctp_clog, 0, sizeof(sctp_clog));
191 sctp_clog.x.map.base = map;
192 sctp_clog.x.map.cum = cum;
193 sctp_clog.x.map.high = high;
194 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
195 SCTP_LOG_EVENT_MAP,
196 from,
197 sctp_clog.x.misc.log1,
198 sctp_clog.x.misc.log2,
199 sctp_clog.x.misc.log3,
200 sctp_clog.x.misc.log4);
201 }
202
203 void
204 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
205 int from)
206 {
207 struct sctp_cwnd_log sctp_clog;
208
209 memset(&sctp_clog, 0, sizeof(sctp_clog));
210 sctp_clog.x.fr.largest_tsn = biggest_tsn;
211 sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
212 sctp_clog.x.fr.tsn = tsn;
213 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
214 SCTP_LOG_EVENT_FR,
215 from,
216 sctp_clog.x.misc.log1,
217 sctp_clog.x.misc.log2,
218 sctp_clog.x.misc.log3,
219 sctp_clog.x.misc.log4);
220
221 }
222
223
224 void
225 sctp_log_mb(struct mbuf *m, int from)
226 {
227 struct sctp_cwnd_log sctp_clog;
228
229 sctp_clog.x.mb.mp = m;
230 sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
231 sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
232 sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
233 if (SCTP_BUF_IS_EXTENDED(m)) {
234 sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
235 sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
236 } else {
237 sctp_clog.x.mb.ext = 0;
238 sctp_clog.x.mb.refcnt = 0;
239 }
240 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
241 SCTP_LOG_EVENT_MBUF,
242 from,
243 sctp_clog.x.misc.log1,
244 sctp_clog.x.misc.log2,
245 sctp_clog.x.misc.log3,
246 sctp_clog.x.misc.log4);
247 }
248
249
250 void
251 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk,
252 int from)
253 {
254 struct sctp_cwnd_log sctp_clog;
255
256 if (control == NULL) {
257 SCTP_PRINTF("Gak log of NULL?\n");
258 return;
259 }
260 sctp_clog.x.strlog.stcb = control->stcb;
261 sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
262 sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
263 sctp_clog.x.strlog.strm = control->sinfo_stream;
264 if (poschk != NULL) {
265 sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
266 sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
267 } else {
268 sctp_clog.x.strlog.e_tsn = 0;
269 sctp_clog.x.strlog.e_sseq = 0;
270 }
271 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
272 SCTP_LOG_EVENT_STRM,
273 from,
274 sctp_clog.x.misc.log1,
275 sctp_clog.x.misc.log2,
276 sctp_clog.x.misc.log3,
277 sctp_clog.x.misc.log4);
278
279 }
280
281 void
282 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
283 {
284 struct sctp_cwnd_log sctp_clog;
285
286 sctp_clog.x.cwnd.net = net;
287 if (stcb->asoc.send_queue_cnt > 255)
288 sctp_clog.x.cwnd.cnt_in_send = 255;
289 else
290 sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
291 if (stcb->asoc.stream_queue_cnt > 255)
292 sctp_clog.x.cwnd.cnt_in_str = 255;
293 else
294 sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
295
296 if (net) {
297 sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
298 sctp_clog.x.cwnd.inflight = net->flight_size;
299 sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
300 sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
301 sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
302 }
303 if (SCTP_CWNDLOG_PRESEND == from) {
304 sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
305 }
306 sctp_clog.x.cwnd.cwnd_augment = augment;
307 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
308 SCTP_LOG_EVENT_CWND,
309 from,
310 sctp_clog.x.misc.log1,
311 sctp_clog.x.misc.log2,
312 sctp_clog.x.misc.log3,
313 sctp_clog.x.misc.log4);
314
315 }
316
317 void
318 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
319 {
320 struct sctp_cwnd_log sctp_clog;
321
322 memset(&sctp_clog, 0, sizeof(sctp_clog));
323 if (inp) {
324 sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
325
326 } else {
327 sctp_clog.x.lock.sock = (void *)NULL;
328 }
329 sctp_clog.x.lock.inp = (void *)inp;
330 if (stcb) {
331 sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
332 } else {
333 sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
334 }
335 if (inp) {
336 sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
337 sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
338 } else {
339 sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
340 sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
341 }
342 sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
343 if (inp->sctp_socket) {
344 sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
345 sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
346 sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
347 } else {
348 sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
349 sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
350 sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
351 }
352 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
353 SCTP_LOG_LOCK_EVENT,
354 from,
355 sctp_clog.x.misc.log1,
356 sctp_clog.x.misc.log2,
357 sctp_clog.x.misc.log3,
358 sctp_clog.x.misc.log4);
359
360 }
361
362 void
363 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
364 {
365 struct sctp_cwnd_log sctp_clog;
366
367 memset(&sctp_clog, 0, sizeof(sctp_clog));
368 sctp_clog.x.cwnd.net = net;
369 sctp_clog.x.cwnd.cwnd_new_value = error;
370 sctp_clog.x.cwnd.inflight = net->flight_size;
371 sctp_clog.x.cwnd.cwnd_augment = burst;
372 if (stcb->asoc.send_queue_cnt > 255)
373 sctp_clog.x.cwnd.cnt_in_send = 255;
374 else
375 sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
376 if (stcb->asoc.stream_queue_cnt > 255)
377 sctp_clog.x.cwnd.cnt_in_str = 255;
378 else
379 sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
380 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
381 SCTP_LOG_EVENT_MAXBURST,
382 from,
383 sctp_clog.x.misc.log1,
384 sctp_clog.x.misc.log2,
385 sctp_clog.x.misc.log3,
386 sctp_clog.x.misc.log4);
387
388 }
389
390 void
391 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
392 {
393 struct sctp_cwnd_log sctp_clog;
394
395 sctp_clog.x.rwnd.rwnd = peers_rwnd;
396 sctp_clog.x.rwnd.send_size = snd_size;
397 sctp_clog.x.rwnd.overhead = overhead;
398 sctp_clog.x.rwnd.new_rwnd = 0;
399 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
400 SCTP_LOG_EVENT_RWND,
401 from,
402 sctp_clog.x.misc.log1,
403 sctp_clog.x.misc.log2,
404 sctp_clog.x.misc.log3,
405 sctp_clog.x.misc.log4);
406 }
407
408 void
409 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
410 {
411 struct sctp_cwnd_log sctp_clog;
412
413 sctp_clog.x.rwnd.rwnd = peers_rwnd;
414 sctp_clog.x.rwnd.send_size = flight_size;
415 sctp_clog.x.rwnd.overhead = overhead;
416 sctp_clog.x.rwnd.new_rwnd = a_rwndval;
417 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
418 SCTP_LOG_EVENT_RWND,
419 from,
420 sctp_clog.x.misc.log1,
421 sctp_clog.x.misc.log2,
422 sctp_clog.x.misc.log3,
423 sctp_clog.x.misc.log4);
424 }
425
426 void
427 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
428 {
429 struct sctp_cwnd_log sctp_clog;
430
431 sctp_clog.x.mbcnt.total_queue_size = total_oq;
432 sctp_clog.x.mbcnt.size_change = book;
433 sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
434 sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
435 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
436 SCTP_LOG_EVENT_MBCNT,
437 from,
438 sctp_clog.x.misc.log1,
439 sctp_clog.x.misc.log2,
440 sctp_clog.x.misc.log3,
441 sctp_clog.x.misc.log4);
442
443 }
444
445 void
446 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
447 {
448 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
449 SCTP_LOG_MISC_EVENT,
450 from,
451 a, b, c, d);
452 }
453
454 void
455 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t cumtsn, uint32_t wake_cnt, int from)
456 {
457 struct sctp_cwnd_log sctp_clog;
458
459 sctp_clog.x.wake.stcb = (void *)stcb;
460 sctp_clog.x.wake.wake_cnt = wake_cnt;
461 sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
462 sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
463 sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
464
465 if (stcb->asoc.stream_queue_cnt < 0xff)
466 sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
467 else
468 sctp_clog.x.wake.stream_qcnt = 0xff;
469
470 if (stcb->asoc.chunks_on_out_queue < 0xff)
471 sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
472 else
473 sctp_clog.x.wake.chunks_on_oque = 0xff;
474
475 sctp_clog.x.wake.sctpflags = 0;
476 /* set in the defered mode stuff */
477 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
478 sctp_clog.x.wake.sctpflags |= 1;
479 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
480 sctp_clog.x.wake.sctpflags |= 2;
481 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
482 sctp_clog.x.wake.sctpflags |= 4;
483 /* what about the sb */
484 if (stcb->sctp_socket) {
485 struct socket *so = stcb->sctp_socket;
486
487 sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
488 } else {
489 sctp_clog.x.wake.sbflags = 0xff;
490 }
491 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
492 SCTP_LOG_EVENT_WAKE,
493 from,
494 sctp_clog.x.misc.log1,
495 sctp_clog.x.misc.log2,
496 sctp_clog.x.misc.log3,
497 sctp_clog.x.misc.log4);
498
499 }
500
501 void
502 sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc, int sendlen)
503 {
504 struct sctp_cwnd_log sctp_clog;
505
506 sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
507 sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
508 sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
509 sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
510 sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
511 sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
512 sctp_clog.x.blk.sndlen = sendlen;
513 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
514 SCTP_LOG_EVENT_BLOCK,
515 from,
516 sctp_clog.x.misc.log1,
517 sctp_clog.x.misc.log2,
518 sctp_clog.x.misc.log3,
519 sctp_clog.x.misc.log4);
520
521 }
522
/*
 * Stub for retrieving the stat log via a socket option.  Currently a
 * no-op that reports success without touching optval/optsize — the KTR
 * trace stream (read with ktrdump) is used instead.
 */
int
sctp_fill_stat_log(void *optval, size_t *optsize)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
529
530 #ifdef SCTP_AUDITING_ENABLED
531 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
532 static int sctp_audit_indx = 0;
533
534 static
535 void
536 sctp_print_audit_report(void)
537 {
538 int i;
539 int cnt;
540
541 cnt = 0;
542 for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
543 if ((sctp_audit_data[i][0] == 0xe0) &&
544 (sctp_audit_data[i][1] == 0x01)) {
545 cnt = 0;
546 SCTP_PRINTF("\n");
547 } else if (sctp_audit_data[i][0] == 0xf0) {
548 cnt = 0;
549 SCTP_PRINTF("\n");
550 } else if ((sctp_audit_data[i][0] == 0xc0) &&
551 (sctp_audit_data[i][1] == 0x01)) {
552 SCTP_PRINTF("\n");
553 cnt = 0;
554 }
555 SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
556 (uint32_t) sctp_audit_data[i][1]);
557 cnt++;
558 if ((cnt % 14) == 0)
559 SCTP_PRINTF("\n");
560 }
561 for (i = 0; i < sctp_audit_indx; i++) {
562 if ((sctp_audit_data[i][0] == 0xe0) &&
563 (sctp_audit_data[i][1] == 0x01)) {
564 cnt = 0;
565 SCTP_PRINTF("\n");
566 } else if (sctp_audit_data[i][0] == 0xf0) {
567 cnt = 0;
568 SCTP_PRINTF("\n");
569 } else if ((sctp_audit_data[i][0] == 0xc0) &&
570 (sctp_audit_data[i][1] == 0x01)) {
571 SCTP_PRINTF("\n");
572 cnt = 0;
573 }
574 SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
575 (uint32_t) sctp_audit_data[i][1]);
576 cnt++;
577 if ((cnt % 14) == 0)
578 SCTP_PRINTF("\n");
579 }
580 SCTP_PRINTF("\n");
581 }
582
583 void
584 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
585 struct sctp_nets *net)
586 {
587 int resend_cnt, tot_out, rep, tot_book_cnt;
588 struct sctp_nets *lnet;
589 struct sctp_tmit_chunk *chk;
590
591 sctp_audit_data[sctp_audit_indx][0] = 0xAA;
592 sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
593 sctp_audit_indx++;
594 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
595 sctp_audit_indx = 0;
596 }
597 if (inp == NULL) {
598 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
599 sctp_audit_data[sctp_audit_indx][1] = 0x01;
600 sctp_audit_indx++;
601 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
602 sctp_audit_indx = 0;
603 }
604 return;
605 }
606 if (stcb == NULL) {
607 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
608 sctp_audit_data[sctp_audit_indx][1] = 0x02;
609 sctp_audit_indx++;
610 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
611 sctp_audit_indx = 0;
612 }
613 return;
614 }
615 sctp_audit_data[sctp_audit_indx][0] = 0xA1;
616 sctp_audit_data[sctp_audit_indx][1] =
617 (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
618 sctp_audit_indx++;
619 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
620 sctp_audit_indx = 0;
621 }
622 rep = 0;
623 tot_book_cnt = 0;
624 resend_cnt = tot_out = 0;
625 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
626 if (chk->sent == SCTP_DATAGRAM_RESEND) {
627 resend_cnt++;
628 } else if (chk->sent < SCTP_DATAGRAM_RESEND) {
629 tot_out += chk->book_size;
630 tot_book_cnt++;
631 }
632 }
633 if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
634 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
635 sctp_audit_data[sctp_audit_indx][1] = 0xA1;
636 sctp_audit_indx++;
637 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
638 sctp_audit_indx = 0;
639 }
640 SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
641 resend_cnt, stcb->asoc.sent_queue_retran_cnt);
642 rep = 1;
643 stcb->asoc.sent_queue_retran_cnt = resend_cnt;
644 sctp_audit_data[sctp_audit_indx][0] = 0xA2;
645 sctp_audit_data[sctp_audit_indx][1] =
646 (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
647 sctp_audit_indx++;
648 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
649 sctp_audit_indx = 0;
650 }
651 }
652 if (tot_out != stcb->asoc.total_flight) {
653 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
654 sctp_audit_data[sctp_audit_indx][1] = 0xA2;
655 sctp_audit_indx++;
656 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
657 sctp_audit_indx = 0;
658 }
659 rep = 1;
660 SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
661 (int)stcb->asoc.total_flight);
662 stcb->asoc.total_flight = tot_out;
663 }
664 if (tot_book_cnt != stcb->asoc.total_flight_count) {
665 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
666 sctp_audit_data[sctp_audit_indx][1] = 0xA5;
667 sctp_audit_indx++;
668 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
669 sctp_audit_indx = 0;
670 }
671 rep = 1;
672 SCTP_PRINTF("tot_flt_book:%d\n", tot_book);
673
674 stcb->asoc.total_flight_count = tot_book_cnt;
675 }
676 tot_out = 0;
677 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
678 tot_out += lnet->flight_size;
679 }
680 if (tot_out != stcb->asoc.total_flight) {
681 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
682 sctp_audit_data[sctp_audit_indx][1] = 0xA3;
683 sctp_audit_indx++;
684 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
685 sctp_audit_indx = 0;
686 }
687 rep = 1;
688 SCTP_PRINTF("real flight:%d net total was %d\n",
689 stcb->asoc.total_flight, tot_out);
690 /* now corrective action */
691 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
692
693 tot_out = 0;
694 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
695 if ((chk->whoTo == lnet) &&
696 (chk->sent < SCTP_DATAGRAM_RESEND)) {
697 tot_out += chk->book_size;
698 }
699 }
700 if (lnet->flight_size != tot_out) {
701 SCTP_PRINTF("net:%x flight was %d corrected to %d\n",
702 (uint32_t) lnet, lnet->flight_size,
703 tot_out);
704 lnet->flight_size = tot_out;
705 }
706 }
707 }
708 if (rep) {
709 sctp_print_audit_report();
710 }
711 }
712
713 void
714 sctp_audit_log(uint8_t ev, uint8_t fd)
715 {
716
717 sctp_audit_data[sctp_audit_indx][0] = ev;
718 sctp_audit_data[sctp_audit_indx][1] = fd;
719 sctp_audit_indx++;
720 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
721 sctp_audit_indx = 0;
722 }
723 }
724
725 #endif
726
727 /*
728 * a list of sizes based on typical mtu's, used only if next hop size not
729 * returned.
730 */
/*
 * NOTE(review): these values appear to mirror the RFC 1191 MTU plateau
 * table (common link-layer MTUs such as 576 dial-up, 1500 Ethernet,
 * 4352 FDDI, 65535 maximum IP datagram) — kept sorted ascending, which
 * find_next_best_mtu() relies on.
 */
static int sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};
751
752 void
753 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
754 {
755 struct sctp_association *asoc;
756 struct sctp_nets *net;
757
758 asoc = &stcb->asoc;
759
760 (void)SCTP_OS_TIMER_STOP(&asoc->hb_timer.timer);
761 (void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
762 (void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
763 (void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
764 (void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
765 (void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
766 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
767 (void)SCTP_OS_TIMER_STOP(&net->fr_timer.timer);
768 (void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
769 }
770 }
771
772 int
773 find_next_best_mtu(int totsz)
774 {
775 int i, perfer;
776
777 /*
778 * if we are in here we must find the next best fit based on the
779 * size of the dg that failed to be sent.
780 */
781 perfer = 0;
782 for (i = 0; i < NUMBER_OF_MTU_SIZES; i++) {
783 if (totsz < sctp_mtu_sizes[i]) {
784 perfer = i - 1;
785 if (perfer < 0)
786 perfer = 0;
787 break;
788 }
789 }
790 return (sctp_mtu_sizes[perfer]);
791 }
792
/*
 * Refill the endpoint's random store by hashing the endpoint's random
 * numbers together with a monotonically increasing counter, then reset
 * the consumption offset.  Deliberately lock-free: see the comment
 * below on why concurrent refills are harmless here.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	/* keyed HMAC over the counter; output lands in random_store */
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	/* bump the counter so the next refill produces different bytes */
	m->random_counter++;
}
811
/*
 * Return a 32-bit value from the endpoint's random store, reserving a
 * 4-byte slot via a lock-free compare-and-swap on store_at and
 * refilling the store when it wraps.
 *
 * When initial_sequence_debug is non-zero, deterministic sequential
 * values are handed out instead (debug aid).
 */
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use random selection process to get
	 * the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	store_at = inp->store_at;
	/* the "- 3" guard keeps a full uint32_t readable at store_at */
	new_store = store_at + sizeof(uint32_t);
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	/* claim [store_at, store_at+4) atomically; retry if we lost a race */
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/*
		 * Refill the random store.  NOTE(review): the refill runs
		 * after our slot was claimed but before the read below;
		 * per the comment in sctp_fill_random_store, races here
		 * are considered harmless.
		 */
		sctp_fill_random_store(inp);
	}
	p = &inp->random_store[store_at];
	xp = (uint32_t *) p;
	x = *xp;
	return (x);
}
849
850 uint32_t
851 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int save_in_twait)
852 {
853 u_long x, not_done;
854 struct timeval now;
855
856 (void)SCTP_GETTIME_TIMEVAL(&now);
857 not_done = 1;
858 while (not_done) {
859 x = sctp_select_initial_TSN(&inp->sctp_ep);
860 if (x == 0) {
861 /* we never use 0 */
862 continue;
863 }
864 if (sctp_is_vtag_good(inp, x, lport, rport, &now, save_in_twait)) {
865 not_done = 0;
866 }
867 }
868 return (x);
869 }
870
871 int
872 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
873 int for_a_init, uint32_t override_tag, uint32_t vrf_id)
874 {
875 struct sctp_association *asoc;
876
877 /*
878 * Anything set to zero is taken care of by the allocation routine's
879 * bzero
880 */
881
882 /*
883 * Up front select what scoping to apply on addresses I tell my peer
884 * Not sure what to do with these right now, we will need to come up
885 * with a way to set them. We may need to pass them through from the
886 * caller in the sctp_aloc_assoc() function.
887 */
888 int i;
889
890 asoc = &stcb->asoc;
891 /* init all variables to a known value. */
892 SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
893 asoc->max_burst = m->sctp_ep.max_burst;
894 asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
895 asoc->cookie_life = m->sctp_ep.def_cookie_life;
896 asoc->sctp_cmt_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_on_off);
897 /* EY Init nr_sack variable */
898 asoc->sctp_nr_sack_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_nr_sack_on_off);
899 /* JRS 5/21/07 - Init CMT PF variables */
900 asoc->sctp_cmt_pf = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_pf);
901 asoc->sctp_frag_point = m->sctp_frag_point;
902 #ifdef INET
903 asoc->default_tos = m->ip_inp.inp.inp_ip_tos;
904 #else
905 asoc->default_tos = 0;
906 #endif
907
908 #ifdef INET6
909 asoc->default_flowlabel = ((struct in6pcb *)m)->in6p_flowinfo;
910 #else
911 asoc->default_flowlabel = 0;
912 #endif
913 asoc->sb_send_resv = 0;
914 if (override_tag) {
915 asoc->my_vtag = override_tag;
916 } else {
917 asoc->my_vtag = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
918 }
919 /* Get the nonce tags */
920 asoc->my_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
921 asoc->peer_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
922 asoc->vrf_id = vrf_id;
923
924 if (sctp_is_feature_on(m, SCTP_PCB_FLAGS_DONOT_HEARTBEAT))
925 asoc->hb_is_disabled = 1;
926 else
927 asoc->hb_is_disabled = 0;
928
929 #ifdef SCTP_ASOCLOG_OF_TSNS
930 asoc->tsn_in_at = 0;
931 asoc->tsn_out_at = 0;
932 asoc->tsn_in_wrapped = 0;
933 asoc->tsn_out_wrapped = 0;
934 asoc->cumack_log_at = 0;
935 asoc->cumack_log_atsnt = 0;
936 #endif
937 #ifdef SCTP_FS_SPEC_LOG
938 asoc->fs_index = 0;
939 #endif
940 asoc->refcnt = 0;
941 asoc->assoc_up_sent = 0;
942 asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
943 sctp_select_initial_TSN(&m->sctp_ep);
944 asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
945 /* we are optimisitic here */
946 asoc->peer_supports_pktdrop = 1;
947 asoc->peer_supports_nat = 0;
948 asoc->sent_queue_retran_cnt = 0;
949
950 /* for CMT */
951 asoc->last_net_data_came_from = NULL;
952
953 /* This will need to be adjusted */
954 asoc->last_cwr_tsn = asoc->init_seq_number - 1;
955 asoc->last_acked_seq = asoc->init_seq_number - 1;
956 asoc->advanced_peer_ack_point = asoc->last_acked_seq;
957 asoc->asconf_seq_in = asoc->last_acked_seq;
958
959 /* here we are different, we hold the next one we expect */
960 asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
961
962 asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
963 asoc->initial_rto = m->sctp_ep.initial_rto;
964
965 asoc->max_init_times = m->sctp_ep.max_init_times;
966 asoc->max_send_times = m->sctp_ep.max_send_times;
967 asoc->def_net_failure = m->sctp_ep.def_net_failure;
968 asoc->free_chunk_cnt = 0;
969
970 asoc->iam_blocking = 0;
971 /* ECN Nonce initialization */
972 asoc->context = m->sctp_context;
973 asoc->def_send = m->def_send;
974 asoc->ecn_nonce_allowed = 0;
975 asoc->receiver_nonce_sum = 1;
976 asoc->nonce_sum_expect_base = 1;
977 asoc->nonce_sum_check = 1;
978 asoc->nonce_resync_tsn = 0;
979 asoc->nonce_wait_for_ecne = 0;
980 asoc->nonce_wait_tsn = 0;
981 asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
982 asoc->sack_freq = m->sctp_ep.sctp_sack_freq;
983 asoc->pr_sctp_cnt = 0;
984 asoc->total_output_queue_size = 0;
985
986 if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
987 struct in6pcb *inp6;
988
989 /* Its a V6 socket */
990 inp6 = (struct in6pcb *)m;
991 asoc->ipv6_addr_legal = 1;
992 /* Now look at the binding flag to see if V4 will be legal */
993 if (SCTP_IPV6_V6ONLY(inp6) == 0) {
994 asoc->ipv4_addr_legal = 1;
995 } else {
996 /* V4 addresses are NOT legal on the association */
997 asoc->ipv4_addr_legal = 0;
998 }
999 } else {
1000 /* Its a V4 socket, no - V6 */
1001 asoc->ipv4_addr_legal = 1;
1002 asoc->ipv6_addr_legal = 0;
1003 }
1004
1005 asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND);
1006 asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket);
1007
1008 asoc->smallest_mtu = m->sctp_frag_point;
1009 #ifdef SCTP_PRINT_FOR_B_AND_M
1010 SCTP_PRINTF("smallest_mtu init'd with asoc to :%d\n",
1011 asoc->smallest_mtu);
1012 #endif
1013 asoc->minrto = m->sctp_ep.sctp_minrto;
1014 asoc->maxrto = m->sctp_ep.sctp_maxrto;
1015
1016 asoc->locked_on_sending = NULL;
1017 asoc->stream_locked_on = 0;
1018 asoc->ecn_echo_cnt_onq = 0;
1019 asoc->stream_locked = 0;
1020
1021 asoc->send_sack = 1;
1022
1023 LIST_INIT(&asoc->sctp_restricted_addrs);
1024
1025 TAILQ_INIT(&asoc->nets);
1026 TAILQ_INIT(&asoc->pending_reply_queue);
1027 TAILQ_INIT(&asoc->asconf_ack_sent);
1028 /* Setup to fill the hb random cache at first HB */
1029 asoc->hb_random_idx = 4;
1030
1031 asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
1032
1033 /*
1034 * JRS - Pick the default congestion control module based on the
1035 * sysctl.
1036 */
1037 switch (m->sctp_ep.sctp_default_cc_module) {
1038 /* JRS - Standard TCP congestion control */
1039 case SCTP_CC_RFC2581:
1040 {
1041 stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
1042 stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1043 stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
1044 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
1045 stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1046 stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1047 stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1048 stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1049 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1050 break;
1051 }
1052 /* JRS - High Speed TCP congestion control (Floyd) */
1053 case SCTP_CC_HSTCP:
1054 {
1055 stcb->asoc.congestion_control_module = SCTP_CC_HSTCP;
1056 stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1057 stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_hs_cwnd_update_after_sack;
1058 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_hs_cwnd_update_after_fr;
1059 stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1060 stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1061 stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1062 stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1063 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1064 break;
1065 }
1066 /* JRS - HTCP congestion control */
1067 case SCTP_CC_HTCP:
1068 {
1069 stcb->asoc.congestion_control_module = SCTP_CC_HTCP;
1070 stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_htcp_set_initial_cc_param;
1071 stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_htcp_cwnd_update_after_sack;
1072 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_htcp_cwnd_update_after_fr;
1073 stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_htcp_cwnd_update_after_timeout;
1074 stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_htcp_cwnd_update_after_ecn_echo;
1075 stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1076 stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1077 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_htcp_cwnd_update_after_fr_timer;
1078 break;
1079 }
1080 /* JRS - By default, use RFC2581 */
1081 default:
1082 {
1083 stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
1084 stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1085 stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
1086 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
1087 stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1088 stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1089 stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1090 stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1091 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1092 break;
1093 }
1094 }
1095
1096 /*
1097 * Now the stream parameters, here we allocate space for all streams
1098 * that we request by default.
1099 */
1100 asoc->streamoutcnt = asoc->pre_open_streams =
1101 m->sctp_ep.pre_open_stream_count;
1102 SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1103 asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1104 SCTP_M_STRMO);
1105 if (asoc->strmout == NULL) {
1106 /* big trouble no memory */
1107 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1108 return (ENOMEM);
1109 }
1110 for (i = 0; i < asoc->streamoutcnt; i++) {
1111 /*
1112 * inbound side must be set to 0xffff, also NOTE when we get
1113 * the INIT-ACK back (for INIT sender) we MUST reduce the
1114 * count (streamoutcnt) but first check if we sent to any of
1115 * the upper streams that were dropped (if some were). Those
1116 * that were dropped must be notified to the upper layer as
1117 * failed to send.
1118 */
1119 asoc->strmout[i].next_sequence_sent = 0x0;
1120 TAILQ_INIT(&asoc->strmout[i].outqueue);
1121 asoc->strmout[i].stream_no = i;
1122 asoc->strmout[i].last_msg_incomplete = 0;
1123 asoc->strmout[i].next_spoke.tqe_next = 0;
1124 asoc->strmout[i].next_spoke.tqe_prev = 0;
1125 }
1126 /* Now the mapping array */
1127 asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1128 SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1129 SCTP_M_MAP);
1130 if (asoc->mapping_array == NULL) {
1131 SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1132 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1133 return (ENOMEM);
1134 }
1135 memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1136 /* EY - initialize the nr_mapping_array just like mapping array */
1137 asoc->nr_mapping_array_size = SCTP_INITIAL_NR_MAPPING_ARRAY;
1138 SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->nr_mapping_array_size,
1139 SCTP_M_MAP);
1140 if (asoc->nr_mapping_array == NULL) {
1141 SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1142 SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1143 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1144 return (ENOMEM);
1145 }
1146 memset(asoc->nr_mapping_array, 0, asoc->nr_mapping_array_size);
1147
1148 /* Now the init of the other outqueues */
1149 TAILQ_INIT(&asoc->free_chunks);
1150 TAILQ_INIT(&asoc->out_wheel);
1151 TAILQ_INIT(&asoc->control_send_queue);
1152 TAILQ_INIT(&asoc->asconf_send_queue);
1153 TAILQ_INIT(&asoc->send_queue);
1154 TAILQ_INIT(&asoc->sent_queue);
1155 TAILQ_INIT(&asoc->reasmqueue);
1156 TAILQ_INIT(&asoc->resetHead);
1157 asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
1158 TAILQ_INIT(&asoc->asconf_queue);
1159 /* authentication fields */
1160 asoc->authinfo.random = NULL;
1161 asoc->authinfo.active_keyid = 0;
1162 asoc->authinfo.assoc_key = NULL;
1163 asoc->authinfo.assoc_keyid = 0;
1164 asoc->authinfo.recv_key = NULL;
1165 asoc->authinfo.recv_keyid = 0;
1166 LIST_INIT(&asoc->shared_keys);
1167 asoc->marked_retrans = 0;
1168 asoc->timoinit = 0;
1169 asoc->timodata = 0;
1170 asoc->timosack = 0;
1171 asoc->timoshutdown = 0;
1172 asoc->timoheartbeat = 0;
1173 asoc->timocookie = 0;
1174 asoc->timoshutdownack = 0;
1175 (void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1176 asoc->discontinuity_time = asoc->start_time;
1177 /*
1178 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1179 * freed later whe the association is freed.
1180 */
1181 return (0);
1182 }
1183
1184 int
1185 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1186 {
1187 /* mapping array needs to grow */
1188 uint8_t *new_array;
1189 uint32_t new_size;
1190
1191 new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1192 SCTP_MALLOC(new_array, uint8_t *, new_size, SCTP_M_MAP);
1193 if (new_array == NULL) {
1194 /* can't get more, forget it */
1195 SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n",
1196 new_size);
1197 return (-1);
1198 }
1199 memset(new_array, 0, new_size);
1200 memcpy(new_array, asoc->mapping_array, asoc->mapping_array_size);
1201 SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1202 asoc->mapping_array = new_array;
1203 asoc->mapping_array_size = new_size;
1204 return (0);
1205 }
1206
1207 /* EY - nr_sack version of the above method */
1208 int
1209 sctp_expand_nr_mapping_array(struct sctp_association *asoc, uint32_t needed)
1210 {
1211 /* nr mapping array needs to grow */
1212 uint8_t *new_array;
1213 uint32_t new_size;
1214
1215 new_size = asoc->nr_mapping_array_size + ((needed + 7) / 8 + SCTP_NR_MAPPING_ARRAY_INCR);
1216 SCTP_MALLOC(new_array, uint8_t *, new_size, SCTP_M_MAP);
1217 if (new_array == NULL) {
1218 /* can't get more, forget it */
1219 SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n",
1220 new_size);
1221 return (-1);
1222 }
1223 memset(new_array, 0, new_size);
1224 memcpy(new_array, asoc->nr_mapping_array, asoc->nr_mapping_array_size);
1225 SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1226 asoc->nr_mapping_array = new_array;
1227 asoc->nr_mapping_array_size = new_size;
1228 return (0);
1229 }
1230
1231 #if defined(SCTP_USE_THREAD_BASED_ITERATOR)
/*
 * Core of the thread-based iterator: walk the endpoint (inp) list,
 * and for each endpoint whose flags/features match the iterator's
 * filters, visit every association in the desired state, invoking the
 * caller-supplied per-endpoint and per-association callbacks.  When
 * the walk finishes, the at-end callback runs and the iterator struct
 * itself is freed.  Lock ordering here (ITERATOR lock, INP write/read
 * locks, TCB locks, refcounts) is delicate -- do not reorder calls.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;

	SCTP_ITERATOR_LOCK();
	/* Drop the reference taken when this inp was handed to us. */
	if (it->inp) {
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		/* The iterator owns itself; free it on completion. */
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	SCTP_INP_WLOCK(it->inp);
	/* Skip endpoints whose pcb flags/features do not match the filter. */
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_WUNLOCK(it->inp);
			goto done_with_iterator;
		}
		SCTP_INP_WUNLOCK(it->inp);
		it->inp = LIST_NEXT(it->inp, sctp_list);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_WLOCK(it->inp);
	}

	/*
	 * NOTE(review): write lock is released before the read lock is
	 * taken, leaving a brief unlocked window on it->inp -- confirm
	 * this is safe under the ITERATOR lock held above.
	 */
	SCTP_INP_WUNLOCK(it->inp);
	SCTP_INP_RLOCK(it->inp);

	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		/* Per-inp callback asked us to skip, or no assocs exist. */
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/*
			 * Hold a refcount on the stcb and inp so they
			 * survive the lock release/reacquire cycle below.
			 */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);

			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_ITERATOR_LOCK();
			SCTP_INP_RLOCK(it->inp);

			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	/* Brief write lock/unlock acts as a barrier against concurrent users. */
	SCTP_INP_WLOCK(it->inp);
	SCTP_INP_WUNLOCK(it->inp);
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		SCTP_INP_INFO_RLOCK();
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_INFO_RUNLOCK();
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1358
1359 void
1360 sctp_iterator_worker(void)
1361 {
1362 struct sctp_iterator *it = NULL;
1363
1364 /* This function is called with the WQ lock in place */
1365
1366 SCTP_BASE_INFO(iterator_running) = 1;
1367 again:
1368 it = TAILQ_FIRST(&SCTP_BASE_INFO(iteratorhead));
1369 while (it) {
1370 /* now lets work on this one */
1371 TAILQ_REMOVE(&SCTP_BASE_INFO(iteratorhead), it, sctp_nxt_itr);
1372 SCTP_IPI_ITERATOR_WQ_UNLOCK();
1373 sctp_iterator_work(it);
1374 SCTP_IPI_ITERATOR_WQ_LOCK();
1375 /* sa_ignore FREED_MEMORY */
1376 it = TAILQ_FIRST(&SCTP_BASE_INFO(iteratorhead));
1377 }
1378 if (TAILQ_FIRST(&SCTP_BASE_INFO(iteratorhead))) {
1379 goto again;
1380 }
1381 SCTP_BASE_INFO(iterator_running) = 0;
1382 return;
1383 }
1384
1385 #endif
1386
1387
1388 static void
1389 sctp_handle_addr_wq(void)
1390 {
1391 /* deal with the ADDR wq from the rtsock calls */
1392 struct sctp_laddr *wi;
1393 struct sctp_asconf_iterator *asc;
1394
1395 SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1396 sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1397 if (asc == NULL) {
1398 /* Try later, no memory */
1399 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1400 (struct sctp_inpcb *)NULL,
1401 (struct sctp_tcb *)NULL,
1402 (struct sctp_nets *)NULL);
1403 return;
1404 }
1405 LIST_INIT(&asc->list_of_work);
1406 asc->cnt = 0;
1407 SCTP_IPI_ITERATOR_WQ_LOCK();
1408 wi = LIST_FIRST(&SCTP_BASE_INFO(addr_wq));
1409 while (wi != NULL) {
1410 LIST_REMOVE(wi, sctp_nxt_addr);
1411 LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1412 asc->cnt++;
1413 wi = LIST_FIRST(&SCTP_BASE_INFO(addr_wq));
1414 }
1415 SCTP_IPI_ITERATOR_WQ_UNLOCK();
1416 if (asc->cnt == 0) {
1417 SCTP_FREE(asc, SCTP_M_ASC_IT);
1418 } else {
1419 (void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1420 sctp_asconf_iterator_stcb,
1421 NULL, /* No ep end for boundall */
1422 SCTP_PCB_FLAGS_BOUNDALL,
1423 SCTP_PCB_ANY_FEATURES,
1424 SCTP_ASOC_ANY_STATE,
1425 (void *)asc, 0,
1426 sctp_asconf_iterator_end, NULL, 0);
1427 }
1428 }
1429
/*
 * NOTE(review): file-scope scratch variables used only by
 * sctp_timeout_handler()'s T3-send case.  They look like they should
 * be locals of that function -- as globals they could be clobbered if
 * two timeouts ever ran concurrently; confirm the timer callout
 * context is single-threaded before relying on them.
 */
int retcode = 0;
int cur_oerr = 0;
1432
/*
 * Common callout entry point for every SCTP timer.  "t" is the
 * struct sctp_timer embedded in the owning object; from it we recover
 * the endpoint (inp), association (stcb) and destination (net), run a
 * series of staleness/validity checks (recording progress in
 * tmr->stopped_from for post-mortem debugging), take the needed
 * references and locks, and then dispatch on tmr->type to the
 * specific timer action.  Most cases end by pushing queued chunks out
 * via sctp_chunk_output().
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	int did_output, type;
	struct sctp_iterator *it = NULL;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	/* self-pointer check guards against a stale/reused timer struct */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * tmr);
		 */
		return;
	}
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		return;
	}
	tmr->stopped_from = 0xa002;
	/* every type except ADDR_WQ requires an endpoint */
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	if (tmr->type == SCTP_TIMER_TYPE_ITERATOR) {
		it = (struct sctp_iterator *)inp;
		inp = NULL;
	}
	type = tmr->type;
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		/*
		 * Socket already gone: bail unless this timer type must
		 * still run to finish tearing things down.
		 * NOTE(review): sctp_socket is a pointer compared against
		 * 0; "== NULL" would be clearer.
		 */
		if ((inp->sctp_socket == 0) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			SCTP_INP_DECR_REF(inp);
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* pin the assoc while we validate it */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			return;
		}
	}
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/* timer was stopped after firing; drop refs and leave */
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		/* take the TCB lock before releasing our temporary ref */
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			return;
		}
	}
	/* record in stopped what t-o occured */
	tmr->stopped_from = tmr->type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (tmr->type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_ITERATOR:
		SCTP_STAT_INCR(sctps_timoiterator);
		sctp_iterator_timer(it);
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timeout */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		cur_oerr = stcb->asoc.overall_error_count;
		retcode = sctp_t3rxt_timer(inp, stcb, net);
		if (retcode) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)
		    ) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			/*
			 * NOTE(review): the comment says "send queue" but
			 * the code reads sent_queue -- presumably the code
			 * is correct; confirm.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* delayed-SACK timeout */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		} {
			int abort_flag;

			SCTP_STAT_INCR(sctps_timosack);
			stcb->asoc.timosack++;
			if (stcb->asoc.cumulative_tsn != stcb->asoc.highest_tsn_inside_map)
				sctp_sack_check(stcb, 0, 0, &abort_flag);

			/*
			 * EY if nr_sacks used then send an nr-sack , a sack
			 * otherwise
			 */
			if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
				sctp_send_nr_sack(stcb);
			else
				sctp_send_sack(stcb);
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		{
			struct sctp_nets *lnet;
			int cnt_of_unconf = 0;

			if ((stcb == NULL) || (inp == NULL)) {
				break;
			}
			SCTP_STAT_INCR(sctps_timoheartbeat);
			stcb->asoc.timoheartbeat++;
			/* count reachable-but-unconfirmed destinations */
			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
					cnt_of_unconf++;
				}
			}
			/*
			 * NOTE(review): after the TAILQ_FOREACH above lnet
			 * is NULL; it is passed as-is below -- presumably
			 * sctp_heartbeat_timer/sctp_timer_start accept a
			 * NULL net here; confirm.
			 */
			if (cnt_of_unconf == 0) {
				if (sctp_heartbeat_timer(inp, stcb, lnet,
				    cnt_of_unconf)) {
					/* no need to unlock on tcb its gone */
					goto out_decr;
				}
			}
#ifdef SCTP_AUDITING_ENABLED
			sctp_auditing(4, inp, stcb, lnet);
#endif
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT,
			    stcb->sctp_ep, stcb, lnet);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		{
			/* rotate the endpoint's cookie secret keys */
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		/* guard expired: give up on graceful shutdown and abort */
		sctp_abort_an_association(inp, stcb,
		    SCTP_SHUTDOWN_GUARD_EXPIRES, NULL, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_EARLYFR:
		/* Need to do FR of things for net */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoearlyfr);
		sctp_early_fr_timer(inp, stcb, net);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_DIRECTLY_NOCMPSET);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    tmr->type);
		break;
	};
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) tmr->type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	/* common exit: release TCB lock and the inp reference we took */
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
	    type);
}
1909
1910 void
1911 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1912 struct sctp_nets *net)
1913 {
1914 int to_ticks;
1915 struct sctp_timer *tmr;
1916
1917 if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1918 return;
1919
1920 to_ticks = 0;
1921
1922 tmr = NULL;
1923 if (stcb) {
1924 SCTP_TCB_LOCK_ASSERT(stcb);
1925 }
1926 switch (t_type) {
1927 case SCTP_TIMER_TYPE_ZERO_COPY:
1928 tmr = &inp->sctp_ep.zero_copy_timer;
1929 to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1930 break;
1931 case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1932 tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1933 to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1934 break;
1935 case SCTP_TIMER_TYPE_ADDR_WQ:
1936 /* Only 1 tick away :-) */
1937 tmr = &SCTP_BASE_INFO(addr_wq_timer);
1938 to_ticks = SCTP_ADDRESS_TICK_DELAY;
1939 break;
1940 case SCTP_TIMER_TYPE_ITERATOR:
1941 {
1942 struct sctp_iterator *it;
1943
1944 it = (struct sctp_iterator *)inp;
1945 tmr = &it->tmr;
1946 to_ticks = SCTP_ITERATOR_TICKS;
1947 }
1948 break;
1949 case SCTP_TIMER_TYPE_SEND:
1950 /* Here we use the RTO timer */
1951 {
1952 int rto_val;
1953
1954 if ((stcb == NULL) || (net == NULL)) {
1955 return;
1956 }
1957 tmr = &net->rxt_timer;
1958 if (net->RTO == 0) {
1959 rto_val = stcb->asoc.initial_rto;
1960 } else {
1961 rto_val = net->RTO;
1962 }
1963 to_ticks = MSEC_TO_TICKS(rto_val);
1964 }
1965 break;
1966 case SCTP_TIMER_TYPE_INIT:
1967 /*
1968 * Here we use the INIT timer default usually about 1
1969 * minute.
1970 */
1971 if ((stcb == NULL) || (net == NULL)) {
1972 return;
1973 }
1974 tmr = &net->rxt_timer;
1975 if (net->RTO == 0) {
1976 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1977 } else {
1978 to_ticks = MSEC_TO_TICKS(net->RTO);
1979 }
1980 break;
1981 case SCTP_TIMER_TYPE_RECV:
1982 /*
1983 * Here we use the Delayed-Ack timer value from the inp
1984 * ususually about 200ms.
1985 */
1986 if (stcb == NULL) {
1987 return;
1988 }
1989 tmr = &stcb->asoc.dack_timer;
1990 to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
1991 break;
1992 case SCTP_TIMER_TYPE_SHUTDOWN:
1993 /* Here we use the RTO of the destination. */
1994 if ((stcb == NULL) || (net == NULL)) {
1995 return;
1996 }
1997 if (net->RTO == 0) {
1998 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1999 } else {
2000 to_ticks = MSEC_TO_TICKS(net->RTO);
2001 }
2002 tmr = &net->rxt_timer;
2003 break;
2004 case SCTP_TIMER_TYPE_HEARTBEAT:
2005 /*
2006 * the net is used here so that we can add in the RTO. Even
2007 * though we use a different timer. We also add the HB timer
2008 * PLUS a random jitter.
2009 */
2010 if ((inp == NULL) || (stcb == NULL)) {
2011 return;
2012 } else {
2013 uint32_t rndval;
2014 uint8_t this_random;
2015 int cnt_of_unconf = 0;
2016 struct sctp_nets *lnet;
2017
2018 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
2019 if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2020 (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
2021 cnt_of_unconf++;
2022 }
2023 }
2024 if (cnt_of_unconf) {
2025 net = lnet = NULL;
2026 (void)sctp_heartbeat_timer(inp, stcb, lnet, cnt_of_unconf);
2027 }
2028 if (stcb->asoc.hb_random_idx > 3) {
2029 rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2030 memcpy(stcb->asoc.hb_random_values, &rndval,
2031 sizeof(stcb->asoc.hb_random_values));
2032 stcb->asoc.hb_random_idx = 0;
2033 }
2034 this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
2035 stcb->asoc.hb_random_idx++;
2036 stcb->asoc.hb_ect_randombit = 0;
2037 /*
2038 * this_random will be 0 - 256 ms RTO is in ms.
2039 */
2040 if ((stcb->asoc.hb_is_disabled) &&
2041 (cnt_of_unconf == 0)) {
2042 return;
2043 }
2044 if (net) {
2045 int delay;
2046
2047 delay = stcb->asoc.heart_beat_delay;
2048 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
2049 if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2050 ((lnet->dest_state & SCTP_ADDR_OUT_OF_SCOPE) == 0) &&
2051 (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
2052 delay = 0;
2053 }
2054 }
2055 if (net->RTO == 0) {
2056 /* Never been checked */
2057 to_ticks = this_random + stcb->asoc.initial_rto + delay;
2058 } else {
2059 /* set rto_val to the ms */
2060 to_ticks = delay + net->RTO + this_random;
2061 }
2062 } else {
2063 if (cnt_of_unconf) {
2064 to_ticks = this_random + stcb->asoc.initial_rto;
2065 } else {
2066 to_ticks = stcb->asoc.heart_beat_delay + this_random + stcb->asoc.initial_rto;
2067 }
2068 }
2069 /*
2070 * Now we must convert the to_ticks that are now in
2071 * ms to ticks.
2072 */
2073 to_ticks = MSEC_TO_TICKS(to_ticks);
2074 tmr = &stcb->asoc.hb_timer;
2075 }
2076 break;
2077 case SCTP_TIMER_TYPE_COOKIE:
2078 /*
2079 * Here we can use the RTO timer from the network since one
2080 * RTT was compelete. If a retran happened then we will be
2081 * using the RTO initial value.
2082 */
2083 if ((stcb == NULL) || (net == NULL)) {
2084 return;
2085 }
2086 if (net->RTO == 0) {
2087 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2088 } else {
2089 to_ticks = MSEC_TO_TICKS(net->RTO);
2090 }
2091 tmr = &net->rxt_timer;
2092 break;
2093 case SCTP_TIMER_TYPE_NEWCOOKIE:
2094 /*
2095 * nothing needed but the endpoint here ususually about 60
2096 * minutes.
2097 */
2098 if (inp == NULL) {
2099 return;
2100 }
2101 tmr = &inp->sctp_ep.signature_change;
2102 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2103 break;
2104 case SCTP_TIMER_TYPE_ASOCKILL:
2105 if (stcb == NULL) {
2106 return;
2107 }
2108 tmr = &stcb->asoc.strreset_timer;
2109 to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2110 break;
2111 case SCTP_TIMER_TYPE_INPKILL:
2112 /*
2113 * The inp is setup to die. We re-use the signature_chage
2114 * timer since that has stopped and we are in the GONE
2115 * state.
2116 */
2117 if (inp == NULL) {
2118 return;
2119 }
2120 tmr = &inp->sctp_ep.signature_change;
2121 to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2122 break;
2123 case SCTP_TIMER_TYPE_PATHMTURAISE:
2124 /*
2125 * Here we use the value found in the EP for PMTU ususually
2126 * about 10 minutes.
2127 */
2128 if ((stcb == NULL) || (inp == NULL)) {
2129 return;
2130 }
2131 if (net == NULL) {
2132 return;
2133 }
2134 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2135 tmr = &net->pmtu_timer;
2136 break;
2137 case SCTP_TIMER_TYPE_SHUTDOWNACK:
2138 /* Here we use the RTO of the destination */
2139 if ((stcb == NULL) || (net == NULL)) {
2140 return;
2141 }
2142 if (net->RTO == 0) {
2143 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2144 } else {
2145 to_ticks = MSEC_TO_TICKS(net->RTO);
2146 }
2147 tmr = &net->rxt_timer;
2148 break;
2149 case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2150 /*
2151 * Here we use the endpoints shutdown guard timer usually
2152 * about 3 minutes.
2153 */
2154 if ((inp == NULL) || (stcb == NULL)) {
2155 return;
2156 }
2157 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2158 tmr = &stcb->asoc.shut_guard_timer;
2159 break;
2160 case SCTP_TIMER_TYPE_STRRESET:
2161 /*
2162 * Here the timer comes from the stcb but its value is from
2163 * the net's RTO.
2164 */
2165 if ((stcb == NULL) || (net == NULL)) {
2166 return;
2167 }
2168 if (net->RTO == 0) {
2169 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2170 } else {
2171 to_ticks = MSEC_TO_TICKS(net->RTO);
2172 }
2173 tmr = &stcb->asoc.strreset_timer;
2174 break;
2175
2176 case SCTP_TIMER_TYPE_EARLYFR:
2177 {
2178 unsigned int msec;
2179
2180 if ((stcb == NULL) || (net == NULL)) {
2181 return;
2182 }
2183 if (net->flight_size > net->cwnd) {
2184 /* no need to start */
2185 return;
2186 }
2187 SCTP_STAT_INCR(sctps_earlyfrstart);
2188 if (net->lastsa == 0) {
2189 /* Hmm no rtt estimate yet? */
2190 msec = stcb->asoc.initial_rto >> 2;
2191 } else {
2192 msec = ((net->lastsa >> 2) + net->lastsv) >> 1;
2193 }
2194 if (msec < SCTP_BASE_SYSCTL(sctp_early_fr_msec)) {
2195 msec = SCTP_BASE_SYSCTL(sctp_early_fr_msec);
2196 if (msec < SCTP_MINFR_MSEC_FLOOR) {
2197 msec = SCTP_MINFR_MSEC_FLOOR;
2198 }
2199 }
2200 to_ticks = MSEC_TO_TICKS(msec);
2201 tmr = &net->fr_timer;
2202 }
2203 break;
2204 case SCTP_TIMER_TYPE_ASCONF:
2205 /*
2206 * Here the timer comes from the stcb but its value is from
2207 * the net's RTO.
2208 */
2209 if ((stcb == NULL) || (net == NULL)) {
2210 return;
2211 }
2212 if (net->RTO == 0) {
2213 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2214 } else {
2215 to_ticks = MSEC_TO_TICKS(net->RTO);
2216 }
2217 tmr = &stcb->asoc.asconf_timer;
2218 break;
2219 case SCTP_TIMER_TYPE_PRIM_DELETED:
2220 if ((stcb == NULL) || (net != NULL)) {
2221 return;
2222 }
2223 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2224 tmr = &stcb->asoc.delete_prim_timer;
2225 break;
2226 case SCTP_TIMER_TYPE_AUTOCLOSE:
2227 if (stcb == NULL) {
2228 return;
2229 }
2230 if (stcb->asoc.sctp_autoclose_ticks == 0) {
2231 /*
2232 * Really an error since stcb is NOT set to
2233 * autoclose
2234 */
2235 return;
2236 }
2237 to_ticks = stcb->asoc.sctp_autoclose_ticks;
2238 tmr = &stcb->asoc.autoclose_timer;
2239 break;
2240 default:
2241 SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2242 __FUNCTION__, t_type);
2243 return;
2244 break;
2245 };
2246 if ((to_ticks <= 0) || (tmr == NULL)) {
2247 SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2248 __FUNCTION__, t_type, to_ticks, tmr);
2249 return;
2250 }
2251 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2252 /*
2253 * we do NOT allow you to have it already running. if it is
2254 * we leave the current one up unchanged
2255 */
2256 return;
2257 }
2258 /* At this point we can proceed */
2259 if (t_type == SCTP_TIMER_TYPE_SEND) {
2260 stcb->asoc.num_send_timers_up++;
2261 }
2262 tmr->stopped_from = 0;
2263 tmr->type = t_type;
2264 tmr->ep = (void *)inp;
2265 tmr->tcb = (void *)stcb;
2266 tmr->net = (void *)net;
2267 tmr->self = (void *)tmr;
2268 tmr->ticks = sctp_get_tick_count();
2269 (void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2270 return;
2271 }
2272
/*
 * Stop (cancel) the timer of type 't_type' associated with the given
 * endpoint/association/destination.  The switch maps the timer type to
 * the same sctp_timer structure that sctp_timer_start() armed; a NULL
 * argument required by a given type causes an early return without
 * touching anything.  'from' records the caller for debugging.
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/* Only the address work-queue timer may be stopped without an inp. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		/* caller must hold the TCB lock when an assoc is involved */
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_EARLYFR:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->fr_timer;
		SCTP_STAT_INCR(sctps_earlyfrstop);
		break;
	case SCTP_TIMER_TYPE_ITERATOR:
		{
			/* for the iterator, 'inp' actually carries the iterator */
			struct sctp_iterator *it;

			it = (struct sctp_iterator *)inp;
			tmr = &it->tmr;
		}
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		/* delayed-SACK timer lives in the association */
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer.
		 */
		if (stcb == NULL) {
			return;
		}
		/* shares the stream-reset timer structure */
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		break;
	};
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped. So just
		 * return.
		 */
		return;
	}
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		/* keep the pending-send-timer count in step; never negative */
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}
2444
2445 #ifdef SCTP_USE_ADLER32
2446 static uint32_t
2447 update_adler32(uint32_t adler, uint8_t * buf, int32_t len)
2448 {
2449 uint32_t s1 = adler & 0xffff;
2450 uint32_t s2 = (adler >> 16) & 0xffff;
2451 int n;
2452
2453 for (n = 0; n < len; n++, buf++) {
2454 /* s1 = (s1 + buf[n]) % BASE */
2455 /* first we add */
2456 s1 = (s1 + *buf);
2457 /*
2458 * now if we need to, we do a mod by subtracting. It seems a
2459 * bit faster since I really will only ever do one subtract
2460 * at the MOST, since buf[n] is a max of 255.
2461 */
2462 if (s1 >= SCTP_ADLER32_BASE) {
2463 s1 -= SCTP_ADLER32_BASE;
2464 }
2465 /* s2 = (s2 + s1) % BASE */
2466 /* first we add */
2467 s2 = (s2 + s1);
2468 /*
2469 * again, it is more efficent (it seems) to subtract since
2470 * the most s2 will ever be is (BASE-1 + BASE-1) in the
2471 * worse case. This would then be (2 * BASE) - 2, which will
2472 * still only do one subtract. On Intel this is much better
2473 * to do this way and avoid the divide. Have not -pg'd on
2474 * sparc.
2475 */
2476 if (s2 >= SCTP_ADLER32_BASE) {
2477 s2 -= SCTP_ADLER32_BASE;
2478 }
2479 }
2480 /* Return the adler32 of the bytes buf[0..len-1] */
2481 return ((s2 << 16) + s1);
2482 }
2483
2484 #endif
2485
2486
2487 uint32_t
2488 sctp_calculate_len(struct mbuf *m)
2489 {
2490 uint32_t tlen = 0;
2491 struct mbuf *at;
2492
2493 at = m;
2494 while (at) {
2495 tlen += SCTP_BUF_LEN(at);
2496 at = SCTP_BUF_NEXT(at);
2497 }
2498 return (tlen);
2499 }
2500
2501 #if defined(SCTP_WITH_NO_CSUM)
2502
2503 uint32_t
2504 sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset)
2505 {
2506 /*
2507 * given a mbuf chain with a packetheader offset by 'offset'
2508 * pointing at a sctphdr (with csum set to 0) go through the chain
2509 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This also
2510 * has a side bonus as it will calculate the total length of the
2511 * mbuf chain. Note: if offset is greater than the total mbuf
2512 * length, checksum=1, pktlen=0 is returned (ie. no real error code)
2513 */
2514 if (pktlen == NULL)
2515 return (0);
2516 *pktlen = sctp_calculate_len(m);
2517 return (0);
2518 }
2519
2520 #elif defined(SCTP_USE_INCHKSUM)
2521
2522 #include <machine/in_cksum.h>
2523
2524 uint32_t
2525 sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset)
2526 {
2527 /*
2528 * given a mbuf chain with a packetheader offset by 'offset'
2529 * pointing at a sctphdr (with csum set to 0) go through the chain
2530 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This also
2531 * has a side bonus as it will calculate the total length of the
2532 * mbuf chain. Note: if offset is greater than the total mbuf
2533 * length, checksum=1, pktlen=0 is returned (ie. no real error code)
2534 */
2535 int32_t tlen = 0;
2536 struct mbuf *at;
2537 uint32_t the_sum, retsum;
2538
2539 at = m;
2540 while (at) {
2541 tlen += SCTP_BUF_LEN(at);
2542 at = SCTP_BUF_NEXT(at);
2543 }
2544 the_sum = (uint32_t) (in_cksum_skip(m, tlen, offset));
2545 if (pktlen != NULL)
2546 *pktlen = (tlen - offset);
2547 retsum = htons(the_sum);
2548 return (the_sum);
2549 }
2550
2551 #else
2552
uint32_t
sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset)
{
	/*
	 * given a mbuf chain with a packetheader offset by 'offset'
	 * pointing at a sctphdr (with csum set to 0) go through the chain
	 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This also
	 * has a side bonus as it will calculate the total length of the
	 * mbuf chain. Note: if offset is greater than the total mbuf
	 * length, checksum=1, pktlen=0 is returned (ie. no real error code)
	 */
	int32_t tlen = 0;

	/* seed: 1 for Adler-32, all-ones for CRC-32c */
#ifdef SCTP_USE_ADLER32
	uint32_t base = 1L;

#else
	uint32_t base = 0xffffffff;

#endif
	struct mbuf *at;

	at = m;
	/* find the correct mbuf and offset into mbuf */
	while ((at != NULL) && (offset > (uint32_t) SCTP_BUF_LEN(at))) {
		offset -= SCTP_BUF_LEN(at);	/* update remaining offset
						 * left */
		at = SCTP_BUF_NEXT(at);
	}
	/* fold each mbuf's payload (past the initial offset) into 'base' */
	while (at != NULL) {
		if ((SCTP_BUF_LEN(at) - offset) > 0) {
#ifdef SCTP_USE_ADLER32
			base = update_adler32(base,
			    (unsigned char *)(SCTP_BUF_AT(at, offset)),
			    (unsigned int)(SCTP_BUF_LEN(at) - offset));
#else
			if ((SCTP_BUF_LEN(at) - offset) < 4) {
				/* Use old method if less than 4 bytes */
				base = old_update_crc32(base,
				    (unsigned char *)(SCTP_BUF_AT(at, offset)),
				    (unsigned int)(SCTP_BUF_LEN(at) - offset));
			} else {
				base = update_crc32(base,
				    (unsigned char *)(SCTP_BUF_AT(at, offset)),
				    (unsigned int)(SCTP_BUF_LEN(at) - offset));
			}
#endif
			tlen += SCTP_BUF_LEN(at) - offset;
			/* we only offset once into the first mbuf */
		}
		/* consume whatever offset remains inside this mbuf */
		if (offset) {
			if (offset < (uint32_t) SCTP_BUF_LEN(at))
				offset = 0;
			else
				offset -= SCTP_BUF_LEN(at);
		}
		at = SCTP_BUF_NEXT(at);
	}
	if (pktlen != NULL) {
		*pktlen = tlen;
	}
#ifdef SCTP_USE_ADLER32
	/* Adler32: emit in network byte order */
	base = htonl(base);
#else
	/* CRC-32c */
	base = sctp_csum_finalize(base);
#endif
	return (base);
}
2623
2624
2625 #endif
2626
2627 void
2628 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2629 struct sctp_association *asoc, uint32_t mtu)
2630 {
2631 /*
2632 * Reset the P-MTU size on this association, this involves changing
2633 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2634 * allow the DF flag to be cleared.
2635 */
2636 struct sctp_tmit_chunk *chk;
2637 unsigned int eff_mtu, ovh;
2638
2639 #ifdef SCTP_PRINT_FOR_B_AND_M
2640 SCTP_PRINTF("sctp_mtu_size_reset(%p, asoc:%p mtu:%d\n",
2641 inp, asoc, mtu);
2642 #endif
2643 asoc->smallest_mtu = mtu;
2644 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2645 ovh = SCTP_MIN_OVERHEAD;
2646 } else {
2647 ovh = SCTP_MIN_V4_OVERHEAD;
2648 }
2649 eff_mtu = mtu - ovh;
2650 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2651
2652 if (chk->send_size > eff_mtu) {
2653 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2654 }
2655 }
2656 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2657 if (chk->send_size > eff_mtu) {
2658 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2659 }
2660 }
2661 }
2662
2663
2664 /*
2665 * given an association and starting time of the current RTT period return
2666 * RTO in number of msecs net should point to the current network
2667 */
uint32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *told,
    int safe)
{
	/*-
	 * given an association and the starting time of the current RTT
	 * period (in value1/value2) return RTO in number of msecs.
	 */
	int calc_time = 0;
	int o_calctime;
	uint32_t new_rto = 0;
	int first_measure = 0;
	struct timeval now, then, *old;

	/* Copy it out for sparc64 */
	/*
	 * NOTE(review): the copy presumably avoids a misaligned direct
	 * access to *told on strict-alignment machines -- confirm against
	 * the sctp_align_* definitions.
	 */
	if (safe == sctp_align_unsafe_makecopy) {
		old = &then;
		memcpy(&then, told, sizeof(struct timeval));
	} else if (safe == sctp_align_safe_nocopy) {
		old = told;
	} else {
		/* error */
		SCTP_PRINTF("Huh, bad rto calc call\n");
		return (0);
	}
	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	(void)SCTP_GETTIME_TIMEVAL(&now);
	/* compute the RTT value (elapsed time now - *old, in ms) */
	if ((u_long)now.tv_sec > (u_long)old->tv_sec) {
		calc_time = ((u_long)now.tv_sec - (u_long)old->tv_sec) * 1000;
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			calc_time += (((u_long)now.tv_usec -
			    (u_long)old->tv_usec) / 1000);
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* Borrow 1,000ms from current calculation */
			calc_time -= 1000;
			/* Add in the slop over */
			calc_time += ((int)now.tv_usec / 1000);
			/* Add in the pre-second ms's */
			calc_time += (((int)1000000 - (int)old->tv_usec) / 1000);
		}
	} else if ((u_long)now.tv_sec == (u_long)old->tv_sec) {
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			calc_time = ((u_long)now.tv_usec -
			    (u_long)old->tv_usec) / 1000;
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* impossible .. garbage in nothing out */
			goto calc_rto;
		} else if ((u_long)now.tv_usec == (u_long)old->tv_usec) {
			/*
			 * We have to have 1 usec :-D this must be the
			 * loopback.
			 */
			calc_time = 1;
		} else {
			/* impossible .. garbage in nothing out */
			/*
			 * NOTE(review): this branch is unreachable -- the
			 * >, < and == comparisons above are exhaustive.
			 */
			goto calc_rto;
		}
	} else {
		/* Clock wrapped? */
		goto calc_rto;
	}
	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	o_calctime = calc_time;
	/* this is Van Jacobson's integer version */
	if (net->RTO_measured) {
		calc_time -= (net->lastsa >> SCTP_RTT_SHIFT);	/* take away 1/8th when
								 * shift=3 */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
		net->prev_rtt = o_calctime;
		net->lastsa += calc_time;	/* add 7/8th into sa when
						 * shift=3 */
		if (calc_time < 0) {
			calc_time = -calc_time;
		}
		calc_time -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);	/* take away 1/4 when
									 * VAR shift=2 */
		net->lastsv += calc_time;
		if (net->lastsv == 0) {
			/* never let the variance collapse to zero */
			net->lastsv = SCTP_CLOCK_GRANULARITY;
		}
	} else {
		/* First RTO measurment */
		net->RTO_measured = 1;
		net->lastsa = calc_time << SCTP_RTT_SHIFT;	/* Multiply by 8 when
								 * shift=3 */
		net->lastsv = calc_time;
		if (net->lastsv == 0) {
			net->lastsv = SCTP_CLOCK_GRANULARITY;
		}
		first_measure = 1;
		net->prev_rtt = o_calctime;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
calc_rto:
	/* RTO = smoothed RTT (scaled) + variance, in ms */
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTO */
	return (new_rto);
}
2793
2794 /*
2795 * return a pointer to a contiguous piece of data from the given mbuf chain
2796 * starting at 'off' for 'len' bytes. If the desired piece spans more than
2797 * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2798 * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2799 */
2800 caddr_t
2801 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2802 {
2803 uint32_t count;
2804 uint8_t *ptr;
2805
2806 ptr = in_ptr;
2807 if ((off < 0) || (len <= 0))
2808 return (NULL);
2809
2810 /* find the desired start location */
2811 while ((m != NULL) && (off > 0)) {
2812 if (off < SCTP_BUF_LEN(m))
2813 break;
2814 off -= SCTP_BUF_LEN(m);
2815 m = SCTP_BUF_NEXT(m);
2816 }
2817 if (m == NULL)
2818 return (NULL);
2819
2820 /* is the current mbuf large enough (eg. contiguous)? */
2821 if ((SCTP_BUF_LEN(m) - off) >= len) {
2822 return (mtod(m, caddr_t)+off);
2823 } else {
2824 /* else, it spans more than one mbuf, so save a temp copy... */
2825 while ((m != NULL) && (len > 0)) {
2826 count = min(SCTP_BUF_LEN(m) - off, len);
2827 bcopy(mtod(m, caddr_t)+off, ptr, count);
2828 len -= count;
2829 ptr += count;
2830 off = 0;
2831 m = SCTP_BUF_NEXT(m);
2832 }
2833 if ((m == NULL) && (len > 0))
2834 return (NULL);
2835 else
2836 return ((caddr_t)in_ptr);
2837 }
2838 }
2839
2840
2841
2842 struct sctp_paramhdr *
2843 sctp_get_next_param(struct mbuf *m,
2844 int offset,
2845 struct sctp_paramhdr *pull,
2846 int pull_limit)
2847 {
2848 /* This just provides a typed signature to Peter's Pull routine */
2849 return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2850 (uint8_t *) pull));
2851 }
2852
2853
2854 int
2855 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2856 {
2857 /*
2858 * add padlen bytes of 0 filled padding to the end of the mbuf. If
2859 * padlen is > 3 this routine will fail.
2860 */
2861 uint8_t *dp;
2862 int i;
2863
2864 if (padlen > 3) {
2865 SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2866 return (ENOBUFS);
2867 }
2868 if (padlen <= M_TRAILINGSPACE(m)) {
2869 /*
2870 * The easy way. We hope the majority of the time we hit
2871 * here :)
2872 */
2873 dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
2874 SCTP_BUF_LEN(m) += padlen;
2875 } else {
2876 /* Hard way we must grow the mbuf */
2877 struct mbuf *tmp;
2878
2879 tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA);
2880 if (tmp == NULL) {
2881 /* Out of space GAK! we are in big trouble. */
2882 SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
2883 return (ENOSPC);
2884 }
2885 /* setup and insert in middle */
2886 SCTP_BUF_LEN(tmp) = padlen;
2887 SCTP_BUF_NEXT(tmp) = NULL;
2888 SCTP_BUF_NEXT(m) = tmp;
2889 dp = mtod(tmp, uint8_t *);
2890 }
2891 /* zero out the pad */
2892 for (i = 0; i < padlen; i++) {
2893 *dp = 0;
2894 dp++;
2895 }
2896 return (0);
2897 }
2898
2899 int
2900 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2901 {
2902 /* find the last mbuf in chain and pad it */
2903 struct mbuf *m_at;
2904
2905 m_at = m;
2906 if (last_mbuf) {
2907 return (sctp_add_pad_tombuf(last_mbuf, padval));
2908 } else {
2909 while (m_at) {
2910 if (SCTP_BUF_NEXT(m_at) == NULL) {
2911 return (sctp_add_pad_tombuf(m_at, padval));
2912 }
2913 m_at = SCTP_BUF_NEXT(m_at);
2914 }
2915 }
2916 SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
2917 return (EFAULT);
2918 }
2919
/* Counts how often an assoc-change event woke socket sleepers (see
 * sctp_notify_assoc_change() below, which increments it after the
 * wakeup calls). */
int sctp_asoc_change_wake = 0;
2921
/*
 * Deliver an SCTP_ASSOC_CHANGE notification ('event', e.g.
 * SCTP_COMM_LOST) with error code 'error' to the application via the
 * socket read queue.  For TCP-model / connected sockets a fatal event
 * additionally sets so_error and wakes any sleepers so blocked callers
 * see the failure immediately.
 */
static void
sctp_notify_assoc_change(uint32_t event, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an ABORT comes in.
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((event == SCTP_COMM_LOST) || (event == SCTP_CANT_STR_ASSOC))) {
		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
			/* never got connected: report refused, not reset */
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
			stcb->sctp_socket->so_error = ECONNREFUSED;
		} else {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
			stcb->sctp_socket->so_error = ECONNRESET;
		}
		/* Wake ANY sleepers */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/*
			 * Drop the TCB lock before taking the socket lock,
			 * holding a refcount so the assoc cannot vanish in
			 * between; re-check that the socket did not close
			 * while unlocked.
			 */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sorwakeup(stcb->sctp_socket);
		sowwakeup(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
		sctp_asoc_change_wake++;
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;

	/* fill in the sctp_assoc_change record */
	sac = mtod(m_notify, struct sctp_assoc_change *);
	sac->sac_type = SCTP_ASSOC_CHANGE;
	sac->sac_flags = 0;
	sac->sac_length = sizeof(struct sctp_assoc_change);
	sac->sac_state = event;
	sac->sac_error = error;
	/* XXX verify these stream counts */
	sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
	sac->sac_inbound_streams = stcb->asoc.streamincnt;
	sac->sac_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_change);
	SCTP_BUF_NEXT(m_notify) = NULL;
	/* wrap the mbuf in a read-queue entry and hand it to the socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, so_locked);
	if (event == SCTP_COMM_LOST) {
		/* Wake up any sleeper */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/* same lock/refcount dance as above */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
3038
3039 static void
3040 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
3041 struct sockaddr *sa, uint32_t error)
3042 {
3043 struct mbuf *m_notify;
3044 struct sctp_paddr_change *spc;
3045 struct sctp_queued_to_read *control;
3046
3047 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
3048 /* event not enabled */
3049 return;
3050 }
3051 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA);
3052 if (m_notify == NULL)
3053 return;
3054 SCTP_BUF_LEN(m_notify) = 0;
3055 spc = mtod(m_notify, struct sctp_paddr_change *);
3056 spc->spc_type = SCTP_PEER_ADDR_CHANGE;
3057 spc->spc_flags = 0;
3058 spc->spc_length = sizeof(struct sctp_paddr_change);
3059 switch (sa->sa_family) {
3060 case AF_INET:
3061 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
3062 break;
3063 #ifdef INET6
3064 case AF_INET6:
3065 {
3066 struct sockaddr_in6 *sin6;
3067
3068 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
3069
3070 sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
3071 if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
3072 if (sin6->sin6_scope_id == 0) {
3073 /* recover scope_id for user */
3074 (void)sa6_recoverscope(sin6);
3075 } else {
3076 /* clear embedded scope_id for user */
3077 in6_clearscope(&sin6->sin6_addr);
3078 }
3079 }
3080 break;
3081 }
3082 #endif
3083 default:
3084 /* TSNH */
3085 break;
3086 }
3087 spc->spc_state = state;
3088 spc->spc_error = error;
3089 spc->spc_assoc_id = sctp_get_associd(stcb);
3090
3091 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
3092 SCTP_BUF_NEXT(m_notify) = NULL;
3093
3094 /* append to socket */
3095 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3096 0, 0, 0, 0, 0, 0,
3097 m_notify);
3098 if (control == NULL) {
3099 /* no memory */
3100 sctp_m_freem(m_notify);
3101 return;
3102 }
3103 control->length = SCTP_BUF_LEN(m_notify);
3104 control->spec_flags = M_NOTIFICATION;
3105 /* not that we need this */
3106 control->tail_mbuf = m_notify;
3107 sctp_add_to_readq(stcb->sctp_ep, stcb,
3108 control,
3109 &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
3110 }
3111
3112
/*
 * Deliver an SCTP_SEND_FAILED notification for the (un)sent chunk
 * 'chk'.  The chunk's data mbufs are stolen (chk->data is cleared) and
 * chained behind the notification header so the application can
 * recover the undelivered payload.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	/* reported length: header + payload minus the SCTP data-chunk header */
	length = sizeof(struct sctp_send_failed) + chk->send_size;
	length -= sizeof(struct sctp_data_chunk);
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
	ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
	ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
	ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
	ssf->ssf_info.sinfo_context = chk->rec.data.context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);

	/* chain the chunk's payload behind the notification header */
	SCTP_BUF_NEXT(m_notify) = chk->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	if (chk->data) {
		/*
		 * trim off the sctp chunk header(it should be there)
		 */
		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
			m_adj(chk->data, sizeof(struct sctp_data_chunk));
			sctp_mbuf_crush(chk->data);
			chk->send_size -= sizeof(struct sctp_data_chunk);
		}
	}
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* freeing m_notify also frees the stolen data chain behind it */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, so_locked);
}
3192
3193
/*
 * Queue an SCTP_SEND_FAILED notification for a stream-queue pending
 * message (data that never made it into a DATA chunk).  Like
 * sctp_notify_send_failed() but sourced from a sctp_stream_queue_pending
 * entry; sp->data is stolen and chained to the notification.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
		/* event not enabled */
		return;
	}
	/* no chunk header to trim here — full sp->length is user data */
	length = sizeof(struct sctp_send_failed) + sp->length;
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = sp->stream;
	ssf->ssf_info.sinfo_ssn = sp->strseq;
	if (sp->some_taken) {
		/* part of the message already went out as fragments */
		ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
	} else {
		ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
	}
	ssf->ssf_info.sinfo_ppid = sp->ppid;
	ssf->ssf_info.sinfo_context = sp->context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_NEXT(m_notify) = sp->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* freeing m_notify also frees the stolen data chained to it */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, so_locked);
}
3266
3267
3268
3269 static void
3270 sctp_notify_adaptation_layer(struct sctp_tcb *stcb,
3271 uint32_t error)
3272 {
3273 struct mbuf *m_notify;
3274 struct sctp_adaptation_event *sai;
3275 struct sctp_queued_to_read *control;
3276
3277 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3278 /* event not enabled */
3279 return;
3280 }
3281 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA);
3282 if (m_notify == NULL)
3283 /* no space left */
3284 return;
3285 SCTP_BUF_LEN(m_notify) = 0;
3286 sai = mtod(m_notify, struct sctp_adaptation_event *);
3287 sai->sai_type = SCTP_ADAPTATION_INDICATION;
3288 sai->sai_flags = 0;
3289 sai->sai_length = sizeof(struct sctp_adaptation_event);
3290 sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3291 sai->sai_assoc_id = sctp_get_associd(stcb);
3292
3293 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3294 SCTP_BUF_NEXT(m_notify) = NULL;
3295
3296 /* append to socket */
3297 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3298 0, 0, 0, 0, 0, 0,
3299 m_notify);
3300 if (control == NULL) {
3301 /* no memory */
3302 sctp_m_freem(m_notify);
3303 return;
3304 }
3305 control->length = SCTP_BUF_LEN(m_notify);
3306 control->spec_flags = M_NOTIFICATION;
3307 /* not that we need this */
3308 control->tail_mbuf = m_notify;
3309 sctp_add_to_readq(stcb->sctp_ep, stcb,
3310 control,
3311 &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
3312 }
3313
3314 /* This always must be called with the read-queue LOCKED in the INP */
3315 void
3316 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3317 int nolock, uint32_t val)
3318 {
3319 struct mbuf *m_notify;
3320 struct sctp_pdapi_event *pdapi;
3321 struct sctp_queued_to_read *control;
3322 struct sockbuf *sb;
3323
3324 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3325 /* event not enabled */
3326 return;
3327 }
3328 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA);
3329 if (m_notify == NULL)
3330 /* no space left */
3331 return;
3332 SCTP_BUF_LEN(m_notify) = 0;
3333 pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3334 pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3335 pdapi->pdapi_flags = 0;
3336 pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3337 pdapi->pdapi_indication = error;
3338 pdapi->pdapi_stream = (val >> 16);
3339 pdapi->pdapi_seq = (val & 0x0000ffff);
3340 pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3341
3342 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3343 SCTP_BUF_NEXT(m_notify) = NULL;
3344 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3345 0, 0, 0, 0, 0, 0,
3346 m_notify);
3347 if (control == NULL) {
3348 /* no memory */
3349 sctp_m_freem(m_notify);
3350 return;
3351 }
3352 control->spec_flags = M_NOTIFICATION;
3353 control->length = SCTP_BUF_LEN(m_notify);
3354 /* not that we need this */
3355 control->tail_mbuf = m_notify;
3356 control->held_length = 0;
3357 control->length = 0;
3358 if (nolock == 0) {
3359 SCTP_INP_READ_LOCK(stcb->sctp_ep);
3360 }
3361 sb = &stcb->sctp_socket->so_rcv;
3362 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3363 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3364 }
3365 sctp_sballoc(stcb, sb, m_notify);
3366 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3367 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3368 }
3369 atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3370 control->end_added = 1;
3371 if (stcb->asoc.control_pdapi)
3372 TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3373 else {
3374 /* we really should not see this case */
3375 TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3376 }
3377 if (nolock == 0) {
3378 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
3379 }
3380 if (stcb->sctp_ep && stcb->sctp_socket) {
3381 /* This should always be the case */
3382 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3383 }
3384 }
3385
/*
 * Queue an SCTP_SHUTDOWN_EVENT notification.  For one-to-one style (TCP
 * model) sockets the socket is additionally marked can't-send/can't-recv
 * so blocked readers/writers wake up.  No-op (beyond that marking) unless
 * the application enabled RECVSHUTDOWNEVNT.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/*
		 * Lock-order dance: take a refcount so the stcb can't be
		 * freed, drop the TCB lock, take the socket lock, then
		 * re-take the TCB lock (socket lock before TCB lock).
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* socket went away while we were unlocked */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
		socantrcvmore(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
}
3454
3455 static void
3456 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3457 int so_locked
3458 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3459 SCTP_UNUSED
3460 #endif
3461 )
3462 {
3463 struct mbuf *m_notify;
3464 struct sctp_sender_dry_event *event;
3465 struct sctp_queued_to_read *control;
3466
3467 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_DRYEVNT)) {
3468 /* event not enabled */
3469 return;
3470 }
3471 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_DONTWAIT, 1, MT_DATA);
3472 if (m_notify == NULL) {
3473 /* no space left */
3474 return;
3475 }
3476 SCTP_BUF_LEN(m_notify) = 0;
3477 event = mtod(m_notify, struct sctp_sender_dry_event *);
3478 event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3479 event->sender_dry_flags = 0;
3480 event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3481 event->sender_dry_assoc_id = sctp_get_associd(stcb);
3482
3483 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3484 SCTP_BUF_NEXT(m_notify) = NULL;
3485
3486 /* append to socket */
3487 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3488 0, 0, 0, 0, 0, 0, m_notify);
3489 if (control == NULL) {
3490 /* no memory */
3491 sctp_m_freem(m_notify);
3492 return;
3493 }
3494 control->length = SCTP_BUF_LEN(m_notify);
3495 control->spec_flags = M_NOTIFICATION;
3496 /* not that we need this */
3497 control->tail_mbuf = m_notify;
3498 sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3499 &stcb->sctp_socket->so_rcv, 1, so_locked);
3500 }
3501
3502 static void
3503 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3504 int number_entries, uint16_t * list, int flag)
3505 {
3506 struct mbuf *m_notify;
3507 struct sctp_queued_to_read *control;
3508 struct sctp_stream_reset_event *strreset;
3509 int len;
3510
3511 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
3512 /* event not enabled */
3513 return;
3514 }
3515 m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3516 if (m_notify == NULL)
3517 /* no space left */
3518 return;
3519 SCTP_BUF_LEN(m_notify) = 0;
3520 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3521 if (len > M_TRAILINGSPACE(m_notify)) {
3522 /* never enough room */
3523 sctp_m_freem(m_notify);
3524 return;
3525 }
3526 strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3527 strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3528 if (number_entries == 0) {
3529 strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS;
3530 } else {
3531 strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST;
3532 }
3533 strreset->strreset_length = len;
3534 strreset->strreset_assoc_id = sctp_get_associd(stcb);
3535 if (number_entries) {
3536 int i;
3537
3538 for (i = 0; i < number_entries; i++) {
3539 strreset->strreset_list[i] = ntohs(list[i]);
3540 }
3541 }
3542 SCTP_BUF_LEN(m_notify) = len;
3543 SCTP_BUF_NEXT(m_notify) = NULL;
3544 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3545 /* no space */
3546 sctp_m_freem(m_notify);
3547 return;
3548 }
3549 /* append to socket */
3550 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3551 0, 0, 0, 0, 0, 0,
3552 m_notify);
3553 if (control == NULL) {
3554 /* no memory */
3555 sctp_m_freem(m_notify);
3556 return;
3557 }
3558 control->spec_flags = M_NOTIFICATION;
3559 control->length = SCTP_BUF_LEN(m_notify);
3560 /* not that we need this */
3561 control->tail_mbuf = m_notify;
3562 sctp_add_to_readq(stcb->sctp_ep, stcb,
3563 control,
3564 &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
3565 }
3566
3567
/*
 * Central ULP notification dispatcher: translate an internal
 * SCTP_NOTIFY_* code into the matching user-visible socket notification.
 * 'data' is interpreted per notification type (a net, a chunk, a pending
 * stream entry, a stream-id list, a packed key id, ...).  Notifications
 * are suppressed when the socket is gone/closed, and interface events are
 * suppressed while the association is still being set up.
 */
void
sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if ((stcb == NULL) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* If the socket is gone we are out of here */
		return;
	}
	if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED))) {
		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
			/* Don't report these in front states */
			return;
		}
	}
	switch (notification) {
	case SCTP_NOTIFY_ASSOC_UP:
		/* COMM_UP is sent at most once per association */
		if (stcb->asoc.assoc_up_sent == 0) {
			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, so_locked);
			stcb->asoc.assoc_up_sent = 1;
		}
		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
			sctp_notify_adaptation_layer(stcb, error);
		}
		if (stcb->asoc.peer_supports_auth == 0) {
			/* recurse once to deliver the no-auth indication */
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_DOWN:
		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, so_locked);
		break;
	case SCTP_NOTIFY_INTERFACE_DOWN:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
			    (struct sockaddr *)&net->ro._l_addr, error);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_UP:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
			    (struct sockaddr *)&net->ro._l_addr, error);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
			    (struct sockaddr *)&net->ro._l_addr, error);
			break;
		}
	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
		/* never-chunked stream-queue entry failed */
		sctp_notify_send_failed2(stcb, error,
		    (struct sctp_stream_queue_pending *)data, so_locked);
		break;
	case SCTP_NOTIFY_DG_FAIL:
		/* a built DATA chunk failed */
		sctp_notify_send_failed(stcb, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	/* NOTE(review): "DELVIERY" is a long-standing typo in the constant name */
	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
		{
			uint32_t val;

			val = *((uint32_t *) data);

			sctp_notify_partial_delivery_indication(stcb, error, 0, val);
		}
		break;
	case SCTP_NOTIFY_STRDATA_ERR:
		break;
	case SCTP_NOTIFY_ASSOC_ABORTED:
		/* before COMM_UP an abort means the assoc never started */
		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, NULL, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_PEER_OPENED_STREAM:
		break;
	case SCTP_NOTIFY_STREAM_OPENED_OK:
		break;
	case SCTP_NOTIFY_ASSOC_RESTART:
		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, data, so_locked);
		if (stcb->asoc.peer_supports_auth == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_HB_RESP:
		break;
	case SCTP_NOTIFY_STR_RESET_SEND:
		/* for stream-reset events 'error' carries the entry count */
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_OUTBOUND_STR);
		break;
	case SCTP_NOTIFY_STR_RESET_RECV:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_INBOUND_STR);
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_OUTBOUND_STR | SCTP_STRRESET_FAILED));
		break;

	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_INBOUND_STR | SCTP_STRRESET_FAILED));
		break;

	case SCTP_NOTIFY_ASCONF_ADD_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
		    error);
		break;
	case SCTP_NOTIFY_ASCONF_DELETE_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
		    error);
		break;
	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
		    error);
		break;
	case SCTP_NOTIFY_ASCONF_SUCCESS:
		break;
	case SCTP_NOTIFY_ASCONF_FAILED:
		break;
	case SCTP_NOTIFY_PEER_SHUTDOWN:
		sctp_notify_shutdown_event(stcb);
		break;
	case SCTP_NOTIFY_AUTH_NEW_KEY:
		/* for auth events 'data' is the key id packed in a pointer */
		sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_AUTH_FREE_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_NO_PEER_AUTH:
		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_SENDER_DRY:
		sctp_notify_sender_dry_event(stcb, so_locked);
		break;
	default:
		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
		    __FUNCTION__, notification, notification);
		break;
	}			/* end switch */
}
3733
3734 void
3735 sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock, int so_locked
3736 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3737 SCTP_UNUSED
3738 #endif
3739 )
3740 {
3741 struct sctp_association *asoc;
3742 struct sctp_stream_out *outs;
3743 struct sctp_tmit_chunk *chk;
3744 struct sctp_stream_queue_pending *sp;
3745 int i;
3746
3747 asoc = &stcb->asoc;
3748
3749 if (stcb == NULL) {
3750 return;
3751 }
3752 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3753 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3754 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3755 return;
3756 }
3757 /* now through all the gunk freeing chunks */
3758 if (holds_lock == 0) {
3759 SCTP_TCB_SEND_LOCK(stcb);
3760 }
3761 /* sent queue SHOULD be empty */
3762 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3763 chk = TAILQ_FIRST(&asoc->sent_queue);
3764 while (chk) {
3765 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3766 asoc->sent_queue_cnt--;
3767 sctp_free_bufspace(stcb, asoc, chk, 1);
3768 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3769 SCTP_NOTIFY_DATAGRAM_SENT, chk, so_locked);
3770 if (chk->data) {
3771 sctp_m_freem(chk->data);
3772 chk->data = NULL;
3773 }
3774 sctp_free_a_chunk(stcb, chk);
3775 /* sa_ignore FREED_MEMORY */
3776 chk = TAILQ_FIRST(&asoc->sent_queue);
3777 }
3778 }
3779 /* pending send queue SHOULD be empty */
3780 if (!TAILQ_EMPTY(&asoc->send_queue)) {
3781 chk = TAILQ_FIRST(&asoc->send_queue);
3782 while (chk) {
3783 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3784 asoc->send_queue_cnt--;
3785 sctp_free_bufspace(stcb, asoc, chk, 1);
3786 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, SCTP_NOTIFY_DATAGRAM_UNSENT, chk, so_locked);
3787 if (chk->data) {
3788 sctp_m_freem(chk->data);
3789 chk->data = NULL;
3790 }
3791 sctp_free_a_chunk(stcb, chk);
3792 /* sa_ignore FREED_MEMORY */
3793 chk = TAILQ_FIRST(&asoc->send_queue);
3794 }
3795 }
3796 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3797 /* For each stream */
3798 outs = &stcb->asoc.strmout[i];
3799 /* clean up any sends there */
3800 stcb->asoc.locked_on_sending = NULL;
3801 sp = TAILQ_FIRST(&outs->outqueue);
3802 while (sp) {
3803 stcb->asoc.stream_queue_cnt--;
3804 TAILQ_REMOVE(&outs->outqueue, sp, next);
3805 sctp_free_spbufspace(stcb, asoc, sp);
3806 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3807 SCTP_NOTIFY_DATAGRAM_UNSENT, (void *)sp, so_locked);
3808 if (sp->data) {
3809 sctp_m_freem(sp->data);
3810 sp->data = NULL;
3811 }
3812 if (sp->net)
3813 sctp_free_remote_addr(sp->net);
3814 sp->net = NULL;
3815 /* Free the chunk */
3816 sctp_free_a_strmoq(stcb, sp);
3817 /* sa_ignore FREED_MEMORY */
3818 sp = TAILQ_FIRST(&outs->outqueue);
3819 }
3820 }
3821
3822 if (holds_lock == 0) {
3823 SCTP_TCB_SEND_UNLOCK(stcb);
3824 }
3825 }
3826
3827 void
3828 sctp_abort_notification(struct sctp_tcb *stcb, int error, int so_locked
3829 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3830 SCTP_UNUSED
3831 #endif
3832 )
3833 {
3834
3835 if (stcb == NULL) {
3836 return;
3837 }
3838 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3839 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3840 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3841 return;
3842 }
3843 /* Tell them we lost the asoc */
3844 sctp_report_all_outbound(stcb, 1, so_locked);
3845 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3846 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3847 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3848 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3849 }
3850 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL, so_locked);
3851 }
3852
/*
 * Abort an association in response to an inbound packet: notify the ULP
 * (if a TCB exists), send an ABORT back to the peer using the peer's
 * vtag, and free the TCB.  With no TCB, the ABORT is sent with vtag 0
 * and a socket-gone inp with no remaining assocs is freed.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	sctp_send_abort(m, iphlen, sh, vtag, op_err, vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Hold a ref and re-acquire locks in socket-before-TCB
		 * order, as required by sctp_free_assoc on these platforms.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	} else {
		/* no TCB: reap a dying endpoint with no associations left */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
	}
}
3898
3899 #ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug helper: dump the inbound and outbound TSN tracking logs for an
 * association.  The circular logs wrap at SCTP_TSN_LOG_SIZE; when the
 * wrap flag is set, entries from the current index to the end are printed
 * first (oldest), then 0..index (newest).  Compiled to an empty function
 * unless NOSIY_PRINTS is defined.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
/*
 * NOTE(review): "NOSIY_PRINTS" looks like a typo of "NOISY_PRINTS";
 * confirm no build configuration defines the misspelled name before
 * renaming it.
 */
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	if (stcb->asoc.tsn_in_wrapped) {
		/* wrapped: print the older tail half of the ring first */
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
3960
3961 #endif
3962
/*
 * Locally abort an existing association: notify the ULP, send an ABORT
 * chunk (with optional operational error) to the peer, update stats, and
 * free the TCB.  With no TCB, just reap a dying endpoint if possible.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    int error, struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* NOTE(review): vtag is computed but unused below — presumably vestigial */
	vtag = stcb->asoc.peer_vtag;
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
		sctp_abort_notification(stcb, error, so_locked);
	/* notify the peer */
#if defined(SCTP_PANIC_ON_ABORT)
	panic("aborting an association");
#endif
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	/* an established assoc going away decrements the gauge */
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * If the caller does not hold the socket lock, take it now in
	 * socket-before-TCB order (ref + unlock/relock dance).
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
4028
4029 void
4030 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
4031 struct sctp_inpcb *inp, struct mbuf *op_err, uint32_t vrf_id, uint16_t port)
4032 {
4033 struct sctp_chunkhdr *ch, chunk_buf;
4034 unsigned int chk_length;
4035
4036 SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
4037 /* Generate a TO address for future reference */
4038 if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
4039 if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
4040 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4041 SCTP_CALLED_DIRECTLY_NOCMPSET);
4042 }
4043 }
4044 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4045 sizeof(*ch), (uint8_t *) & chunk_buf);
4046 while (ch != NULL) {
4047 chk_length = ntohs(ch->chunk_length);
4048 if (chk_length < sizeof(*ch)) {
4049 /* break to abort land */
4050 break;
4051 }
4052 switch (ch->chunk_type) {
4053 case SCTP_COOKIE_ECHO:
4054 /* We hit here only if the assoc is being freed */
4055 return;
4056 case SCTP_PACKET_DROPPED:
4057 /* we don't respond to pkt-dropped */
4058 return;
4059 case SCTP_ABORT_ASSOCIATION:
4060 /* we don't respond with an ABORT to an ABORT */
4061 return;
4062 case SCTP_SHUTDOWN_COMPLETE:
4063 /*
4064 * we ignore it since we are not waiting for it and
4065 * peer is gone
4066 */
4067 return;
4068 case SCTP_SHUTDOWN_ACK:
4069 sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id, port);
4070 return;
4071 default:
4072 break;
4073 }
4074 offset += SCTP_SIZE32(chk_length);
4075 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4076 sizeof(*ch), (uint8_t *) & chunk_buf);
4077 }
4078 sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, port);
4079 }
4080
4081 /*
4082 * check the inbound datagram to make sure there is not an abort inside it,
4083 * if there is return 1, else return 0.
4084 */
4085 int
4086 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4087 {
4088 struct sctp_chunkhdr *ch;
4089 struct sctp_init_chunk *init_chk, chunk_buf;
4090 int offset;
4091 unsigned int chk_length;
4092
4093 offset = iphlen + sizeof(struct sctphdr);
4094 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4095 (uint8_t *) & chunk_buf);
4096 while (ch != NULL) {
4097 chk_length = ntohs(ch->chunk_length);
4098 if (chk_length < sizeof(*ch)) {
4099 /* packet is probably corrupt */
4100 break;
4101 }
4102 /* we seem to be ok, is it an abort? */
4103 if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4104 /* yep, tell them */
4105 return (1);
4106 }
4107 if (ch->chunk_type == SCTP_INITIATION) {
4108 /* need to update the Vtag */
4109 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4110 offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4111 if (init_chk != NULL) {
4112 *vtagfill = ntohl(init_chk->init.initiate_tag);
4113 }
4114 }
4115 /* Nope, move to the next chunk */
4116 offset += SCTP_SIZE32(chk_length);
4117 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4118 sizeof(*ch), (uint8_t *) & chunk_buf);
4119 }
4120 return (0);
4121 }
4122
4123 /*
4124 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4125 * set (i.e. it's 0) so, create this function to compare link local scopes
4126 */
4127 #ifdef INET6
4128 uint32_t
4129 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4130 {
4131 struct sockaddr_in6 a, b;
4132
4133 /* save copies */
4134 a = *addr1;
4135 b = *addr2;
4136
4137 if (a.sin6_scope_id == 0)
4138 if (sa6_recoverscope(&a)) {
4139 /* can't get scope, so can't match */
4140 return (0);
4141 }
4142 if (b.sin6_scope_id == 0)
4143 if (sa6_recoverscope(&b)) {
4144 /* can't get scope, so can't match */
4145 return (0);
4146 }
4147 if (a.sin6_scope_id != b.sin6_scope_id)
4148 return (0);
4149
4150 return (1);
4151 }
4152
4153 /*
4154 * returns a sockaddr_in6 with embedded scope recovered and removed
4155 */
4156 struct sockaddr_in6 *
4157 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4158 {
4159 /* check and strip embedded scope junk */
4160 if (addr->sin6_family == AF_INET6) {
4161 if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4162 if (addr->sin6_scope_id == 0) {
4163 *store = *addr;
4164 if (!sa6_recoverscope(store)) {
4165 /* use the recovered scope */
4166 addr = store;
4167 }
4168 } else {
4169 /* else, return the original "to" addr */
4170 in6_clearscope(&addr->sin6_addr);
4171 }
4172 }
4173 }
4174 return (addr);
4175 }
4176
4177 #endif
4178
4179 /*
4180 * are the two addresses the same? currently a "scopeless" check returns: 1
4181 * if same, 0 if not
4182 */
4183 int
4184 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4185 {
4186
4187 /* must be valid */
4188 if (sa1 == NULL || sa2 == NULL)
4189 return (0);
4190
4191 /* must be the same family */
4192 if (sa1->sa_family != sa2->sa_family)
4193 return (0);
4194
4195 switch (sa1->sa_family) {
4196 #ifdef INET6
4197 case AF_INET6:
4198 {
4199 /* IPv6 addresses */
4200 struct sockaddr_in6 *sin6_1, *sin6_2;
4201
4202 sin6_1 = (struct sockaddr_in6 *)sa1;
4203 sin6_2 = (struct sockaddr_in6 *)sa2;
4204 return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4205 sin6_2));
4206 }
4207 #endif
4208 case AF_INET:
4209 {
4210 /* IPv4 addresses */
4211 struct sockaddr_in *sin_1, *sin_2;
4212
4213 sin_1 = (struct sockaddr_in *)sa1;
4214 sin_2 = (struct sockaddr_in *)sa2;
4215 return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4216 }
4217 default:
4218 /* we don't do these... */
4219 return (0);
4220 }
4221 }
4222
void
sctp_print_address(struct sockaddr *sa)
{
	/* Pretty-print an IPv4 or IPv6 sockaddr; "?" for other families. */
#ifdef INET6
	char ip6buf[INET6_ADDRSTRLEN];

	ip6buf[0] = 0;
#endif

	if (sa->sa_family == AF_INET) {
		struct sockaddr_in *sin = (struct sockaddr_in *)sa;
		unsigned char *b = (unsigned char *)&sin->sin_addr;

		/* dotted quad plus port in host order */
		SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
		    b[0], b[1], b[2], b[3], ntohs(sin->sin_port));
		return;
	}
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sa;

		SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
		    ip6_sprintf(ip6buf, &sin6->sin6_addr),
		    ntohs(sin6->sin6_port),
		    sin6->sin6_scope_id);
		return;
	}
#endif
	SCTP_PRINTF("?\n");
}
4262
4263 void
4264 sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
4265 {
4266 switch (iph->ip_v) {
4267 case IPVERSION:
4268 {
4269 struct sockaddr_in lsa, fsa;
4270
4271 bzero(&lsa, sizeof(lsa));
4272 lsa.sin_len = sizeof(lsa);
4273 lsa.sin_family = AF_INET;
4274 lsa.sin_addr = iph->ip_src;
4275 lsa.sin_port = sh->src_port;
4276 bzero(&fsa, sizeof(fsa));
4277 fsa.sin_len = sizeof(fsa);
4278 fsa.sin_family = AF_INET;
4279 fsa.sin_addr = iph->ip_dst;
4280 fsa.sin_port = sh->dest_port;
4281 SCTP_PRINTF("src: ");
4282 sctp_print_address((struct sockaddr *)&lsa);
4283 SCTP_PRINTF("dest: ");
4284 sctp_print_address((struct sockaddr *)&fsa);
4285 break;
4286 }
4287 #ifdef INET6
4288 case IPV6_VERSION >> 4:
4289 {
4290 struct ip6_hdr *ip6;
4291 struct sockaddr_in6 lsa6, fsa6;
4292
4293 ip6 = (struct ip6_hdr *)iph;
4294 bzero(&lsa6, sizeof(lsa6));
4295 lsa6.sin6_len = sizeof(lsa6);
4296 lsa6.sin6_family = AF_INET6;
4297 lsa6.sin6_addr = ip6->ip6_src;
4298 lsa6.sin6_port = sh->src_port;
4299 bzero(&fsa6, sizeof(fsa6));
4300 fsa6.sin6_len = sizeof(fsa6);
4301 fsa6.sin6_family = AF_INET6;
4302 fsa6.sin6_addr = ip6->ip6_dst;
4303 fsa6.sin6_port = sh->dest_port;
4304 SCTP_PRINTF("src: ");
4305 sctp_print_address((struct sockaddr *)&lsa6);
4306 SCTP_PRINTF("dest: ");
4307 sctp_print_address((struct sockaddr *)&fsa6);
4308 break;
4309 }
4310 #endif
4311 default:
4312 /* TSNH */
4313 break;
4314 }
4315 }
4316
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.
	 * Used by peeloff/accept: the receive-buffer accounting is moved
	 * from the old socket to the new one, mbuf chain by mbuf chain.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;	/* staging list between the two inps */
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	/* sb-lock the old socket so no reader races with the unhooking */
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	control = TAILQ_FIRST(&old_inp->read_queue);
	/* Pull off all for out target stcb */
	while (control) {
		nctl = TAILQ_NEXT(control, next);
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			/* uncharge each mbuf from the old socket's rcv buffer */
			m = control->data;
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
		control = nctl;
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	control = TAILQ_FIRST(&tmp_queue);
	SCTP_INP_READ_LOCK(new_inp);
	while (control) {
		nctl = TAILQ_NEXT(control, next);
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		/* charge each mbuf to the new socket's rcv buffer */
		m = control->data;
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
		control = nctl;
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4398
4399 void
4400 sctp_add_to_readq(struct sctp_inpcb *inp,
4401 struct sctp_tcb *stcb,
4402 struct sctp_queued_to_read *control,
4403 struct sockbuf *sb,
4404 int end,
4405 int so_locked
4406 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4407 SCTP_UNUSED
4408 #endif
4409 )
4410 {
4411 /*
4412 * Here we must place the control on the end of the socket read
4413 * queue AND increment sb_cc so that select will work properly on
4414 * read.
4415 */
4416 struct mbuf *m, *prev = NULL;
4417
4418 if (inp == NULL) {
4419 /* Gak, TSNH!! */
4420 #ifdef INVARIANTS
4421 panic("Gak, inp NULL on add_to_readq");
4422 #endif
4423 return;
4424 }
4425 SCTP_INP_READ_LOCK(inp);
4426 if (!(control->spec_flags & M_NOTIFICATION)) {
4427 atomic_add_int(&inp->total_recvs, 1);
4428 if (!control->do_not_ref_stcb) {
4429 atomic_add_int(&stcb->total_recvs, 1);
4430 }
4431 }
4432 m = control->data;
4433 control->held_length = 0;
4434 control->length = 0;
4435 while (m) {
4436 if (SCTP_BUF_LEN(m) == 0) {
4437 /* Skip mbufs with NO length */
4438 if (prev == NULL) {
4439 /* First one */
4440 control->data = sctp_m_free(m);
4441 m = control->data;
4442 } else {
4443 SCTP_BUF_NEXT(prev) = sctp_m_free(m);
4444 m = SCTP_BUF_NEXT(prev);
4445 }
4446 if (m == NULL) {
4447 control->tail_mbuf = prev;;
4448 }
4449 continue;
4450 }
4451 prev = m;
4452 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4453 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4454 }
4455 sctp_sballoc(stcb, sb, m);
4456 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4457 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4458 }
4459 atomic_add_int(&control->length, SCTP_BUF_LEN(m));
4460 m = SCTP_BUF_NEXT(m);
4461 }
4462 if (prev != NULL) {
4463 control->tail_mbuf = prev;
4464 } else {
4465 /* Everything got collapsed out?? */
4466 SCTP_INP_READ_UNLOCK(inp);
4467 return;
4468 }
4469 if (end) {
4470 control->end_added = 1;
4471 }
4472 TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
4473 SCTP_INP_READ_UNLOCK(inp);
4474 if (inp && inp->sctp_socket) {
4475 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
4476 SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
4477 } else {
4478 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4479 struct socket *so;
4480
4481 so = SCTP_INP_SO(inp);
4482 if (!so_locked) {
4483 atomic_add_int(&stcb->asoc.refcnt, 1);
4484 SCTP_TCB_UNLOCK(stcb);
4485 SCTP_SOCKET_LOCK(so, 1);
4486 SCTP_TCB_LOCK(stcb);
4487 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4488 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4489 SCTP_SOCKET_UNLOCK(so, 1);
4490 return;
4491 }
4492 }
4493 #endif
4494 sctp_sorwakeup(inp, inp->sctp_socket);
4495 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4496 if (!so_locked) {
4497 SCTP_SOCKET_UNLOCK(so, 1);
4498 }
4499 #endif
4500 }
4501 }
4502 }
4503
4504
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 *
	 * Returns 0 on success, -1 on error (no control, control already
	 * complete, or nothing to append).
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		/* common failure exit: drop the read lock and report -1 */
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	/* prune zero-length mbufs and total up what remains */
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			/* PDAPI case: charge the socket receive buffer too */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			sctp_sballoc(stcb, sb, mm);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			/*
			 * NOTE(review): unlike sctp_add_to_readq(), this path
			 * takes the socket lock unconditionally -- it assumes
			 * the caller does not already hold it; confirm.
			 */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4646
4647
4648
4649 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4650 *************ALTERNATE ROUTING CODE
4651 */
4652
4653 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4654 *************ALTERNATE ROUTING CODE
4655 */
4656
4657 struct mbuf *
4658 sctp_generate_invmanparam(int err)
4659 {
4660 /* Return a MBUF with a invalid mandatory parameter */
4661 struct mbuf *m;
4662
4663 m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
4664 if (m) {
4665 struct sctp_paramhdr *ph;
4666
4667 SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
4668 ph = mtod(m, struct sctp_paramhdr *);
4669 ph->param_length = htons(sizeof(struct sctp_paramhdr));
4670 ph->param_type = htons(err);
4671 }
4672 return (m);
4673 }
4674
4675 #ifdef SCTP_MBCNT_LOGGING
void
sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *tp1, int chk_cnt)
{
	/*
	 * Account for chk_cnt chunks (tp1's book_size worth of data)
	 * leaving the outbound queues, logging the decrease when MBCNT
	 * logging is enabled.  No-op for chunks that carry no data.
	 */
	if (tp1->data == NULL) {
		return;
	}
	asoc->chunks_on_out_queue -= chk_cnt;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
		    asoc->total_output_queue_size,
		    tp1->book_size,
		    0,
		    tp1->mbcnt);
	}
	/* clamp at zero so the accounting can never go negative */
	if (asoc->total_output_queue_size >= tp1->book_size) {
		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
	} else {
		asoc->total_output_queue_size = 0;
	}

	/* mirror the decrease into the socket send buffer for 1-to-1 style sockets */
	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
		} else {
			stcb->sctp_socket->so_snd.sb_cc = 0;

		}
	}
}
4707
4708 #endif
4709
int
sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
    int reason, struct sctpchunk_listhead *queue, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Abandon a PR-SCTP message starting at chunk tp1 on 'queue':
	 * mark each fragment SKIP for FORWARD-TSN, free its data, notify
	 * the ULP with 'reason', and move send-queue chunks onto the sent
	 * queue.  Continues through following fragments until the end of
	 * the message (EOM) is found; if the message spills from the sent
	 * queue into the send queue, recurses once to finish it there.
	 * Returns the total book_size of everything released.
	 */
	int ret_sz = 0;
	int notdone;
	uint8_t foundeom = 0;

	do {
		ret_sz += tp1->book_size;
		tp1->sent = SCTP_FORWARD_TSN_SKIP;
		if (tp1->data) {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

#endif
			/* release accounting, flight size, and the mbufs */
			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
			sctp_flight_size_decrease(tp1);
			sctp_total_flight_decrease(stcb, tp1);
			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
			sctp_m_freem(tp1->data);
			tp1->data = NULL;
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			so = SCTP_INP_SO(stcb->sctp_ep);
			if (!so_locked) {
				/* acquire the socket lock, dropping the TCB
				 * lock around it to respect lock ordering */
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				SCTP_SOCKET_LOCK(so, 1);
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
				if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
					/*
					 * assoc was freed while we were
					 * unlocked
					 */
					SCTP_SOCKET_UNLOCK(so, 1);
					return (ret_sz);
				}
			}
#endif
			sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
		if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
			stcb->asoc.sent_queue_cnt_removeable--;
		}
		if (queue == &stcb->asoc.send_queue) {
			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
			/* on to the sent queue */
			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
			    sctp_next);
			stcb->asoc.sent_queue_cnt++;
		}
		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
		    SCTP_DATA_NOT_FRAG) {
			/* not frag'ed we are done */
			notdone = 0;
			foundeom = 1;
		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			/* end of frag, we are done */
			notdone = 0;
			foundeom = 1;
		} else {
			/*
			 * Its a begin or middle piece, we must mark all of
			 * it
			 */
			notdone = 1;
			tp1 = TAILQ_NEXT(tp1, sctp_next);
		}
	} while (tp1 && notdone);
	if ((foundeom == 0) && (queue == &stcb->asoc.sent_queue)) {
		/*
		 * The multi-part message was scattered across the send and
		 * sent queue.
		 */
		tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
		/*
		 * recurse through the send_queue too, starting at the
		 * beginning.
		 */
		if (tp1) {
			ret_sz += sctp_release_pr_sctp_chunk(stcb, tp1, reason,
			    &stcb->asoc.send_queue, so_locked);
		} else {
			SCTP_PRINTF("hmm, nothing on the send queue and no EOM?\n");
		}
	}
	return (ret_sz);
}
4808
4809 /*
4810 * checks to see if the given address, sa, is one that is currently known by
4811 * the kernel note: can't distinguish the same address on multiple interfaces
4812 * and doesn't handle multiple addresses with different zone/scope id's note:
4813 * ifa_ifwithaddr() compares the entire sockaddr struct
4814 */
4815 struct sctp_ifa *
4816 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4817 int holds_lock)
4818 {
4819 struct sctp_laddr *laddr;
4820
4821 if (holds_lock == 0) {
4822 SCTP_INP_RLOCK(inp);
4823 }
4824 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4825 if (laddr->ifa == NULL)
4826 continue;
4827 if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4828 continue;
4829 if (addr->sa_family == AF_INET) {
4830 if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4831 laddr->ifa->address.sin.sin_addr.s_addr) {
4832 /* found him. */
4833 if (holds_lock == 0) {
4834 SCTP_INP_RUNLOCK(inp);
4835 }
4836 return (laddr->ifa);
4837 break;
4838 }
4839 }
4840 #ifdef INET6
4841 if (addr->sa_family == AF_INET6) {
4842 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4843 &laddr->ifa->address.sin6)) {
4844 /* found him. */
4845 if (holds_lock == 0) {
4846 SCTP_INP_RUNLOCK(inp);
4847 }
4848 return (laddr->ifa);
4849 break;
4850 }
4851 }
4852 #endif
4853 }
4854 if (holds_lock == 0) {
4855 SCTP_INP_RUNLOCK(inp);
4856 }
4857 return (NULL);
4858 }
4859
4860 uint32_t
4861 sctp_get_ifa_hash_val(struct sockaddr *addr)
4862 {
4863 if (addr->sa_family == AF_INET) {
4864 struct sockaddr_in *sin;
4865
4866 sin = (struct sockaddr_in *)addr;
4867 return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
4868 } else if (addr->sa_family == AF_INET6) {
4869 struct sockaddr_in6 *sin6;
4870 uint32_t hash_of_addr;
4871
4872 sin6 = (struct sockaddr_in6 *)addr;
4873 hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
4874 sin6->sin6_addr.s6_addr32[1] +
4875 sin6->sin6_addr.s6_addr32[2] +
4876 sin6->sin6_addr.s6_addr32[3]);
4877 hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
4878 return (hash_of_addr);
4879 }
4880 return (0);
4881 }
4882
4883 struct sctp_ifa *
4884 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
4885 {
4886 struct sctp_ifa *sctp_ifap;
4887 struct sctp_vrf *vrf;
4888 struct sctp_ifalist *hash_head;
4889 uint32_t hash_of_addr;
4890
4891 if (holds_lock == 0)
4892 SCTP_IPI_ADDR_RLOCK();
4893
4894 vrf = sctp_find_vrf(vrf_id);
4895 if (vrf == NULL) {
4896 stage_right:
4897 if (holds_lock == 0)
4898 SCTP_IPI_ADDR_RUNLOCK();
4899 return (NULL);
4900 }
4901 hash_of_addr = sctp_get_ifa_hash_val(addr);
4902
4903 hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
4904 if (hash_head == NULL) {
4905 SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
4906 hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
4907 (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
4908 sctp_print_address(addr);
4909 SCTP_PRINTF("No such bucket for address\n");
4910 if (holds_lock == 0)
4911 SCTP_IPI_ADDR_RUNLOCK();
4912
4913 return (NULL);
4914 }
4915 LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
4916 if (sctp_ifap == NULL) {
4917 #ifdef INVARIANTS
4918 panic("Huh LIST_FOREACH corrupt");
4919 goto stage_right;
4920 #else
4921 SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n");
4922 goto stage_right;
4923 #endif
4924 }
4925 if (addr->sa_family != sctp_ifap->address.sa.sa_family)
4926 continue;
4927 if (addr->sa_family == AF_INET) {
4928 if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4929 sctp_ifap->address.sin.sin_addr.s_addr) {
4930 /* found him. */
4931 if (holds_lock == 0)
4932 SCTP_IPI_ADDR_RUNLOCK();
4933 return (sctp_ifap);
4934 break;
4935 }
4936 }
4937 #ifdef INET6
4938 if (addr->sa_family == AF_INET6) {
4939 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4940 &sctp_ifap->address.sin6)) {
4941 /* found him. */
4942 if (holds_lock == 0)
4943 SCTP_IPI_ADDR_RUNLOCK();
4944 return (sctp_ifap);
4945 break;
4946 }
4947 }
4948 #endif
4949 }
4950 if (holds_lock == 0)
4951 SCTP_IPI_ADDR_RUNLOCK();
4952 return (NULL);
4953 }
4954
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/*
	 * User pulled some data, do we need a rwnd update?
	 * Called from the receive path with *freed_so_far bytes newly
	 * consumed; if the recomputed receive window has grown by at
	 * least rwnd_req, send a window-update SACK (or NR-SACK) now.
	 * 'hold_rlock' says the caller holds the INP read lock, which
	 * is temporarily dropped around the output path and re-taken
	 * before returning.
	 */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* hold a reference so the assoc cannot vanish under us */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/* window grew enough to be worth advertising */
		if (hold_rlock) {
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		/*
		 * EY if nr_sacks used then send an nr-sack , a sack
		 * otherwise
		 */
		if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
			sctp_send_nr_sack(stcb);
		else
			sctp_send_sack(stcb);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* re-take the read lock if we dropped it above */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5043
5044 int
5045 sctp_sorecvmsg(struct socket *so,
5046 struct uio *uio,
5047 struct mbuf **mp,
5048 struct sockaddr *from,
5049 int fromlen,
5050 int *msg_flags,
5051 struct sctp_sndrcvinfo *sinfo,
5052 int filling_sinfo)
5053 {
5054 /*
5055 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5056 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5057 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5058 * On the way out we may send out any combination of:
5059 * MSG_NOTIFICATION MSG_EOR
5060 *
5061 */
5062 struct sctp_inpcb *inp = NULL;
5063 int my_len = 0;
5064 int cp_len = 0, error = 0;
5065 struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5066 struct mbuf *m = NULL, *embuf = NULL;
5067 struct sctp_tcb *stcb = NULL;
5068 int wakeup_read_socket = 0;
5069 int freecnt_applied = 0;
5070 int out_flags = 0, in_flags = 0;
5071 int block_allowed = 1;
5072 uint32_t freed_so_far = 0;
5073 uint32_t copied_so_far = 0;
5074 int in_eeor_mode = 0;
5075 int no_rcv_needed = 0;
5076 uint32_t rwnd_req = 0;
5077 int hold_sblock = 0;
5078 int hold_rlock = 0;
5079 int slen = 0;
5080 uint32_t held_length = 0;
5081 int sockbuf_lock = 0;
5082
5083 if (uio == NULL) {
5084 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5085 return (EINVAL);
5086 }
5087 if (msg_flags) {
5088 in_flags = *msg_flags;
5089 if (in_flags & MSG_PEEK)
5090 SCTP_STAT_INCR(sctps_read_peeks);
5091 } else {
5092 in_flags = 0;
5093 }
5094 slen = uio->uio_resid;
5095
5096 /* Pull in and set up our int flags */
5097 if (in_flags & MSG_OOB) {
5098 /* Out of band's NOT supported */
5099 return (EOPNOTSUPP);
5100 }
5101 if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5102 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5103 return (EINVAL);
5104 }
5105 if ((in_flags & (MSG_DONTWAIT
5106 | MSG_NBIO
5107 )) ||
5108 SCTP_SO_IS_NBIO(so)) {
5109 block_allowed = 0;
5110 }
5111 /* setup the endpoint */
5112 inp = (struct sctp_inpcb *)so->so_pcb;
5113 if (inp == NULL) {
5114 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5115 return (EFAULT);
5116 }
5117 rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5118 /* Must be at least a MTU's worth */
5119 if (rwnd_req < SCTP_MIN_RWND)
5120 rwnd_req = SCTP_MIN_RWND;
5121 in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5122 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5123 sctp_misc_ints(SCTP_SORECV_ENTER,
5124 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5125 }
5126 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5127 sctp_misc_ints(SCTP_SORECV_ENTERPL,
5128 rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5129 }
5130 error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5131 sockbuf_lock = 1;
5132 if (error) {
5133 goto release_unlocked;
5134 }
5135 restart:
5136
5137
5138 restart_nosblocks:
5139 if (hold_sblock == 0) {
5140 SOCKBUF_LOCK(&so->so_rcv);
5141 hold_sblock = 1;
5142 }
5143 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5144 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5145 goto out;
5146 }
5147 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5148 if (so->so_error) {
5149 error = so->so_error;
5150 if ((in_flags & MSG_PEEK) == 0)
5151 so->so_error = 0;
5152 goto out;
5153 } else {
5154 if (so->so_rcv.sb_cc == 0) {
5155 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5156 /* indicate EOF */
5157 error = 0;
5158 goto out;
5159 }
5160 }
5161 }
5162 if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5163 /* we need to wait for data */
5164 if ((so->so_rcv.sb_cc == 0) &&
5165 ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5166 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5167 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5168 /*
5169 * For active open side clear flags for
5170 * re-use passive open is blocked by
5171 * connect.
5172 */
5173 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5174 /*
5175 * You were aborted, passive side
5176 * always hits here
5177 */
5178 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5179 error = ECONNRESET;
5180 /*
5181 * You get this once if you are
5182 * active open side
5183 */
5184 if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5185 /*
5186 * Remove flag if on the
5187 * active open side
5188 */
5189 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
5190 }
5191 }
5192 so->so_state &= ~(SS_ISCONNECTING |
5193 SS_ISDISCONNECTING |
5194 SS_ISCONFIRMING |
5195 SS_ISCONNECTED);
5196 if (error == 0) {
5197 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5198 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5199 error = ENOTCONN;
5200 } else {
5201 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
5202 }
5203 }
5204 goto out;
5205 }
5206 }
5207 error = sbwait(&so->so_rcv);
5208 if (error) {
5209 goto out;
5210 }
5211 held_length = 0;
5212 goto restart_nosblocks;
5213 } else if (so->so_rcv.sb_cc == 0) {
5214 if (so->so_error) {
5215 error = so->so_error;
5216 if ((in_flags & MSG_PEEK) == 0)
5217 so->so_error = 0;
5218 } else {
5219 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5220 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5221 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5222 /*
5223 * For active open side clear flags
5224 * for re-use passive open is
5225 * blocked by connect.
5226 */
5227 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5228 /*
5229 * You were aborted, passive
5230 * side always hits here
5231 */
5232 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5233 error = ECONNRESET;
5234 /*
5235 * You get this once if you
5236 * are active open side
5237 */
5238 if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5239 /*
5240 * Remove flag if on
5241 * the active open
5242 * side
5243 */
5244 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
5245 }
5246 }
5247 so->so_state &= ~(SS_ISCONNECTING |
5248 SS_ISDISCONNECTING |
5249 SS_ISCONFIRMING |
5250 SS_ISCONNECTED);
5251 if (error == 0) {
5252 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5253 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5254 error = ENOTCONN;
5255 } else {
5256 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
5257 }
5258 }
5259 goto out;
5260 }
5261 }
5262 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5263 error = EWOULDBLOCK;
5264 }
5265 goto out;
5266 }
5267 if (hold_sblock == 1) {
5268 SOCKBUF_UNLOCK(&so->so_rcv);
5269 hold_sblock = 0;
5270 }
5271 /* we possibly have data we can read */
5272 /* sa_ignore FREED_MEMORY */
5273 control = TAILQ_FIRST(&inp->read_queue);
5274 if (control == NULL) {
5275 /*
5276 * This could be happening since the appender did the
5277 * increment but as not yet did the tailq insert onto the
5278 * read_queue
5279 */
5280 if (hold_rlock == 0) {
5281 SCTP_INP_READ_LOCK(inp);
5282 hold_rlock = 1;
5283 }
5284 control = TAILQ_FIRST(&inp->read_queue);
5285 if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5286 #ifdef INVARIANTS
5287 panic("Huh, its non zero and nothing on control?");
5288 #endif
5289 so->so_rcv.sb_cc = 0;
5290 }
5291 SCTP_INP_READ_UNLOCK(inp);
5292 hold_rlock = 0;
5293 goto restart;
5294 }
5295 if ((control->length == 0) &&
5296 (control->do_not_ref_stcb)) {
5297 /*
5298 * Clean up code for freeing assoc that left behind a
5299 * pdapi.. maybe a peer in EEOR that just closed after
5300 * sending and never indicated a EOR.
5301 */
5302 if (hold_rlock == 0) {
5303 hold_rlock = 1;
5304 SCTP_INP_READ_LOCK(inp);
5305 }
5306 control->held_length = 0;
5307 if (control->data) {
5308 /* Hmm there is data here .. fix */
5309 struct mbuf *m_tmp;
5310 int cnt = 0;
5311
5312 m_tmp = control->data;
5313 while (m_tmp) {
5314 cnt += SCTP_BUF_LEN(m_tmp);
5315 if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5316 control->tail_mbuf = m_tmp;
5317 control->end_added = 1;
5318 }
5319 m_tmp = SCTP_BUF_NEXT(m_tmp);
5320 }
5321 control->length = cnt;
5322 } else {
5323 /* remove it */
5324 TAILQ_REMOVE(&inp->read_queue, control, next);
5325 /* Add back any hiddend data */
5326 sctp_free_remote_addr(control->whoFrom);
5327 sctp_free_a_readq(stcb, control);
5328 }
5329 if (hold_rlock) {
5330 hold_rlock = 0;
5331 SCTP_INP_READ_UNLOCK(inp);
5332 }
5333 goto restart;
5334 }
5335 if (control->length == 0) {
5336 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5337 (filling_sinfo)) {
5338 /* find a more suitable one then this */
5339 ctl = TAILQ_NEXT(control, next);
5340 while (ctl) {
5341 if ((ctl->stcb != control->stcb) && (ctl->length) &&
5342 (ctl->some_taken ||
5343 (ctl->spec_flags & M_NOTIFICATION) ||
5344 ((ctl->do_not_ref_stcb == 0) &&
5345 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5346 ) {
5347 /*-
5348 * If we have a different TCB next, and there is data
5349 * present. If we have already taken some (pdapi), OR we can
5350 * ref the tcb and no delivery as started on this stream, we
5351 * take it. Note we allow a notification on a different
5352 * assoc to be delivered..
5353 */
5354 control = ctl;
5355 goto found_one;
5356 } else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5357 (ctl->length) &&
5358 ((ctl->some_taken) ||
5359 ((ctl->do_not_ref_stcb == 0) &&
5360 ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5361 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5362 ) {
5363 /*-
5364 * If we have the same tcb, and there is data present, and we
5365 * have the strm interleave feature present. Then if we have
5366 * taken some (pdapi) or we can refer to tht tcb AND we have
5367 * not started a delivery for this stream, we can take it.
5368 * Note we do NOT allow a notificaiton on the same assoc to
5369 * be delivered.
5370 */
5371 control = ctl;
5372 goto found_one;
5373 }
5374 ctl = TAILQ_NEXT(ctl, next);
5375 }
5376 }
5377 /*
5378 * if we reach here, not suitable replacement is available
5379 * <or> fragment interleave is NOT on. So stuff the sb_cc
5380 * into the our held count, and its time to sleep again.
5381 */
5382 held_length = so->so_rcv.sb_cc;
5383 control->held_length = so->so_rcv.sb_cc;
5384 goto restart;
5385 }
5386 /* Clear the held length since there is something to read */
5387 control->held_length = 0;
5388 if (hold_rlock) {
5389 SCTP_INP_READ_UNLOCK(inp);
5390 hold_rlock = 0;
5391 }
5392 found_one:
5393 /*
5394 * If we reach here, control has a some data for us to read off.
5395 * Note that stcb COULD be NULL.
5396 */
5397 control->some_taken++;
5398 if (hold_sblock) {
5399 SOCKBUF_UNLOCK(&so->so_rcv);
5400 hold_sblock = 0;
5401 }
5402 stcb = control->stcb;
5403 if (stcb) {
5404 if ((control->do_not_ref_stcb == 0) &&
5405 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5406 if (freecnt_applied == 0)
5407 stcb = NULL;
5408 } else if (control->do_not_ref_stcb == 0) {
5409 /* you can't free it on me please */
5410 /*
5411 * The lock on the socket buffer protects us so the
5412 * free code will stop. But since we used the
5413 * socketbuf lock and the sender uses the tcb_lock
5414 * to increment, we need to use the atomic add to
5415 * the refcnt
5416 */
5417 if (freecnt_applied) {
5418 #ifdef INVARIANTS
5419 panic("refcnt already incremented");
5420 #else
5421 printf("refcnt already incremented?\n");
5422 #endif
5423 } else {
5424 atomic_add_int(&stcb->asoc.refcnt, 1);
5425 freecnt_applied = 1;
5426 }
5427 /*
5428 * Setup to remember how much we have not yet told
5429 * the peer our rwnd has opened up. Note we grab the
5430 * value from the tcb from last time. Note too that
5431 * sack sending clears this when a sack is sent,
5432 * which is fine. Once we hit the rwnd_req, we then
5433 * will go to the sctp_user_rcvd() that will not
5434 * lock until it KNOWs it MUST send a WUP-SACK.
5435 */
5436 freed_so_far = stcb->freed_by_sorcv_sincelast;
5437 stcb->freed_by_sorcv_sincelast = 0;
5438 }
5439 }
5440 if (stcb &&
5441 ((control->spec_flags & M_NOTIFICATION) == 0) &&
5442 control->do_not_ref_stcb == 0) {
5443 stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5444 }
5445 /* First lets get off the sinfo and sockaddr info */
5446 if ((sinfo) && filling_sinfo) {
5447 memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
5448 nxt = TAILQ_NEXT(control, next);
5449 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
5450 struct sctp_extrcvinfo *s_extra;
5451
5452 s_extra = (struct sctp_extrcvinfo *)sinfo;
5453 if ((nxt) &&
5454 (nxt->length)) {
5455 s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5456 if (nxt->sinfo_flags & SCTP_UNORDERED) {
5457 s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5458 }
5459 if (nxt->spec_flags & M_NOTIFICATION) {
5460 s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5461 }
5462 s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
5463 s_extra->sreinfo_next_length = nxt->length;
5464 s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
5465 s_extra->sreinfo_next_stream = nxt->sinfo_stream;
5466 if (nxt->tail_mbuf != NULL) {
5467 if (nxt->end_added) {
5468 s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5469 }
5470 }
5471 } else {
5472 /*
5473 * we explicitly 0 this, since the memcpy
5474 * got some other things beyond the older
5475 * sinfo_ that is on the control's structure
5476 * :-D
5477 */
5478 nxt = NULL;
5479 s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5480 s_extra->sreinfo_next_aid = 0;
5481 s_extra->sreinfo_next_length = 0;
5482 s_extra->sreinfo_next_ppid = 0;
5483 s_extra->sreinfo_next_stream = 0;
5484 }
5485 }
5486 /*
5487 * update off the real current cum-ack, if we have an stcb.
5488 */
5489 if ((control->do_not_ref_stcb == 0) && stcb)
5490 sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5491 /*
5492 * mask off the high bits, we keep the actual chunk bits in
5493 * there.
5494 */
5495 sinfo->sinfo_flags &= 0x00ff;
5496 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5497 sinfo->sinfo_flags |= SCTP_UNORDERED;
5498 }
5499 }
5500 #ifdef SCTP_ASOCLOG_OF_TSNS
5501 {
5502 int index, newindex;
5503 struct sctp_pcbtsn_rlog *entry;
5504
5505 do {
5506 index = inp->readlog_index;
5507 newindex = index + 1;
5508 if (newindex >= SCTP_READ_LOG_SIZE) {
5509 newindex = 0;
5510 }
5511 } while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5512 entry = &inp->readlog[index];
5513 entry->vtag = control->sinfo_assoc_id;
5514 entry->strm = control->sinfo_stream;
5515 entry->seq = control->sinfo_ssn;
5516 entry->sz = control->length;
5517 entry->flgs = control->sinfo_flags;
5518 }
5519 #endif
5520 if (fromlen && from) {
5521 struct sockaddr *to;
5522
5523 #ifdef INET
5524 cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin.sin_len);
5525 memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5526 ((struct sockaddr_in *)from)->sin_port = control->port_from;
5527 #else
5528 /* No AF_INET use AF_INET6 */
5529 cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin6.sin6_len);
5530 memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5531 ((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
5532 #endif
5533
5534 to = from;
5535 #if defined(INET) && defined(INET6)
5536 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
5537 (to->sa_family == AF_INET) &&
5538 ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
5539 struct sockaddr_in *sin;
5540 struct sockaddr_in6 sin6;
5541
5542 sin = (struct sockaddr_in *)to;
5543 bzero(&sin6, sizeof(sin6));
5544 sin6.sin6_family = AF_INET6;
5545 sin6.sin6_len = sizeof(struct sockaddr_in6);
5546 sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
5547 bcopy(&sin->sin_addr,
5548 &sin6.sin6_addr.s6_addr32[3],
5549 sizeof(sin6.sin6_addr.s6_addr32[3]));
5550 sin6.sin6_port = sin->sin_port;
5551 memcpy(from, (caddr_t)&sin6, sizeof(sin6));
5552 }
5553 #endif
5554 #if defined(INET6)
5555 {
5556 struct sockaddr_in6 lsa6, *to6;
5557
5558 to6 = (struct sockaddr_in6 *)to;
5559 sctp_recover_scope_mac(to6, (&lsa6));
5560 }
5561 #endif
5562 }
5563 /* now copy out what data we can */
5564 if (mp == NULL) {
5565 /* copy out each mbuf in the chain up to length */
5566 get_more_data:
5567 m = control->data;
5568 while (m) {
5569 /* Move out all we can */
5570 cp_len = (int)uio->uio_resid;
5571 my_len = (int)SCTP_BUF_LEN(m);
5572 if (cp_len > my_len) {
5573 /* not enough in this buf */
5574 cp_len = my_len;
5575 }
5576 if (hold_rlock) {
5577 SCTP_INP_READ_UNLOCK(inp);
5578 hold_rlock = 0;
5579 }
5580 if (cp_len > 0)
5581 error = uiomove(mtod(m, char *), cp_len, uio);
5582 /* re-read */
5583 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5584 goto release;
5585 }
5586 if ((control->do_not_ref_stcb == 0) && stcb &&
5587 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5588 no_rcv_needed = 1;
5589 }
5590 if (error) {
5591 /* error we are out of here */
5592 goto release;
5593 }
5594 if ((SCTP_BUF_NEXT(m) == NULL) &&
5595 (cp_len >= SCTP_BUF_LEN(m)) &&
5596 ((control->end_added == 0) ||
5597 (control->end_added &&
5598 (TAILQ_NEXT(control, next) == NULL)))
5599 ) {
5600 SCTP_INP_READ_LOCK(inp);
5601 hold_rlock = 1;
5602 }
5603 if (cp_len == SCTP_BUF_LEN(m)) {
5604 if ((SCTP_BUF_NEXT(m) == NULL) &&
5605 (control->end_added)) {
5606 out_flags |= MSG_EOR;
5607 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5608 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5609 }
5610 if (control->spec_flags & M_NOTIFICATION) {
5611 out_flags |= MSG_NOTIFICATION;
5612 }
5613 /* we ate up the mbuf */
5614 if (in_flags & MSG_PEEK) {
5615 /* just looking */
5616 m = SCTP_BUF_NEXT(m);
5617 copied_so_far += cp_len;
5618 } else {
5619 /* dispose of the mbuf */
5620 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5621 sctp_sblog(&so->so_rcv,
5622 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5623 }
5624 sctp_sbfree(control, stcb, &so->so_rcv, m);
5625 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5626 sctp_sblog(&so->so_rcv,
5627 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5628 }
5629 embuf = m;
5630 copied_so_far += cp_len;
5631 freed_so_far += cp_len;
5632 freed_so_far += MSIZE;
5633 atomic_subtract_int(&control->length, cp_len);
5634 control->data = sctp_m_free(m);
5635 m = control->data;
5636 /*
5637 * been through it all, must hold sb
5638 * lock ok to null tail
5639 */
5640 if (control->data == NULL) {
5641 #ifdef INVARIANTS
5642 if ((control->end_added == 0) ||
5643 (TAILQ_NEXT(control, next) == NULL)) {
5644 /*
5645 * If the end is not
5646 * added, OR the
5647 * next is NOT null
5648 * we MUST have the
5649 * lock.
5650 */
5651 if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5652 panic("Hmm we don't own the lock?");
5653 }
5654 }
5655 #endif
5656 control->tail_mbuf = NULL;
5657 #ifdef INVARIANTS
5658 if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5659 panic("end_added, nothing left and no MSG_EOR");
5660 }
5661 #endif
5662 }
5663 }
5664 } else {
5665 /* Do we need to trim the mbuf? */
5666 if (control->spec_flags & M_NOTIFICATION) {
5667 out_flags |= MSG_NOTIFICATION;
5668 }
5669 if ((in_flags & MSG_PEEK) == 0) {
5670 SCTP_BUF_RESV_UF(m, cp_len);
5671 SCTP_BUF_LEN(m) -= cp_len;
5672 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5673 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5674 }
5675 atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5676 if ((control->do_not_ref_stcb == 0) &&
5677 stcb) {
5678 atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5679 }
5680 copied_so_far += cp_len;
5681 embuf = m;
5682 freed_so_far += cp_len;
5683 freed_so_far += MSIZE;
5684 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5685 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5686 SCTP_LOG_SBRESULT, 0);
5687 }
5688 atomic_subtract_int(&control->length, cp_len);
5689 } else {
5690 copied_so_far += cp_len;
5691 }
5692 }
5693 if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5694 break;
5695 }
5696 if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5697 (control->do_not_ref_stcb == 0) &&
5698 (freed_so_far >= rwnd_req)) {
5699 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5700 }
5701 } /* end while(m) */
5702 /*
5703 * At this point we have looked at it all and we either have
5704 * a MSG_EOR/or read all the user wants... <OR>
5705 * control->length == 0.
5706 */
5707 if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5708 /* we are done with this control */
5709 if (control->length == 0) {
5710 if (control->data) {
5711 #ifdef INVARIANTS
5712 panic("control->data not null at read eor?");
5713 #else
5714 SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5715 sctp_m_freem(control->data);
5716 control->data = NULL;
5717 #endif
5718 }
5719 done_with_control:
5720 if (TAILQ_NEXT(control, next) == NULL) {
5721 /*
5722 * If we don't have a next we need a
5723 * lock, if there is a next
5724 * interrupt is filling ahead of us
5725 * and we don't need a lock to
5726 * remove this guy (which is the
5727 * head of the queue).
5728 */
5729 if (hold_rlock == 0) {
5730 SCTP_INP_READ_LOCK(inp);
5731 hold_rlock = 1;
5732 }
5733 }
5734 TAILQ_REMOVE(&inp->read_queue, control, next);
5735 /* Add back any hiddend data */
5736 if (control->held_length) {
5737 held_length = 0;
5738 control->held_length = 0;
5739 wakeup_read_socket = 1;
5740 }
5741 if (control->aux_data) {
5742 sctp_m_free(control->aux_data);
5743 control->aux_data = NULL;
5744 }
5745 no_rcv_needed = control->do_not_ref_stcb;
5746 sctp_free_remote_addr(control->whoFrom);
5747 control->data = NULL;
5748 sctp_free_a_readq(stcb, control);
5749 control = NULL;
5750 if ((freed_so_far >= rwnd_req) &&
5751 (no_rcv_needed == 0))
5752 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5753
5754 } else {
5755 /*
5756 * The user did not read all of this
5757 * message, turn off the returned MSG_EOR
5758 * since we are leaving more behind on the
5759 * control to read.
5760 */
5761 #ifdef INVARIANTS
5762 if (control->end_added &&
5763 (control->data == NULL) &&
5764 (control->tail_mbuf == NULL)) {
5765 panic("Gak, control->length is corrupt?");
5766 }
5767 #endif
5768 no_rcv_needed = control->do_not_ref_stcb;
5769 out_flags &= ~MSG_EOR;
5770 }
5771 }
5772 if (out_flags & MSG_EOR) {
5773 goto release;
5774 }
5775 if ((uio->uio_resid == 0) ||
5776 ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))
5777 ) {
5778 goto release;
5779 }
5780 /*
5781 * If I hit here the receiver wants more and this message is
5782 * NOT done (pd-api). So two questions. Can we block? if not
5783 * we are done. Did the user NOT set MSG_WAITALL?
5784 */
5785 if (block_allowed == 0) {
5786 goto release;
5787 }
5788 /*
5789 * We need to wait for more data a few things: - We don't
5790 * sbunlock() so we don't get someone else reading. - We
5791 * must be sure to account for the case where what is added
5792 * is NOT to our control when we wakeup.
5793 */
5794
5795 /*
5796 * Do we need to tell the transport a rwnd update might be
5797 * needed before we go to sleep?
5798 */
5799 if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5800 ((freed_so_far >= rwnd_req) &&
5801 (control->do_not_ref_stcb == 0) &&
5802 (no_rcv_needed == 0))) {
5803 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5804 }
5805 wait_some_more:
5806 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5807 goto release;
5808 }
5809 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5810 goto release;
5811
5812 if (hold_rlock == 1) {
5813 SCTP_INP_READ_UNLOCK(inp);
5814 hold_rlock = 0;
5815 }
5816 if (hold_sblock == 0) {
5817 SOCKBUF_LOCK(&so->so_rcv);
5818 hold_sblock = 1;
5819 }
5820 if ((copied_so_far) && (control->length == 0) &&
5821 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))
5822 ) {
5823 goto release;
5824 }
5825 if (so->so_rcv.sb_cc <= control->held_length) {
5826 error = sbwait(&so->so_rcv);
5827 if (error) {
5828 goto release;
5829 }
5830 control->held_length = 0;
5831 }
5832 if (hold_sblock) {
5833 SOCKBUF_UNLOCK(&so->so_rcv);
5834 hold_sblock = 0;
5835 }
5836 if (control->length == 0) {
5837 /* still nothing here */
5838 if (control->end_added == 1) {
5839 /* he aborted, or is done i.e.did a shutdown */
5840 out_flags |= MSG_EOR;
5841 if (control->pdapi_aborted) {
5842 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5843 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5844
5845 out_flags |= MSG_TRUNC;
5846 } else {
5847 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5848 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5849 }
5850 goto done_with_control;
5851 }
5852 if (so->so_rcv.sb_cc > held_length) {
5853 control->held_length = so->so_rcv.sb_cc;
5854 held_length = 0;
5855 }
5856 goto wait_some_more;
5857 } else if (control->data == NULL) {
5858 /*
5859 * we must re-sync since data is probably being
5860 * added
5861 */
5862 SCTP_INP_READ_LOCK(inp);
5863 if ((control->length > 0) && (control->data == NULL)) {
5864 /*
5865 * big trouble.. we have the lock and its
5866 * corrupt?
5867 */
5868 #ifdef INVARIANTS
5869 panic("Impossible data==NULL length !=0");
5870 #endif
5871 out_flags |= MSG_EOR;
5872 out_flags |= MSG_TRUNC;
5873 control->length = 0;
5874 SCTP_INP_READ_UNLOCK(inp);
5875 goto done_with_control;
5876 }
5877 SCTP_INP_READ_UNLOCK(inp);
5878 /* We will fall around to get more data */
5879 }
5880 goto get_more_data;
5881 } else {
5882 /*-
5883 * Give caller back the mbuf chain,
5884 * store in uio_resid the length
5885 */
5886 wakeup_read_socket = 0;
5887 if ((control->end_added == 0) ||
5888 (TAILQ_NEXT(control, next) == NULL)) {
5889 /* Need to get rlock */
5890 if (hold_rlock == 0) {
5891 SCTP_INP_READ_LOCK(inp);
5892 hold_rlock = 1;
5893 }
5894 }
5895 if (control->end_added) {
5896 out_flags |= MSG_EOR;
5897 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5898 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5899 }
5900 if (control->spec_flags & M_NOTIFICATION) {
5901 out_flags |= MSG_NOTIFICATION;
5902 }
5903 uio->uio_resid = control->length;
5904 *mp = control->data;
5905 m = control->data;
5906 while (m) {
5907 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5908 sctp_sblog(&so->so_rcv,
5909 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5910 }
5911 sctp_sbfree(control, stcb, &so->so_rcv, m);
5912 freed_so_far += SCTP_BUF_LEN(m);
5913 freed_so_far += MSIZE;
5914 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5915 sctp_sblog(&so->so_rcv,
5916 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5917 }
5918 m = SCTP_BUF_NEXT(m);
5919 }
5920 control->data = control->tail_mbuf = NULL;
5921 control->length = 0;
5922 if (out_flags & MSG_EOR) {
5923 /* Done with this control */
5924 goto done_with_control;
5925 }
5926 }
5927 release:
5928 if (hold_rlock == 1) {
5929 SCTP_INP_READ_UNLOCK(inp);
5930 hold_rlock = 0;
5931 }
5932 if (hold_sblock == 1) {
5933 SOCKBUF_UNLOCK(&so->so_rcv);
5934 hold_sblock = 0;
5935 }
5936 sbunlock(&so->so_rcv);
5937 sockbuf_lock = 0;
5938
5939 release_unlocked:
5940 if (hold_sblock) {
5941 SOCKBUF_UNLOCK(&so->so_rcv);
5942 hold_sblock = 0;
5943 }
5944 if ((stcb) && (in_flags & MSG_PEEK) == 0) {
5945 if ((freed_so_far >= rwnd_req) &&
5946 (control && (control->do_not_ref_stcb == 0)) &&
5947 (no_rcv_needed == 0))
5948 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5949 }
5950 out:
5951 if (msg_flags) {
5952 *msg_flags = out_flags;
5953 }
5954 if (((out_flags & MSG_EOR) == 0) &&
5955 ((in_flags & MSG_PEEK) == 0) &&
5956 (sinfo) &&
5957 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO))) {
5958 struct sctp_extrcvinfo *s_extra;
5959
5960 s_extra = (struct sctp_extrcvinfo *)sinfo;
5961 s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5962 }
5963 if (hold_rlock == 1) {
5964 SCTP_INP_READ_UNLOCK(inp);
5965 hold_rlock = 0;
5966 }
5967 if (hold_sblock) {
5968 SOCKBUF_UNLOCK(&so->so_rcv);
5969 hold_sblock = 0;
5970 }
5971 if (sockbuf_lock) {
5972 sbunlock(&so->so_rcv);
5973 }
5974 if (freecnt_applied) {
5975 /*
5976 * The lock on the socket buffer protects us so the free
5977 * code will stop. But since we used the socketbuf lock and
5978 * the sender uses the tcb_lock to increment, we need to use
5979 * the atomic add to the refcnt.
5980 */
5981 if (stcb == NULL) {
5982 #ifdef INVARIANTS
5983 panic("stcb for refcnt has gone NULL?");
5984 goto stage_left;
5985 #else
5986 goto stage_left;
5987 #endif
5988 }
5989 atomic_add_int(&stcb->asoc.refcnt, -1);
5990 freecnt_applied = 0;
5991 /* Save the value back for next time */
5992 stcb->freed_by_sorcv_sincelast = freed_so_far;
5993 }
5994 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5995 if (stcb) {
5996 sctp_misc_ints(SCTP_SORECV_DONE,
5997 freed_so_far,
5998 ((uio) ? (slen - uio->uio_resid) : slen),
5999 stcb->asoc.my_rwnd,
6000 so->so_rcv.sb_cc);
6001 } else {
6002 sctp_misc_ints(SCTP_SORECV_DONE,
6003 freed_so_far,
6004 ((uio) ? (slen - uio->uio_resid) : slen),
6005 0,
6006 so->so_rcv.sb_cc);
6007 }
6008 }
6009 stage_left:
6010 if (wakeup_read_socket) {
6011 sctp_sorwakeup(inp, so);
6012 }
6013 return (error);
6014 }
6015
6016
6017 #ifdef SCTP_MBUF_LOGGING
6018 struct mbuf *
6019 sctp_m_free(struct mbuf *m)
6020 {
6021 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6022 if (SCTP_BUF_IS_EXTENDED(m)) {
6023 sctp_log_mb(m, SCTP_MBUF_IFREE);
6024 }
6025 }
6026 return (m_free(m));
6027 }
6028
6029 void
6030 sctp_m_freem(struct mbuf *mb)
6031 {
6032 while (mb != NULL)
6033 mb = sctp_m_free(mb);
6034 }
6035
6036 #endif
6037
6038 int
6039 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6040 {
6041 /*
6042 * Given a local address. For all associations that holds the
6043 * address, request a peer-set-primary.
6044 */
6045 struct sctp_ifa *ifa;
6046 struct sctp_laddr *wi;
6047
6048 ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6049 if (ifa == NULL) {
6050 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6051 return (EADDRNOTAVAIL);
6052 }
6053 /*
6054 * Now that we have the ifa we must awaken the iterator with this
6055 * message.
6056 */
6057 wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6058 if (wi == NULL) {
6059 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6060 return (ENOMEM);
6061 }
6062 /* Now incr the count and int wi structure */
6063 SCTP_INCR_LADDR_COUNT();
6064 bzero(wi, sizeof(*wi));
6065 (void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6066 wi->ifa = ifa;
6067 wi->action = SCTP_SET_PRIM_ADDR;
6068 atomic_add_int(&ifa->refcount, 1);
6069
6070 /* Now add it to the work queue */
6071 SCTP_IPI_ITERATOR_WQ_LOCK();
6072 /*
6073 * Should this really be a tailq? As it is we will process the
6074 * newest first :-0
6075 */
6076 LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6077 SCTP_IPI_ITERATOR_WQ_UNLOCK();
6078 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6079 (struct sctp_inpcb *)NULL,
6080 (struct sctp_tcb *)NULL,
6081 (struct sctp_nets *)NULL);
6082 return (0);
6083 }
6084
6085
6086 int
6087 sctp_soreceive(struct socket *so,
6088 struct sockaddr **psa,
6089 struct uio *uio,
6090 struct mbuf **mp0,
6091 struct mbuf **controlp,
6092 int *flagsp)
6093 {
6094 int error, fromlen;
6095 uint8_t sockbuf[256];
6096 struct sockaddr *from;
6097 struct sctp_extrcvinfo sinfo;
6098 int filling_sinfo = 1;
6099 struct sctp_inpcb *inp;
6100
6101 inp = (struct sctp_inpcb *)so->so_pcb;
6102 /* pickup the assoc we are reading from */
6103 if (inp == NULL) {
6104 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6105 return (EINVAL);
6106 }
6107 if ((sctp_is_feature_off(inp,
6108 SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6109 (controlp == NULL)) {
6110 /* user does not want the sndrcv ctl */
6111 filling_sinfo = 0;
6112 }
6113 if (psa) {
6114 from = (struct sockaddr *)sockbuf;
6115 fromlen = sizeof(sockbuf);
6116 from->sa_len = 0;
6117 } else {
6118 from = NULL;
6119 fromlen = 0;
6120 }
6121
6122 error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6123 (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6124 if ((controlp) && (filling_sinfo)) {
6125 /* copy back the sinfo in a CMSG format */
6126 if (filling_sinfo)
6127 *controlp = sctp_build_ctl_nchunk(inp,
6128 (struct sctp_sndrcvinfo *)&sinfo);
6129 else
6130 *controlp = NULL;
6131 }
6132 if (psa) {
6133 /* copy back the address info */
6134 if (from && from->sa_len) {
6135 *psa = sodupsockaddr(from, M_NOWAIT);
6136 } else {
6137 *psa = NULL;
6138 }
6139 }
6140 return (error);
6141 }
6142
6143
6144 int
6145 sctp_l_soreceive(struct socket *so,
6146 struct sockaddr **name,
6147 struct uio *uio,
6148 char **controlp,
6149 int *controllen,
6150 int *flag)
6151 {
6152 int error, fromlen;
6153 uint8_t sockbuf[256];
6154 struct sockaddr *from;
6155 struct sctp_extrcvinfo sinfo;
6156 int filling_sinfo = 1;
6157 struct sctp_inpcb *inp;
6158
6159 inp = (struct sctp_inpcb *)so->so_pcb;
6160 /* pickup the assoc we are reading from */
6161 if (inp == NULL) {
6162 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6163 return (EINVAL);
6164 }
6165 if ((sctp_is_feature_off(inp,
6166 SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6167 (controlp == NULL)) {
6168 /* user does not want the sndrcv ctl */
6169 filling_sinfo = 0;
6170 }
6171 if (name) {
6172 from = (struct sockaddr *)sockbuf;
6173 fromlen = sizeof(sockbuf);
6174 from->sa_len = 0;
6175 } else {
6176 from = NULL;
6177 fromlen = 0;
6178 }
6179
6180 error = sctp_sorecvmsg(so, uio,
6181 (struct mbuf **)NULL,
6182 from, fromlen, flag,
6183 (struct sctp_sndrcvinfo *)&sinfo,
6184 filling_sinfo);
6185 if ((controlp) && (filling_sinfo)) {
6186 /*
6187 * copy back the sinfo in a CMSG format note that the caller
6188 * has reponsibility for freeing the memory.
6189 */
6190 if (filling_sinfo)
6191 *controlp = sctp_build_ctl_cchunk(inp,
6192 controllen,
6193 (struct sctp_sndrcvinfo *)&sinfo);
6194 }
6195 if (name) {
6196 /* copy back the address info */
6197 if (from && from->sa_len) {
6198 *name = sodupsockaddr(from, M_WAIT);
6199 } else {
6200 *name = NULL;
6201 }
6202 }
6203 return (error);
6204 }
6205
6206
6207
6208
6209
6210
6211
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	/*
	 * Walk a packed array of 'totaddr' sockaddrs starting at 'addr'
	 * and add each AF_INET/AF_INET6 entry as a confirmed remote
	 * address of 'stcb'.  Returns the number of addresses added.
	 * If sctp_add_remote_addr() fails, the association is freed,
	 * *error is set to ENOBUFS, and the count so far is returned.
	 */
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		if (sa->sa_family == AF_INET) {
			incr = sizeof(struct sockaddr_in);
			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
		} else if (sa->sa_family == AF_INET6) {
			incr = sizeof(struct sockaddr_in6);
			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
		}
		/*
		 * NOTE(review): for an unrecognized sa_family 'incr' keeps
		 * its previous value (0 on the first iteration), so 'sa'
		 * may not advance and the same entry is re-examined.  It
		 * looks like callers are expected to pass only validated
		 * AF_INET/AF_INET6 entries — confirm against callers
		 * (e.g. sctp_connectx_helper_find) before relying on this.
		 */
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6252
6253 struct sctp_tcb *
6254 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6255 int *totaddr, int *num_v4, int *num_v6, int *error,
6256 int limit, int *bad_addr)
6257 {
6258 struct sockaddr *sa;
6259 struct sctp_tcb *stcb = NULL;
6260 size_t incr, at, i;
6261
6262 at = incr = 0;
6263 sa = addr;
6264 *error = *num_v6 = *num_v4 = 0;
6265 /* account and validate addresses */
6266 for (i = 0; i < (size_t)*totaddr; i++) {
6267 if (sa->sa_family == AF_INET) {
6268 (*num_v4) += 1;
6269 incr = sizeof(struct sockaddr_in);
6270 if (sa->sa_len != incr) {
6271 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6272 *error = EINVAL;
6273 *bad_addr = 1;
6274 return (NULL);
6275 }
6276 } else if (sa->sa_family == AF_INET6) {
6277 struct sockaddr_in6 *sin6;
6278
6279 sin6 = (struct sockaddr_in6 *)sa;
6280 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6281 /* Must be non-mapped for connectx */
6282 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6283 *error = EINVAL;
6284 *bad_addr = 1;
6285 return (NULL);
6286 }
6287 (*num_v6) += 1;
6288 incr = sizeof(struct sockaddr_in6);
6289 if (sa->sa_len != incr) {
6290 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6291 *error = EINVAL;
6292 *bad_addr = 1;
6293 return (NULL);
6294 }
6295 } else {
6296 *totaddr = i;
6297 /* we are done */
6298 break;
6299 }
6300 SCTP_INP_INCR_REF(inp);
6301 stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6302 if (stcb != NULL) {
6303 /* Already have or am bring up an association */
6304 return (stcb);
6305 } else {
6306 SCTP_INP_DECR_REF(inp);
6307 }
6308 if ((at + incr) > (size_t)limit) {
6309 *totaddr = i;
6310 break;
6311 }
6312 sa = (struct sockaddr *)((caddr_t)sa + incr);
6313 }
6314 return ((struct sctp_tcb *)NULL);
6315 }
6316
/*
 * sctp_bindx(ADD) for one address.
 * Assumes all arguments are valid/checked by the caller.  Failures are
 * reported through *error; on the not-yet-bound path *error receives the
 * result of the underlying sctp_inpcb_bind() call instead.
 */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	struct sockaddr *addr_touse;	/* address actually used; may be redirected to 'sin' below */

#ifdef INET6
	struct sockaddr_in sin;		/* scratch space for a v4-mapped -> plain v4 conversion */

#endif

	/* bindx(ADD) is meaningless on a bound-all endpoint */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#if defined(INET6) && !defined(__Userspace__)	/* TODO port in6_sin6_2_sin */
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		/* reject malformed sockaddr lengths */
		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/*
				 * a v4-mapped address is not acceptable on a
				 * v6-only socket.  NOTE(review): BOUND_V6 was
				 * already verified set above, so the first
				 * conjunct is always true here.
				 */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* strip the mapping and bind the plain v4 address */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
	if (sa->sa_family == AF_INET) {
		/* reject malformed sockaddr lengths */
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind plain v4 on a v6-only socket */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		/* endpoint not yet bound at all: do a regular bind instead */
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/*
		 * validate the incoming port: a non-zero port must match the
		 * endpoint's local port.  NOTE(review): the else branch
		 * writes through addr_touse, which may still point at the
		 * caller's sockaddr — the caller's buffer is modified in
		 * that case.
		 */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		/* is some endpoint already bound to this address/port? */
		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* port is cleared before the address-management call */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			/* the address/port is owned by a different endpoint */
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6442
6443 /*
6444 * sctp_bindx(DELETE) for one address.
6445 * assumes all arguments are valid/checked by caller.
6446 */
6447 void
6448 sctp_bindx_delete_address(struct socket *so, struct sctp_inpcb *inp,
6449 struct sockaddr *sa, sctp_assoc_t assoc_id,
6450 uint32_t vrf_id, int *error)
6451 {
6452 struct sockaddr *addr_touse;
6453
6454 #ifdef INET6
6455 struct sockaddr_in sin;
6456
6457 #endif
6458
6459 /* see if we're bound all already! */
6460 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6461 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6462 *error = EINVAL;
6463 return;
6464 }
6465 addr_touse = sa;
6466 #if defined(INET6) && !defined(__Userspace__) /* TODO port in6_sin6_2_sin */
6467 if (sa->sa_family == AF_INET6) {
6468 struct sockaddr_in6 *sin6;
6469
6470 if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6471 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6472 *error = EINVAL;
6473 return;
6474 }
6475 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6476 /* can only bind v6 on PF_INET6 sockets */
6477 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6478 *error = EINVAL;
6479 return;
6480 }
6481 sin6 = (struct sockaddr_in6 *)addr_touse;
6482 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6483 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6484 SCTP_IPV6_V6ONLY(inp)) {
6485 /* can't bind mapped-v4 on PF_INET sockets */
6486 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6487 *error = EINVAL;
6488 return;
6489 }
6490 in6_sin6_2_sin(&sin, sin6);
6491 addr_touse = (struct sockaddr *)&sin;
6492 }
6493 }
6494 #endif
6495 if (sa->sa_family == AF_INET) {
6496 if (sa->sa_len != sizeof(struct sockaddr_in)) {
6497 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6498 *error = EINVAL;
6499 return;
6500 }
6501 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6502 SCTP_IPV6_V6ONLY(inp)) {
6503 /* can't bind v4 on PF_INET sockets */
6504 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6505 *error = EINVAL;
6506 return;
6507 }
6508 }
6509 /*
6510 * No lock required mgmt_ep_sa does its own locking. If the FIX:
6511 * below is ever changed we may need to lock before calling
6512 * association level binding.
6513 */
6514 if (assoc_id == 0) {
6515 /* delete the address */
6516 *error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6517 SCTP_DEL_IP_ADDRESS,
6518 vrf_id, NULL);
6519 } else {
6520 /*
6521 * FIX: decide whether we allow assoc based bindx
6522 */
6523 }
6524 }
6525
/*
 * Returns the number of local addresses valid for an association, applying
 * the association's scoping rules (loopback, private-v4, link-local and
 * site-local scopes).
 */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
	int ipv4_addr_legal, ipv6_addr_legal;
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.loopback_scope;
	ipv4_local_scope = stcb->asoc.ipv4_local_scope;
	local_scope = stcb->asoc.local_scope;
	site_scope = stcb->asoc.site_scope;
	/*
	 * Determine which address families are legal for this endpoint: a
	 * BOUND_V6 endpoint allows v6, and additionally v4 unless it is
	 * v6-only; otherwise only v4 is allowed.
	 */
	ipv4_addr_legal = ipv6_addr_legal = 0;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(stcb->sctp_ep) == 0) {
			ipv4_addr_legal = 1;
		}
	} else {
		ipv4_addr_legal = 1;
	}

	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			/* skip loopback interfaces unless loopback scope is on */
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				/* addresses restricted for this assoc don't count */
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						/* RFC 1918 addrs need the v4 private scope */
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							/*
							 * a zero scope id must be
							 * recoverable from the
							 * embedded scope
							 */
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/* bad link-local address */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6661
6662 #if defined(SCTP_LOCAL_TRACE_BUF)
6663
6664 void
6665 sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
6666 {
6667 uint32_t saveindex, newindex;
6668
6669 do {
6670 saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
6671 if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6672 newindex = 1;
6673 } else {
6674 newindex = saveindex + 1;
6675 }
6676 } while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
6677 if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6678 saveindex = 0;
6679 }
6680 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
6681 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
6682 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
6683 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
6684 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
6685 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
6686 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
6687 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
6688 }
6689
6690 #endif
/*
 * We will need to add support for binding the ports and such here so we
 * can do UDP tunneling. In the meantime, we return an error.
 */
6696
void
sctp_over_udp_stop(void)
{
	/* UDP tunneling is not implemented yet; nothing to tear down. */
}
int
sctp_over_udp_start(void)
{
	/* UDP tunneling is not implemented yet; always report failure. */
	return (-1);
}
Cache object: d963ccb04f77c3acf80cb74651746c95
|