1 /*-
2 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 *
9 * a) Redistributions of source code must retain the above copyright notice,
10 * this list of conditions and the following disclaimer.
11 *
12 * b) Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the distribution.
15 *
16 * c) Neither the name of Cisco Systems, Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #include <netinet6/sctp6_var.h>
43 #endif
44 #include <netinet/sctp_header.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_timer.h>
48 #include <netinet/sctp_indata.h>
49 #include <netinet/sctp_auth.h>
50 #include <netinet/sctp_asconf.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #if defined(INET6) || defined(INET)
53 #include <netinet/tcp_var.h>
54 #endif
55 #include <netinet/udp.h>
56 #include <netinet/udp_var.h>
57 #include <sys/proc.h>
58 #ifdef INET6
59 #include <netinet/icmp6.h>
60 #endif
61
62
63 #ifndef KTR_SCTP
64 #define KTR_SCTP KTR_SUBSYS
65 #endif
66
67 extern const struct sctp_cc_functions sctp_cc_functions[];
68 extern const struct sctp_ss_functions sctp_ss_functions[];
69
/*
 * Trace a socket-buffer accounting event (SCTP_LOG_EVENT_SB): the socket
 * buffer's byte count, the association's byte count (0 if no stcb), and
 * the increment being applied.  No-op unless SCTP_LOCAL_TRACE_BUF is set.
 */
void
sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/*
	 * Zero the whole entry first: SCTP_CTR6() below reads all four
	 * x.misc words through the union, so any byte not covered by the
	 * x.sb arm (padding, trailing space) would otherwise be
	 * indeterminate.  Matches rto_logging() and the other loggers
	 * that already zero their entry.
	 */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.sb.stcb = stcb;
	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
	if (stcb)
		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
	else
		sctp_clog.x.sb.stcb_sbcc = 0;
	sctp_clog.x.sb.incr = incr;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SB,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
92
93 void
94 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
95 {
96 #if defined(SCTP_LOCAL_TRACE_BUF)
97 struct sctp_cwnd_log sctp_clog;
98
99 sctp_clog.x.close.inp = (void *)inp;
100 sctp_clog.x.close.sctp_flags = inp->sctp_flags;
101 if (stcb) {
102 sctp_clog.x.close.stcb = (void *)stcb;
103 sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
104 } else {
105 sctp_clog.x.close.stcb = 0;
106 sctp_clog.x.close.state = 0;
107 }
108 sctp_clog.x.close.loc = loc;
109 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
110 SCTP_LOG_EVENT_CLOSE,
111 0,
112 sctp_clog.x.misc.log1,
113 sctp_clog.x.misc.log2,
114 sctp_clog.x.misc.log3,
115 sctp_clog.x.misc.log4);
116 #endif
117 }
118
/*
 * Trace an RTT update (SCTP_LOG_EVENT_RTT) for the given destination.
 * The stored value is net->rtt / 1000 — presumably a usec-to-msec
 * conversion; confirm against the definition of net->rtt.
 */
void
rto_logging(struct sctp_nets *net, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log clog;

	/* Zero the entry; only the rto arm of the union is filled in. */
	memset(&clog, 0, sizeof(clog));
	clog.x.rto.net = (void *)net;
	clog.x.rto.rtt = net->rtt / 1000;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RTT,
	    from,
	    clog.x.misc.log1,
	    clog.x.misc.log2,
	    clog.x.misc.log3,
	    clog.x.misc.log4);
#endif
}
137
138 void
139 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
140 {
141 #if defined(SCTP_LOCAL_TRACE_BUF)
142 struct sctp_cwnd_log sctp_clog;
143
144 sctp_clog.x.strlog.stcb = stcb;
145 sctp_clog.x.strlog.n_tsn = tsn;
146 sctp_clog.x.strlog.n_sseq = sseq;
147 sctp_clog.x.strlog.e_tsn = 0;
148 sctp_clog.x.strlog.e_sseq = 0;
149 sctp_clog.x.strlog.strm = stream;
150 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
151 SCTP_LOG_EVENT_STRM,
152 from,
153 sctp_clog.x.misc.log1,
154 sctp_clog.x.misc.log2,
155 sctp_clog.x.misc.log3,
156 sctp_clog.x.misc.log4);
157 #endif
158 }
159
/*
 * Trace a Nagle-algorithm decision (SCTP_LOG_EVENT_NAGLE) with the
 * current flight/queue accounting of the association.
 */
void
sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/*
	 * Zero first so the four x.misc words logged below are fully
	 * determinate; matches rto_logging() and friends.
	 */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.nagle.stcb = (void *)stcb;
	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_NAGLE,
	    action,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
180
181 void
182 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
183 {
184 #if defined(SCTP_LOCAL_TRACE_BUF)
185 struct sctp_cwnd_log sctp_clog;
186
187 sctp_clog.x.sack.cumack = cumack;
188 sctp_clog.x.sack.oldcumack = old_cumack;
189 sctp_clog.x.sack.tsn = tsn;
190 sctp_clog.x.sack.numGaps = gaps;
191 sctp_clog.x.sack.numDups = dups;
192 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
193 SCTP_LOG_EVENT_SACK,
194 from,
195 sctp_clog.x.misc.log1,
196 sctp_clog.x.misc.log2,
197 sctp_clog.x.misc.log3,
198 sctp_clog.x.misc.log4);
199 #endif
200 }
201
202 void
203 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
204 {
205 #if defined(SCTP_LOCAL_TRACE_BUF)
206 struct sctp_cwnd_log sctp_clog;
207
208 memset(&sctp_clog, 0, sizeof(sctp_clog));
209 sctp_clog.x.map.base = map;
210 sctp_clog.x.map.cum = cum;
211 sctp_clog.x.map.high = high;
212 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
213 SCTP_LOG_EVENT_MAP,
214 from,
215 sctp_clog.x.misc.log1,
216 sctp_clog.x.misc.log2,
217 sctp_clog.x.misc.log3,
218 sctp_clog.x.misc.log4);
219 #endif
220 }
221
222 void
223 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
224 {
225 #if defined(SCTP_LOCAL_TRACE_BUF)
226 struct sctp_cwnd_log sctp_clog;
227
228 memset(&sctp_clog, 0, sizeof(sctp_clog));
229 sctp_clog.x.fr.largest_tsn = biggest_tsn;
230 sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
231 sctp_clog.x.fr.tsn = tsn;
232 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
233 SCTP_LOG_EVENT_FR,
234 from,
235 sctp_clog.x.misc.log1,
236 sctp_clog.x.misc.log2,
237 sctp_clog.x.misc.log3,
238 sctp_clog.x.misc.log4);
239 #endif
240 }
241
242 #ifdef SCTP_MBUF_LOGGING
/*
 * Trace a single mbuf (SCTP_LOG_EVENT_MBUF): flags, length, data
 * pointer, and — for external-storage mbufs — the ext base and
 * reference count.
 */
void
sctp_log_mb(struct mbuf *m, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/*
	 * Zero first so the four x.misc words logged below are fully
	 * determinate; matches rto_logging() and friends.
	 */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.mb.mp = m;
	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
	if (SCTP_BUF_IS_EXTENDED(m)) {
		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
	} else {
		sctp_clog.x.mb.ext = 0;
		sctp_clog.x.mb.refcnt = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBUF,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
269
270 void
271 sctp_log_mbc(struct mbuf *m, int from)
272 {
273 struct mbuf *mat;
274
275 for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
276 sctp_log_mb(mat, from);
277 }
278 }
279 #endif
280
/*
 * Trace a stream-delivery event (SCTP_LOG_EVENT_STRM) for the chunk
 * being delivered ('control') and, optionally, the queued chunk it was
 * compared against ('poschk').  A NULL control is logged to the console
 * and ignored.
 */
void
sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	if (control == NULL) {
		SCTP_PRINTF("Gak log of NULL?\n");
		return;
	}
	/*
	 * Zero first so the four x.misc words logged below are fully
	 * determinate; matches rto_logging() and friends.
	 */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.strlog.stcb = control->stcb;
	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
	sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
	sctp_clog.x.strlog.strm = control->sinfo_stream;
	if (poschk != NULL) {
		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
		sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
	} else {
		sctp_clog.x.strlog.e_tsn = 0;
		sctp_clog.x.strlog.e_sseq = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
311
312 void
313 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
314 {
315 #if defined(SCTP_LOCAL_TRACE_BUF)
316 struct sctp_cwnd_log sctp_clog;
317
318 sctp_clog.x.cwnd.net = net;
319 if (stcb->asoc.send_queue_cnt > 255)
320 sctp_clog.x.cwnd.cnt_in_send = 255;
321 else
322 sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
323 if (stcb->asoc.stream_queue_cnt > 255)
324 sctp_clog.x.cwnd.cnt_in_str = 255;
325 else
326 sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
327
328 if (net) {
329 sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
330 sctp_clog.x.cwnd.inflight = net->flight_size;
331 sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
332 sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
333 sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
334 }
335 if (SCTP_CWNDLOG_PRESEND == from) {
336 sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
337 }
338 sctp_clog.x.cwnd.cwnd_augment = augment;
339 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
340 SCTP_LOG_EVENT_CWND,
341 from,
342 sctp_clog.x.misc.log1,
343 sctp_clog.x.misc.log2,
344 sctp_clog.x.misc.log3,
345 sctp_clog.x.misc.log4);
346 #endif
347 }
348
/*
 * Trace the ownership state of the relevant SCTP locks
 * (SCTP_LOG_LOCK_EVENT): the association lock, the inp lock and create
 * lock, the global endpoint-info rwlock, and the socket/sockbuf locks.
 * Locks whose owner cannot be sampled (NULL inp/stcb/socket) are
 * recorded as SCTP_LOCK_UNKNOWN.
 */
void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;

	} else {
		sctp_clog.x.lock.sock = (void *)NULL;
	}
	sctp_clog.x.lock.inp = (void *)inp;
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	/* Write-owner state of the global endpoint hash rwlock. */
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
	if (inp && (inp->sctp_socket)) {
		/*
		 * NOTE(review): sock_lock and sockrcvbuf_lock both sample
		 * so_rcv.sb_mtx.  This looks deliberate (the socket lock
		 * aliases the receive-buffer mutex here) — confirm against
		 * the socket locking scheme before changing.
		 */
		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
394
395 void
396 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
397 {
398 #if defined(SCTP_LOCAL_TRACE_BUF)
399 struct sctp_cwnd_log sctp_clog;
400
401 memset(&sctp_clog, 0, sizeof(sctp_clog));
402 sctp_clog.x.cwnd.net = net;
403 sctp_clog.x.cwnd.cwnd_new_value = error;
404 sctp_clog.x.cwnd.inflight = net->flight_size;
405 sctp_clog.x.cwnd.cwnd_augment = burst;
406 if (stcb->asoc.send_queue_cnt > 255)
407 sctp_clog.x.cwnd.cnt_in_send = 255;
408 else
409 sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
410 if (stcb->asoc.stream_queue_cnt > 255)
411 sctp_clog.x.cwnd.cnt_in_str = 255;
412 else
413 sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
414 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
415 SCTP_LOG_EVENT_MAXBURST,
416 from,
417 sctp_clog.x.misc.log1,
418 sctp_clog.x.misc.log2,
419 sctp_clog.x.misc.log3,
420 sctp_clog.x.misc.log4);
421 #endif
422 }
423
424 void
425 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
426 {
427 #if defined(SCTP_LOCAL_TRACE_BUF)
428 struct sctp_cwnd_log sctp_clog;
429
430 sctp_clog.x.rwnd.rwnd = peers_rwnd;
431 sctp_clog.x.rwnd.send_size = snd_size;
432 sctp_clog.x.rwnd.overhead = overhead;
433 sctp_clog.x.rwnd.new_rwnd = 0;
434 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
435 SCTP_LOG_EVENT_RWND,
436 from,
437 sctp_clog.x.misc.log1,
438 sctp_clog.x.misc.log2,
439 sctp_clog.x.misc.log3,
440 sctp_clog.x.misc.log4);
441 #endif
442 }
443
444 void
445 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
446 {
447 #if defined(SCTP_LOCAL_TRACE_BUF)
448 struct sctp_cwnd_log sctp_clog;
449
450 sctp_clog.x.rwnd.rwnd = peers_rwnd;
451 sctp_clog.x.rwnd.send_size = flight_size;
452 sctp_clog.x.rwnd.overhead = overhead;
453 sctp_clog.x.rwnd.new_rwnd = a_rwndval;
454 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
455 SCTP_LOG_EVENT_RWND,
456 from,
457 sctp_clog.x.misc.log1,
458 sctp_clog.x.misc.log2,
459 sctp_clog.x.misc.log3,
460 sctp_clog.x.misc.log4);
461 #endif
462 }
463
464 #ifdef SCTP_MBCNT_LOGGING
465 static void
466 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
467 {
468 #if defined(SCTP_LOCAL_TRACE_BUF)
469 struct sctp_cwnd_log sctp_clog;
470
471 sctp_clog.x.mbcnt.total_queue_size = total_oq;
472 sctp_clog.x.mbcnt.size_change = book;
473 sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
474 sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
475 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
476 SCTP_LOG_EVENT_MBCNT,
477 from,
478 sctp_clog.x.misc.log1,
479 sctp_clog.x.misc.log2,
480 sctp_clog.x.misc.log3,
481 sctp_clog.x.misc.log4);
482 #endif
483 }
484 #endif
485
/*
 * Trace four caller-supplied 32-bit values directly
 * (SCTP_LOG_MISC_EVENT) — general-purpose debug hook used throughout
 * the stack.  No-op unless SCTP_LOCAL_TRACE_BUF is defined.
 */
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
#endif
}
496
497 void
498 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
499 {
500 #if defined(SCTP_LOCAL_TRACE_BUF)
501 struct sctp_cwnd_log sctp_clog;
502
503 sctp_clog.x.wake.stcb = (void *)stcb;
504 sctp_clog.x.wake.wake_cnt = wake_cnt;
505 sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
506 sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
507 sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
508
509 if (stcb->asoc.stream_queue_cnt < 0xff)
510 sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt;
511 else
512 sctp_clog.x.wake.stream_qcnt = 0xff;
513
514 if (stcb->asoc.chunks_on_out_queue < 0xff)
515 sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue;
516 else
517 sctp_clog.x.wake.chunks_on_oque = 0xff;
518
519 sctp_clog.x.wake.sctpflags = 0;
520 /* set in the defered mode stuff */
521 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
522 sctp_clog.x.wake.sctpflags |= 1;
523 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
524 sctp_clog.x.wake.sctpflags |= 2;
525 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
526 sctp_clog.x.wake.sctpflags |= 4;
527 /* what about the sb */
528 if (stcb->sctp_socket) {
529 struct socket *so = stcb->sctp_socket;
530
531 sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
532 } else {
533 sctp_clog.x.wake.sbflags = 0xff;
534 }
535 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
536 SCTP_LOG_EVENT_WAKE,
537 from,
538 sctp_clog.x.misc.log1,
539 sctp_clog.x.misc.log2,
540 sctp_clog.x.misc.log3,
541 sctp_clog.x.misc.log4);
542 #endif
543 }
544
545 void
546 sctp_log_block(uint8_t from, struct sctp_association *asoc, ssize_t sendlen)
547 {
548 #if defined(SCTP_LOCAL_TRACE_BUF)
549 struct sctp_cwnd_log sctp_clog;
550
551 sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
552 sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
553 sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
554 sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt;
555 sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue;
556 sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024);
557 sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
558 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
559 SCTP_LOG_EVENT_BLOCK,
560 from,
561 sctp_clog.x.misc.log1,
562 sctp_clog.x.misc.log2,
563 sctp_clog.x.misc.log3,
564 sctp_clog.x.misc.log4);
565 #endif
566 }
567
/*
 * Stub for the socket option that used to copy out the trace log.  On
 * this platform the records go to the kernel trace buffer via
 * SCTP_CTR6(), so nothing is copied and success is returned.
 */
int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
574
575 #ifdef SCTP_AUDITING_ENABLED
576 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
577 static int sctp_audit_indx = 0;
578
579 static
580 void
581 sctp_print_audit_report(void)
582 {
583 int i;
584 int cnt;
585
586 cnt = 0;
587 for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
588 if ((sctp_audit_data[i][0] == 0xe0) &&
589 (sctp_audit_data[i][1] == 0x01)) {
590 cnt = 0;
591 SCTP_PRINTF("\n");
592 } else if (sctp_audit_data[i][0] == 0xf0) {
593 cnt = 0;
594 SCTP_PRINTF("\n");
595 } else if ((sctp_audit_data[i][0] == 0xc0) &&
596 (sctp_audit_data[i][1] == 0x01)) {
597 SCTP_PRINTF("\n");
598 cnt = 0;
599 }
600 SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
601 (uint32_t)sctp_audit_data[i][1]);
602 cnt++;
603 if ((cnt % 14) == 0)
604 SCTP_PRINTF("\n");
605 }
606 for (i = 0; i < sctp_audit_indx; i++) {
607 if ((sctp_audit_data[i][0] == 0xe0) &&
608 (sctp_audit_data[i][1] == 0x01)) {
609 cnt = 0;
610 SCTP_PRINTF("\n");
611 } else if (sctp_audit_data[i][0] == 0xf0) {
612 cnt = 0;
613 SCTP_PRINTF("\n");
614 } else if ((sctp_audit_data[i][0] == 0xc0) &&
615 (sctp_audit_data[i][1] == 0x01)) {
616 SCTP_PRINTF("\n");
617 cnt = 0;
618 }
619 SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
620 (uint32_t)sctp_audit_data[i][1]);
621 cnt++;
622 if ((cnt % 14) == 0)
623 SCTP_PRINTF("\n");
624 }
625 SCTP_PRINTF("\n");
626 }
627
/*
 * Sanity-check (and, where inconsistent, repair) the association's
 * flight/retransmit accounting against the actual sent queue, recording
 * breadcrumbs in the global audit ring as it goes.  If any discrepancy
 * is found, the full audit report is printed.  'from' tags the call
 * site in the ring.
 */
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	/* Record entry marker 0xAA with the caller's tag. */
	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		/* 0xAF/0x01: audited with no endpoint. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		/* 0xAF/0x02: audited with no association. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	/* 0xA1: low byte of the bookkept retransmit count. */
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	/*
	 * Walk the sent queue, recomputing the retransmit count and the
	 * booked bytes/chunks actually in flight (sent < RESEND).
	 */
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		/* 0xAF/0xA1: retransmit count mismatch; fix it up. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA2: total_flight disagrees; fix it up. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		/* 0xAF/0xA5: flight chunk count disagrees; fix it up. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	/*
	 * Cross-check the per-destination flight sizes against the
	 * association total; on mismatch, rebuild each destination's
	 * flight size from the sent queue.
	 */
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA3: per-net flight sums don't match. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    (void *)lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}
757
758 void
759 sctp_audit_log(uint8_t ev, uint8_t fd)
760 {
761
762 sctp_audit_data[sctp_audit_indx][0] = ev;
763 sctp_audit_data[sctp_audit_indx][1] = fd;
764 sctp_audit_indx++;
765 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
766 sctp_audit_indx = 0;
767 }
768 }
769
770 #endif
771
772 /*
773 * sctp_stop_timers_for_shutdown() should be called
774 * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
775 * state to make sure that all timers are stopped.
776 */
void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
	struct sctp_inpcb *inp;
	struct sctp_nets *net;

	inp = stcb->sctp_ep;

	/* Association-wide timers that are meaningless once shutting down. */
	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_12);
	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_13);
	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_14);
	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_15);
	/* Per-destination PMTU and heartbeat timers. */
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_16);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_17);
	}
}
800
/*
 * Stop every timer associated with stcb — association-wide timers and
 * all per-destination timers.  The ASOCKILL timer is only stopped when
 * the caller asks for it (it may be the very timer that is tearing the
 * association down).
 */
void
sctp_stop_association_timers(struct sctp_tcb *stcb, bool stop_assoc_kill_timer)
{
	struct sctp_inpcb *inp;
	struct sctp_nets *net;

	inp = stcb->sctp_ep;
	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_18);
	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_19);
	if (stop_assoc_kill_timer) {
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_20);
	}
	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_21);
	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_22);
	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNGUARD, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_23);
	/* Mobility adaptation */
	sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_24);
	/* Per-destination timers. */
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_25);
		sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_26);
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_27);
		sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_28);
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_29);
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_30);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_31);
	}
}
842
843 /*
844 * A list of sizes based on typical mtu's, used only if next hop size not
845 * returned. These values MUST be multiples of 4 and MUST be ordered.
846 */
static uint32_t sctp_mtu_sizes[] = {
	68,		/* IPv4 minimum MTU (RFC 791) */
	296,
	508,
	512,
	544,
	576,		/* minimum IPv4 datagram every host must accept */
	1004,
	1492,		/* Ethernet less PPPoE overhead */
	1500,		/* Ethernet */
	1536,
	2000,
	2048,
	4352,		/* FDDI */
	4464,
	8168,
	17912,
	32000,
	65532
};
867
868 /*
869 * Return the largest MTU in sctp_mtu_sizes smaller than val.
870 * If val is smaller than the minimum, just return the largest
871 * multiple of 4 smaller or equal to val.
872 * Ensure that the result is a multiple of 4.
873 */
874 uint32_t
875 sctp_get_prev_mtu(uint32_t val)
876 {
877 uint32_t i;
878
879 val &= 0xfffffffc;
880 if (val <= sctp_mtu_sizes[0]) {
881 return (val);
882 }
883 for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
884 if (val <= sctp_mtu_sizes[i]) {
885 break;
886 }
887 }
888 KASSERT((sctp_mtu_sizes[i - 1] & 0x00000003) == 0,
889 ("sctp_mtu_sizes[%u] not a multiple of 4", i - 1));
890 return (sctp_mtu_sizes[i - 1]);
891 }
892
893 /*
894 * Return the smallest MTU in sctp_mtu_sizes larger than val.
895 * If val is larger than the maximum, just return the largest multiple of 4 smaller
896 * or equal to val.
897 * Ensure that the result is a multiple of 4.
898 */
899 uint32_t
900 sctp_get_next_mtu(uint32_t val)
901 {
902 /* select another MTU that is just bigger than this one */
903 uint32_t i;
904
905 val &= 0xfffffffc;
906 for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
907 if (val < sctp_mtu_sizes[i]) {
908 KASSERT((sctp_mtu_sizes[i] & 0x00000003) == 0,
909 ("sctp_mtu_sizes[%u] not a multiple of 4", i));
910 return (sctp_mtu_sizes[i]);
911 }
912 }
913 return (val);
914 }
915
/*
 * Refill the endpoint's random_store by keyed-hashing the endpoint's
 * random numbers with a monotonically increasing counter, and reset the
 * consumption offset (store_at) to the start.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
	    sizeof(m->random_counter), (uint8_t *)m->random_store);
	m->random_counter++;
}
934
935 uint32_t
936 sctp_select_initial_TSN(struct sctp_pcb *inp)
937 {
938 /*
939 * A true implementation should use random selection process to get
940 * the initial stream sequence number, using RFC1750 as a good
941 * guideline
942 */
943 uint32_t x, *xp;
944 uint8_t *p;
945 int store_at, new_store;
946
947 if (inp->initial_sequence_debug != 0) {
948 uint32_t ret;
949
950 ret = inp->initial_sequence_debug;
951 inp->initial_sequence_debug++;
952 return (ret);
953 }
954 retry:
955 store_at = inp->store_at;
956 new_store = store_at + sizeof(uint32_t);
957 if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
958 new_store = 0;
959 }
960 if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
961 goto retry;
962 }
963 if (new_store == 0) {
964 /* Refill the random store */
965 sctp_fill_random_store(inp);
966 }
967 p = &inp->random_store[store_at];
968 xp = (uint32_t *)p;
969 x = *xp;
970 return (x);
971 }
972
973 uint32_t
974 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
975 {
976 uint32_t x;
977 struct timeval now;
978
979 if (check) {
980 (void)SCTP_GETTIME_TIMEVAL(&now);
981 }
982 for (;;) {
983 x = sctp_select_initial_TSN(&inp->sctp_ep);
984 if (x == 0) {
985 /* we never use 0 */
986 continue;
987 }
988 if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
989 break;
990 }
991 }
992 return (x);
993 }
994
995 int32_t
996 sctp_map_assoc_state(int kernel_state)
997 {
998 int32_t user_state;
999
1000 if (kernel_state & SCTP_STATE_WAS_ABORTED) {
1001 user_state = SCTP_CLOSED;
1002 } else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
1003 user_state = SCTP_SHUTDOWN_PENDING;
1004 } else {
1005 switch (kernel_state & SCTP_STATE_MASK) {
1006 case SCTP_STATE_EMPTY:
1007 user_state = SCTP_CLOSED;
1008 break;
1009 case SCTP_STATE_INUSE:
1010 user_state = SCTP_CLOSED;
1011 break;
1012 case SCTP_STATE_COOKIE_WAIT:
1013 user_state = SCTP_COOKIE_WAIT;
1014 break;
1015 case SCTP_STATE_COOKIE_ECHOED:
1016 user_state = SCTP_COOKIE_ECHOED;
1017 break;
1018 case SCTP_STATE_OPEN:
1019 user_state = SCTP_ESTABLISHED;
1020 break;
1021 case SCTP_STATE_SHUTDOWN_SENT:
1022 user_state = SCTP_SHUTDOWN_SENT;
1023 break;
1024 case SCTP_STATE_SHUTDOWN_RECEIVED:
1025 user_state = SCTP_SHUTDOWN_RECEIVED;
1026 break;
1027 case SCTP_STATE_SHUTDOWN_ACK_SENT:
1028 user_state = SCTP_SHUTDOWN_ACK_SENT;
1029 break;
1030 default:
1031 user_state = SCTP_CLOSED;
1032 break;
1033 }
1034 }
1035 return (user_state);
1036 }
1037
/*
 * Initialize the association state hanging off stcb, copying the
 * per-endpoint defaults from inp.  If override_tag is non-zero it is
 * used as our verification tag, otherwise a fresh tag is selected.
 * o_strms is the number of outbound streams to pre-allocate.
 * Returns 0 on success or ENOMEM if a required allocation fails
 * (partial allocations are freed before returning).
 */
int
sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
{
	struct sctp_association *asoc;

	/*
	 * Anything set to zero is taken care of by the allocation routine's
	 * bzero
	 */

	/*
	 * Up front select what scoping to apply on addresses I tell my peer
	 * Not sure what to do with these right now, we will need to come up
	 * with a way to set them. We may need to pass them through from the
	 * caller in the sctp_aloc_assoc() function.
	 */
	int i;
#if defined(SCTP_DETAILED_STR_STATS)
	int j;
#endif

	asoc = &stcb->asoc;
	/* init all variables to a known value. */
	SCTP_SET_STATE(stcb, SCTP_STATE_INUSE);
	/* Copy endpoint defaults into this association. */
	asoc->max_burst = inp->sctp_ep.max_burst;
	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
	asoc->ecn_supported = inp->ecn_supported;
	asoc->prsctp_supported = inp->prsctp_supported;
	asoc->auth_supported = inp->auth_supported;
	asoc->asconf_supported = inp->asconf_supported;
	asoc->reconfig_supported = inp->reconfig_supported;
	asoc->nrsack_supported = inp->nrsack_supported;
	asoc->pktdrop_supported = inp->pktdrop_supported;
	asoc->idata_supported = inp->idata_supported;
	asoc->sctp_cmt_pf = (uint8_t)0;
	asoc->sctp_frag_point = inp->sctp_frag_point;
	asoc->sctp_features = inp->sctp_features;
	asoc->default_dscp = inp->sctp_ep.default_dscp;
	asoc->max_cwnd = inp->max_cwnd;
#ifdef INET6
	if (inp->sctp_ep.default_flowlabel) {
		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
	} else {
		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
			/*
			 * Auto flow label: take 20 random bits and set the
			 * high bit to mark the label as locally generated.
			 */
			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
			asoc->default_flowlabel &= 0x000fffff;
			asoc->default_flowlabel |= 0x80000000;
		} else {
			asoc->default_flowlabel = 0;
		}
	}
#endif
	asoc->sb_send_resv = 0;
	if (override_tag) {
		asoc->my_vtag = override_tag;
	} else {
		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
	}
	/* Get the nonce tags */
	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
	asoc->vrf_id = vrf_id;

#ifdef SCTP_ASOCLOG_OF_TSNS
	asoc->tsn_in_at = 0;
	asoc->tsn_out_at = 0;
	asoc->tsn_in_wrapped = 0;
	asoc->tsn_out_wrapped = 0;
	asoc->cumack_log_at = 0;
	asoc->cumack_log_atsnt = 0;
#endif
#ifdef SCTP_FS_SPEC_LOG
	asoc->fs_index = 0;
#endif
	asoc->refcnt = 0;
	asoc->assoc_up_sent = 0;
	/* All outgoing sequence spaces start from the same initial TSN. */
	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
	    sctp_select_initial_TSN(&inp->sctp_ep);
	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
	/* we are optimisitic here */
	asoc->peer_supports_nat = 0;
	asoc->sent_queue_retran_cnt = 0;

	/* for CMT */
	asoc->last_net_cmt_send_started = NULL;

	/* This will need to be adjusted */
	asoc->last_acked_seq = asoc->init_seq_number - 1;
	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
	asoc->asconf_seq_in = asoc->last_acked_seq;

	/* here we are different, we hold the next one we expect */
	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;

	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
	asoc->initial_rto = inp->sctp_ep.initial_rto;

	asoc->default_mtu = inp->sctp_ep.default_mtu;
	asoc->max_init_times = inp->sctp_ep.max_init_times;
	asoc->max_send_times = inp->sctp_ep.max_send_times;
	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
	asoc->free_chunk_cnt = 0;

	asoc->iam_blocking = 0;
	asoc->context = inp->sctp_context;
	asoc->local_strreset_support = inp->local_strreset_support;
	asoc->def_send = inp->def_send;
	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
	asoc->pr_sctp_cnt = 0;
	asoc->total_output_queue_size = 0;

	/* Derive which address families are usable from how we are bound. */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		asoc->scope.ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(inp) == 0) {
			asoc->scope.ipv4_addr_legal = 1;
		} else {
			asoc->scope.ipv4_addr_legal = 0;
		}
	} else {
		asoc->scope.ipv6_addr_legal = 0;
		asoc->scope.ipv4_addr_legal = 1;
	}

	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);

	asoc->smallest_mtu = inp->sctp_frag_point;
	asoc->minrto = inp->sctp_ep.sctp_minrto;
	asoc->maxrto = inp->sctp_ep.sctp_maxrto;

	asoc->stream_locked_on = 0;
	asoc->ecn_echo_cnt_onq = 0;
	asoc->stream_locked = 0;

	asoc->send_sack = 1;

	LIST_INIT(&asoc->sctp_restricted_addrs);

	TAILQ_INIT(&asoc->nets);
	TAILQ_INIT(&asoc->pending_reply_queue);
	TAILQ_INIT(&asoc->asconf_ack_sent);
	/* Setup to fill the hb random cache at first HB */
	asoc->hb_random_idx = 4;

	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;

	/* Install the endpoint's default congestion-control module. */
	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];

	/* Install the endpoint's default stream-scheduling module. */
	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];

	/*
	 * Now the stream parameters, here we allocate space for all streams
	 * that we request by default.
	 */
	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
	    o_strms;
	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
	    SCTP_M_STRMO);
	if (asoc->strmout == NULL) {
		/* big trouble no memory */
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/*
		 * inbound side must be set to 0xffff, also NOTE when we get
		 * the INIT-ACK back (for INIT sender) we MUST reduce the
		 * count (streamoutcnt) but first check if we sent to any of
		 * the upper streams that were dropped (if some were). Those
		 * that were dropped must be notified to the upper layer as
		 * failed to send.
		 */
		asoc->strmout[i].next_mid_ordered = 0;
		asoc->strmout[i].next_mid_unordered = 0;
		TAILQ_INIT(&asoc->strmout[i].outqueue);
		asoc->strmout[i].chunks_on_queues = 0;
#if defined(SCTP_DETAILED_STR_STATS)
		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
			asoc->strmout[i].abandoned_sent[j] = 0;
			asoc->strmout[i].abandoned_unsent[j] = 0;
		}
#else
		asoc->strmout[i].abandoned_sent[0] = 0;
		asoc->strmout[i].abandoned_unsent[0] = 0;
#endif
		asoc->strmout[i].sid = i;
		asoc->strmout[i].last_msg_incomplete = 0;
		asoc->strmout[i].state = SCTP_STREAM_OPENING;
		asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
	}
	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);

	/* Now the mapping array */
	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
	    SCTP_M_MAP);
	if (asoc->mapping_array == NULL) {
		/* Unwind the stream allocation before failing. */
		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
	    SCTP_M_MAP);
	if (asoc->nr_mapping_array == NULL) {
		/* Unwind both earlier allocations before failing. */
		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);

	/* Now the init of the other outqueues */
	TAILQ_INIT(&asoc->free_chunks);
	TAILQ_INIT(&asoc->control_send_queue);
	TAILQ_INIT(&asoc->asconf_send_queue);
	TAILQ_INIT(&asoc->send_queue);
	TAILQ_INIT(&asoc->sent_queue);
	TAILQ_INIT(&asoc->resetHead);
	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
	TAILQ_INIT(&asoc->asconf_queue);
	/* authentication fields */
	asoc->authinfo.random = NULL;
	asoc->authinfo.active_keyid = 0;
	asoc->authinfo.assoc_key = NULL;
	asoc->authinfo.assoc_keyid = 0;
	asoc->authinfo.recv_key = NULL;
	asoc->authinfo.recv_keyid = 0;
	LIST_INIT(&asoc->shared_keys);
	asoc->marked_retrans = 0;
	asoc->port = inp->sctp_ep.port;
	asoc->timoinit = 0;
	asoc->timodata = 0;
	asoc->timosack = 0;
	asoc->timoshutdown = 0;
	asoc->timoheartbeat = 0;
	asoc->timocookie = 0;
	asoc->timoshutdownack = 0;
	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
	asoc->discontinuity_time = asoc->start_time;
	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
		asoc->abandoned_unsent[i] = 0;
		asoc->abandoned_sent[i] = 0;
	}
	/*
	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
	 * freed later when the association is freed.
	 */
	return (0);
}
1297
1298 void
1299 sctp_print_mapping_array(struct sctp_association *asoc)
1300 {
1301 unsigned int i, limit;
1302
1303 SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1304 asoc->mapping_array_size,
1305 asoc->mapping_array_base_tsn,
1306 asoc->cumulative_tsn,
1307 asoc->highest_tsn_inside_map,
1308 asoc->highest_tsn_inside_nr_map);
1309 for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1310 if (asoc->mapping_array[limit - 1] != 0) {
1311 break;
1312 }
1313 }
1314 SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1315 for (i = 0; i < limit; i++) {
1316 SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1317 }
1318 if (limit % 16)
1319 SCTP_PRINTF("\n");
1320 for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1321 if (asoc->nr_mapping_array[limit - 1]) {
1322 break;
1323 }
1324 }
1325 SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1326 for (i = 0; i < limit; i++) {
1327 SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1328 }
1329 if (limit % 16)
1330 SCTP_PRINTF("\n");
1331 }
1332
1333 int
1334 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1335 {
1336 /* mapping array needs to grow */
1337 uint8_t *new_array1, *new_array2;
1338 uint32_t new_size;
1339
1340 new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1341 SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1342 SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1343 if ((new_array1 == NULL) || (new_array2 == NULL)) {
1344 /* can't get more, forget it */
1345 SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1346 if (new_array1) {
1347 SCTP_FREE(new_array1, SCTP_M_MAP);
1348 }
1349 if (new_array2) {
1350 SCTP_FREE(new_array2, SCTP_M_MAP);
1351 }
1352 return (-1);
1353 }
1354 memset(new_array1, 0, new_size);
1355 memset(new_array2, 0, new_size);
1356 memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1357 memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1358 SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1359 SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1360 asoc->mapping_array = new_array1;
1361 asoc->nr_mapping_array = new_array2;
1362 asoc->mapping_array_size = new_size;
1363 return (0);
1364 }
1365
1366
/*
 * Run one queued iterator request to completion: walk the endpoint
 * (inp) list, and for each endpoint matching the requested pcb
 * flags/features, walk its associations matching asoc_state, invoking
 * the caller-supplied callbacks (function_inp, function_assoc,
 * function_inp_end, function_atend).  The iterator structure `it` is
 * freed here when the walk finishes.  Entered with no SCTP locks
 * held; acquires INP-INFO read lock and the iterator lock, and
 * periodically drops/reacquires them so other threads can make
 * progress.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	sctp_it_ctl.cur_it = it;
	if (it->inp) {
		SCTP_INP_RLOCK(it->inp);
		/* Drop the reference taken when the request was queued. */
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		sctp_it_ctl.cur_it = NULL;
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	/* First pass arrives with it->inp already read-locked above. */
	if (first_in) {
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		it->stcb = NULL;
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/* Hold a refcnt so the stcb survives the lock drop. */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			/* Resume: re-lock, release the temporary holds. */
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		KASSERT(it->inp == it->stcb->sctp_ep,
		    ("%s: stcb %p does not belong to inp %p, but inp %p",
		    __func__, it->stcb, it->inp, it->stcb->sctp_ep));

		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	it->stcb = NULL;
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1522
1523 void
1524 sctp_iterator_worker(void)
1525 {
1526 struct sctp_iterator *it;
1527
1528 /* This function is called with the WQ lock in place */
1529 sctp_it_ctl.iterator_running = 1;
1530 while ((it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead)) != NULL) {
1531 /* now lets work on this one */
1532 TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1533 SCTP_IPI_ITERATOR_WQ_UNLOCK();
1534 CURVNET_SET(it->vn);
1535 sctp_iterator_work(it);
1536 CURVNET_RESTORE();
1537 SCTP_IPI_ITERATOR_WQ_LOCK();
1538 /* sa_ignore FREED_MEMORY */
1539 }
1540 sctp_it_ctl.iterator_running = 0;
1541 return;
1542 }
1543
1544
1545 static void
1546 sctp_handle_addr_wq(void)
1547 {
1548 /* deal with the ADDR wq from the rtsock calls */
1549 struct sctp_laddr *wi, *nwi;
1550 struct sctp_asconf_iterator *asc;
1551
1552 SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1553 sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1554 if (asc == NULL) {
1555 /* Try later, no memory */
1556 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1557 (struct sctp_inpcb *)NULL,
1558 (struct sctp_tcb *)NULL,
1559 (struct sctp_nets *)NULL);
1560 return;
1561 }
1562 LIST_INIT(&asc->list_of_work);
1563 asc->cnt = 0;
1564
1565 LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1566 LIST_REMOVE(wi, sctp_nxt_addr);
1567 LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1568 asc->cnt++;
1569 }
1570
1571 if (asc->cnt == 0) {
1572 SCTP_FREE(asc, SCTP_M_ASC_IT);
1573 } else {
1574 int ret;
1575
1576 ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
1577 sctp_asconf_iterator_stcb,
1578 NULL, /* No ep end for boundall */
1579 SCTP_PCB_FLAGS_BOUNDALL,
1580 SCTP_PCB_ANY_FEATURES,
1581 SCTP_ASOC_ANY_STATE,
1582 (void *)asc, 0,
1583 sctp_asconf_iterator_end, NULL, 0);
1584 if (ret) {
1585 SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
1586 /*
1587 * Freeing if we are stopping or put back on the
1588 * addr_wq.
1589 */
1590 if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
1591 sctp_asconf_iterator_end(asc, 0);
1592 } else {
1593 LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
1594 LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
1595 }
1596 SCTP_FREE(asc, SCTP_M_ASC_IT);
1597 }
1598 }
1599 }
1600 }
1601
/*
 * Callout entry point for every SCTP timer.  `t` is a struct
 * sctp_timer embedded in an inp/stcb/net; its ep/tcb/net fields
 * identify the objects the timer applies to (any may be NULL
 * depending on the timer type).  Validates the timer is still live,
 * takes the appropriate lock (TCB lock, INP write lock, or the
 * address-WQ lock), dispatches on tmr->type, and releases locks and
 * references on the way out via the get_out/out_decr/out_no_decr
 * labels.  tmr->stopped_from is updated as a breadcrumb recording
 * how far this handler progressed.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;
	struct mbuf *op_err;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif
	int did_output;
	int type;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t)tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * (void *)tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	/* Every timer type except ADDR_WQ requires an endpoint. */
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		CURVNET_RESTORE();
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		/*
		 * If the socket is gone, only the timer types that must
		 * still run during endpoint teardown are allowed through.
		 */
		if ((inp->sctp_socket == NULL) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))) {
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* Pin the stcb while we check its state. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	type = tmr->type;
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	/*
	 * Take exactly one of: the TCB lock, the INP write lock, or the
	 * address-WQ lock — matched by the unlocks at get_out below.
	 */
	if (stcb) {
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	} else if (inp != NULL) {
		if (type != SCTP_TIMER_TYPE_INPKILL) {
			SCTP_INP_WLOCK(inp);
		}
	} else {
		SCTP_WQ_ADDR_LOCK();
	}
	/* record in stopped what t-o occurred */
	tmr->stopped_from = type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (type) {
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * Safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if (chk->whoTo != NULL) {
					break;
				}
			}
			if (chk != NULL) {
				sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo);
			}
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/* T1-init retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* Delayed-SACK timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timosack);
		stcb->asoc.timosack++;
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoheartbeat);
		stcb->asoc.timoheartbeat++;
		if (sctp_heartbeat_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/* Only rearm when heartbeats are still enabled on this net. */
		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		/* COOKIE-ECHO retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}

		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* Periodic rotation of the endpoint's cookie secret keys. */
		{
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/* Shutdown took too long: abort the association. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
		    "Shutdown guard timer expired");
		sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/* Tear down the association; sctp_free_assoc drops the lock. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		/* Tear down the endpoint; sctp_inpcb_free consumes inp. */
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_FROM_INPKILL_TIMER);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    type);
		break;
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t)type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	/* Release whichever lock was taken above. */
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	} else if (inp != NULL) {
		SCTP_INP_WUNLOCK(inp);
	} else {
		SCTP_WQ_ADDR_UNLOCK();
	}

out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}

out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type = %d)\n", type);
	CURVNET_RESTORE();
}
2044
2045 void
2046 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2047 struct sctp_nets *net)
2048 {
2049 uint32_t to_ticks;
2050 struct sctp_timer *tmr;
2051
2052 if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
2053 return;
2054
2055 tmr = NULL;
2056 if (stcb) {
2057 SCTP_TCB_LOCK_ASSERT(stcb);
2058 }
2059 /* Don't restart timer on net that's been removed. */
2060 if (net != NULL && (net->dest_state & SCTP_ADDR_BEING_DELETED)) {
2061 return;
2062 }
2063 switch (t_type) {
2064 case SCTP_TIMER_TYPE_ADDR_WQ:
2065 /* Only 1 tick away :-) */
2066 tmr = &SCTP_BASE_INFO(addr_wq_timer);
2067 to_ticks = SCTP_ADDRESS_TICK_DELAY;
2068 break;
2069 case SCTP_TIMER_TYPE_SEND:
2070 /* Here we use the RTO timer */
2071 {
2072 int rto_val;
2073
2074 if ((stcb == NULL) || (net == NULL)) {
2075 return;
2076 }
2077 tmr = &net->rxt_timer;
2078 if (net->RTO == 0) {
2079 rto_val = stcb->asoc.initial_rto;
2080 } else {
2081 rto_val = net->RTO;
2082 }
2083 to_ticks = MSEC_TO_TICKS(rto_val);
2084 }
2085 break;
2086 case SCTP_TIMER_TYPE_INIT:
2087 /*
2088 * Here we use the INIT timer default usually about 1
2089 * minute.
2090 */
2091 if ((stcb == NULL) || (net == NULL)) {
2092 return;
2093 }
2094 tmr = &net->rxt_timer;
2095 if (net->RTO == 0) {
2096 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2097 } else {
2098 to_ticks = MSEC_TO_TICKS(net->RTO);
2099 }
2100 break;
2101 case SCTP_TIMER_TYPE_RECV:
2102 /*
2103 * Here we use the Delayed-Ack timer value from the inp
2104 * ususually about 200ms.
2105 */
2106 if (stcb == NULL) {
2107 return;
2108 }
2109 tmr = &stcb->asoc.dack_timer;
2110 to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
2111 break;
2112 case SCTP_TIMER_TYPE_SHUTDOWN:
2113 /* Here we use the RTO of the destination. */
2114 if ((stcb == NULL) || (net == NULL)) {
2115 return;
2116 }
2117 if (net->RTO == 0) {
2118 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2119 } else {
2120 to_ticks = MSEC_TO_TICKS(net->RTO);
2121 }
2122 tmr = &net->rxt_timer;
2123 break;
2124 case SCTP_TIMER_TYPE_HEARTBEAT:
2125 /*
2126 * the net is used here so that we can add in the RTO. Even
2127 * though we use a different timer. We also add the HB timer
2128 * PLUS a random jitter.
2129 */
2130 if ((stcb == NULL) || (net == NULL)) {
2131 return;
2132 } else {
2133 uint32_t rndval;
2134 uint32_t jitter;
2135
2136 if ((net->dest_state & SCTP_ADDR_NOHB) &&
2137 !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
2138 return;
2139 }
2140 if (net->RTO == 0) {
2141 to_ticks = stcb->asoc.initial_rto;
2142 } else {
2143 to_ticks = net->RTO;
2144 }
2145 rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2146 jitter = rndval % to_ticks;
2147 if (jitter >= (to_ticks >> 1)) {
2148 to_ticks = to_ticks + (jitter - (to_ticks >> 1));
2149 } else {
2150 to_ticks = to_ticks - jitter;
2151 }
2152 if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2153 !(net->dest_state & SCTP_ADDR_PF)) {
2154 to_ticks += net->heart_beat_delay;
2155 }
2156 /*
2157 * Now we must convert the to_ticks that are now in
2158 * ms to ticks.
2159 */
2160 to_ticks = MSEC_TO_TICKS(to_ticks);
2161 tmr = &net->hb_timer;
2162 }
2163 break;
2164 case SCTP_TIMER_TYPE_COOKIE:
2165 /*
2166 * Here we can use the RTO timer from the network since one
2167 * RTT was compelete. If a retran happened then we will be
2168 * using the RTO initial value.
2169 */
2170 if ((stcb == NULL) || (net == NULL)) {
2171 return;
2172 }
2173 if (net->RTO == 0) {
2174 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2175 } else {
2176 to_ticks = MSEC_TO_TICKS(net->RTO);
2177 }
2178 tmr = &net->rxt_timer;
2179 break;
2180 case SCTP_TIMER_TYPE_NEWCOOKIE:
2181 /*
2182 * nothing needed but the endpoint here ususually about 60
2183 * minutes.
2184 */
2185 tmr = &inp->sctp_ep.signature_change;
2186 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2187 break;
2188 case SCTP_TIMER_TYPE_ASOCKILL:
2189 if (stcb == NULL) {
2190 return;
2191 }
2192 tmr = &stcb->asoc.strreset_timer;
2193 to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2194 break;
2195 case SCTP_TIMER_TYPE_INPKILL:
2196 /*
2197 * The inp is setup to die. We re-use the signature_chage
2198 * timer since that has stopped and we are in the GONE
2199 * state.
2200 */
2201 tmr = &inp->sctp_ep.signature_change;
2202 to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2203 break;
2204 case SCTP_TIMER_TYPE_PATHMTURAISE:
2205 /*
2206 * Here we use the value found in the EP for PMTU ususually
2207 * about 10 minutes.
2208 */
2209 if ((stcb == NULL) || (net == NULL)) {
2210 return;
2211 }
2212 if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2213 return;
2214 }
2215 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2216 tmr = &net->pmtu_timer;
2217 break;
2218 case SCTP_TIMER_TYPE_SHUTDOWNACK:
2219 /* Here we use the RTO of the destination */
2220 if ((stcb == NULL) || (net == NULL)) {
2221 return;
2222 }
2223 if (net->RTO == 0) {
2224 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2225 } else {
2226 to_ticks = MSEC_TO_TICKS(net->RTO);
2227 }
2228 tmr = &net->rxt_timer;
2229 break;
2230 case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2231 /*
2232 * Here we use the endpoints shutdown guard timer usually
2233 * about 3 minutes.
2234 */
2235 if (stcb == NULL) {
2236 return;
2237 }
2238 if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
2239 to_ticks = 5 * MSEC_TO_TICKS(stcb->asoc.maxrto);
2240 } else {
2241 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2242 }
2243 tmr = &stcb->asoc.shut_guard_timer;
2244 break;
2245 case SCTP_TIMER_TYPE_STRRESET:
2246 /*
2247 * Here the timer comes from the stcb but its value is from
2248 * the net's RTO.
2249 */
2250 if ((stcb == NULL) || (net == NULL)) {
2251 return;
2252 }
2253 if (net->RTO == 0) {
2254 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2255 } else {
2256 to_ticks = MSEC_TO_TICKS(net->RTO);
2257 }
2258 tmr = &stcb->asoc.strreset_timer;
2259 break;
2260 case SCTP_TIMER_TYPE_ASCONF:
2261 /*
2262 * Here the timer comes from the stcb but its value is from
2263 * the net's RTO.
2264 */
2265 if ((stcb == NULL) || (net == NULL)) {
2266 return;
2267 }
2268 if (net->RTO == 0) {
2269 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2270 } else {
2271 to_ticks = MSEC_TO_TICKS(net->RTO);
2272 }
2273 tmr = &stcb->asoc.asconf_timer;
2274 break;
2275 case SCTP_TIMER_TYPE_PRIM_DELETED:
2276 if ((stcb == NULL) || (net != NULL)) {
2277 return;
2278 }
2279 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2280 tmr = &stcb->asoc.delete_prim_timer;
2281 break;
2282 case SCTP_TIMER_TYPE_AUTOCLOSE:
2283 if (stcb == NULL) {
2284 return;
2285 }
2286 if (stcb->asoc.sctp_autoclose_ticks == 0) {
2287 /*
2288 * Really an error since stcb is NOT set to
2289 * autoclose
2290 */
2291 return;
2292 }
2293 to_ticks = stcb->asoc.sctp_autoclose_ticks;
2294 tmr = &stcb->asoc.autoclose_timer;
2295 break;
2296 default:
2297 SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2298 __func__, t_type);
2299 return;
2300 break;
2301 }
2302 if ((to_ticks <= 0) || (tmr == NULL)) {
2303 SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2304 __func__, t_type, to_ticks, (void *)tmr);
2305 return;
2306 }
2307 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2308 /*
2309 * we do NOT allow you to have it already running. if it is
2310 * we leave the current one up unchanged
2311 */
2312 return;
2313 }
2314 /* At this point we can proceed */
2315 if (t_type == SCTP_TIMER_TYPE_SEND) {
2316 stcb->asoc.num_send_timers_up++;
2317 }
2318 tmr->stopped_from = 0;
2319 tmr->type = t_type;
2320 tmr->ep = (void *)inp;
2321 tmr->tcb = (void *)stcb;
2322 tmr->net = (void *)net;
2323 tmr->self = (void *)tmr;
2324 tmr->vnet = (void *)curvnet;
2325 tmr->ticks = sctp_get_tick_count();
2326 (void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2327 return;
2328 }
2329
/*
 * Stop the timer of type t_type for the given endpoint (inp),
 * association (stcb) and/or destination (net).  Maps the type to the
 * same sctp_timer structure that sctp_timer_start() uses; if that
 * structure is currently armed for a DIFFERENT type (several types
 * share a structure, e.g. rxt_timer and signature_change), the timer
 * is left running.  'from' records the caller's location for debugging
 * via tmr->stopped_from.
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/* Every timer type except ADDR_WQ needs an endpoint. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	/* Map the timer type to the structure that backs it. */
	switch (t_type) {
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer.
		 */
		if (stcb == NULL) {
			return;
		}
		/* Shares the stream-reset timer structure. */
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __func__, t_type);
		break;
	}
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped. So just
		 * return.
		 */
		return;
	}
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		/* Keep the count of running SEND timers consistent. */
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	/* Clear self so a late-firing handler can detect the stop. */
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}
2480
2481 uint32_t
2482 sctp_calculate_len(struct mbuf *m)
2483 {
2484 uint32_t tlen = 0;
2485 struct mbuf *at;
2486
2487 at = m;
2488 while (at) {
2489 tlen += SCTP_BUF_LEN(at);
2490 at = SCTP_BUF_NEXT(at);
2491 }
2492 return (tlen);
2493 }
2494
2495 void
2496 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2497 struct sctp_association *asoc, uint32_t mtu)
2498 {
2499 /*
2500 * Reset the P-MTU size on this association, this involves changing
2501 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2502 * allow the DF flag to be cleared.
2503 */
2504 struct sctp_tmit_chunk *chk;
2505 unsigned int eff_mtu, ovh;
2506
2507 asoc->smallest_mtu = mtu;
2508 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2509 ovh = SCTP_MIN_OVERHEAD;
2510 } else {
2511 ovh = SCTP_MIN_V4_OVERHEAD;
2512 }
2513 eff_mtu = mtu - ovh;
2514 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2515 if (chk->send_size > eff_mtu) {
2516 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2517 }
2518 }
2519 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2520 if (chk->send_size > eff_mtu) {
2521 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2522 }
2523 }
2524 }
2525
2526
2527 /*
2528 * Given an association and starting time of the current RTT period, update
2529 * RTO in number of msecs. net should point to the current network.
2530 * Return 1, if an RTO update was performed, return 0 if no update was
2531 * performed due to invalid starting point.
2532 */
2533
int
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *old,
    int rtt_from_sack)
{
	struct timeval now;
	uint64_t rtt_us;	/* RTT in us */
	int32_t rtt;		/* RTT in ms */
	uint32_t new_rto;
	int first_measure = 0;

	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	if (stcb->asoc.use_precise_time) {
		(void)SCTP_GETPTIME_TIMEVAL(&now);
	} else {
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	if ((old->tv_sec > now.tv_sec) ||
	    ((old->tv_sec == now.tv_sec) && (old->tv_usec > now.tv_usec))) {
		/* The starting point is in the future. */
		return (0);
	}
	/* now = elapsed time since 'old' (the RTT sample). */
	timevalsub(&now, old);
	rtt_us = (uint64_t)1000000 * (uint64_t)now.tv_sec + (uint64_t)now.tv_usec;
	if (rtt_us > SCTP_RTO_UPPER_BOUND * 1000) {
		/* The RTT is larger than a sane value. */
		return (0);
	}
	/* store the current RTT in us */
	net->rtt = rtt_us;
	/* compute rtt in ms */
	rtt = (int32_t)(net->rtt / 1000);
	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
		/*
		 * Tell the CC module that a new update has just occurred
		 * from a sack
		 */
		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
	}
	/*
	 * Do we need to determine the lan? We do this only on sacks i.e.
	 * RTT being determined from data not non-data (HB/INIT->INITACK).
	 */
	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
			net->lan_type = SCTP_LAN_INTERNET;
		} else {
			net->lan_type = SCTP_LAN_LOCAL;
		}
	}

	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	/*-
	 * Compute the scaled average lastsa and the
	 * scaled variance lastsv as described in van Jacobson
	 * Paper "Congestion Avoidance and Control", Annex A.
	 *
	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
	 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar
	 */
	if (net->RTO_measured) {
		/* rtt becomes the error term (sample - srtt), then
		 * |error| - rttvar; both feed the scaled accumulators. */
		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
		net->lastsa += rtt;
		if (rtt < 0) {
			rtt = -rtt;
		}
		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
		net->lastsv += rtt;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
	} else {
		/* First RTO measurment */
		net->RTO_measured = 1;
		first_measure = 1;
		/* Seed srtt with the sample and rttvar with sample/2. */
		net->lastsa = rtt << SCTP_RTT_SHIFT;
		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
	if (net->lastsv == 0) {
		/* Avoid a zero variance; clamp to clock granularity. */
		net->lastsv = SCTP_CLOCK_GRANULARITY;
	}
	/* RTO = srtt + rttvar (both already scaled appropriately). */
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		/* Once we leave satellite mode, lock out re-entry. */
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	net->RTO = new_rto;
	return (1);
}
2644
2645 /*
2646 * return a pointer to a contiguous piece of data from the given mbuf chain
2647 * starting at 'off' for 'len' bytes. If the desired piece spans more than
2648 * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2649 * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2650 */
2651 caddr_t
2652 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr)
2653 {
2654 uint32_t count;
2655 uint8_t *ptr;
2656
2657 ptr = in_ptr;
2658 if ((off < 0) || (len <= 0))
2659 return (NULL);
2660
2661 /* find the desired start location */
2662 while ((m != NULL) && (off > 0)) {
2663 if (off < SCTP_BUF_LEN(m))
2664 break;
2665 off -= SCTP_BUF_LEN(m);
2666 m = SCTP_BUF_NEXT(m);
2667 }
2668 if (m == NULL)
2669 return (NULL);
2670
2671 /* is the current mbuf large enough (eg. contiguous)? */
2672 if ((SCTP_BUF_LEN(m) - off) >= len) {
2673 return (mtod(m, caddr_t)+off);
2674 } else {
2675 /* else, it spans more than one mbuf, so save a temp copy... */
2676 while ((m != NULL) && (len > 0)) {
2677 count = min(SCTP_BUF_LEN(m) - off, len);
2678 memcpy(ptr, mtod(m, caddr_t)+off, count);
2679 len -= count;
2680 ptr += count;
2681 off = 0;
2682 m = SCTP_BUF_NEXT(m);
2683 }
2684 if ((m == NULL) && (len > 0))
2685 return (NULL);
2686 else
2687 return ((caddr_t)in_ptr);
2688 }
2689 }
2690
2691
2692
2693 struct sctp_paramhdr *
2694 sctp_get_next_param(struct mbuf *m,
2695 int offset,
2696 struct sctp_paramhdr *pull,
2697 int pull_limit)
2698 {
2699 /* This just provides a typed signature to Peter's Pull routine */
2700 return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2701 (uint8_t *)pull));
2702 }
2703
2704
2705 struct mbuf *
2706 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2707 {
2708 struct mbuf *m_last;
2709 caddr_t dp;
2710
2711 if (padlen > 3) {
2712 return (NULL);
2713 }
2714 if (padlen <= M_TRAILINGSPACE(m)) {
2715 /*
2716 * The easy way. We hope the majority of the time we hit
2717 * here :)
2718 */
2719 m_last = m;
2720 } else {
2721 /* Hard way we must grow the mbuf chain */
2722 m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2723 if (m_last == NULL) {
2724 return (NULL);
2725 }
2726 SCTP_BUF_LEN(m_last) = 0;
2727 SCTP_BUF_NEXT(m_last) = NULL;
2728 SCTP_BUF_NEXT(m) = m_last;
2729 }
2730 dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
2731 SCTP_BUF_LEN(m_last) += padlen;
2732 memset(dp, 0, padlen);
2733 return (m_last);
2734 }
2735
2736 struct mbuf *
2737 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2738 {
2739 /* find the last mbuf in chain and pad it */
2740 struct mbuf *m_at;
2741
2742 if (last_mbuf != NULL) {
2743 return (sctp_add_pad_tombuf(last_mbuf, padval));
2744 } else {
2745 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2746 if (SCTP_BUF_NEXT(m_at) == NULL) {
2747 return (sctp_add_pad_tombuf(m_at, padval));
2748 }
2749 }
2750 }
2751 return (NULL);
2752 }
2753
/*
 * Deliver an SCTP_ASSOC_CHANGE notification for 'state' (COMM_UP,
 * COMM_LOST, RESTART, SHUTDOWN_COMP, CANT_STR_ASSOC) to the socket's
 * receive queue, if the application enabled the event.  For COMM_UP/
 * RESTART the supported-features list is appended; for COMM_LOST/
 * CANT_STR_ASSOC the (truncated) ABORT chunk is appended.  For
 * 1-to-1 style sockets an errno is also set and sleepers are woken.
 */
static void
sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
    uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;
	unsigned int notif_len;
	uint16_t abort_len;
	unsigned int i;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

	if (stcb == NULL) {
		return;
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
		if (abort != NULL) {
			abort_len = ntohs(abort->ch.chunk_length);
			/*
			 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be
			 * contiguous.
			 */
			if (abort_len > SCTP_CHUNK_BUFFER_SIZE) {
				abort_len = SCTP_CHUNK_BUFFER_SIZE;
			}
		} else {
			abort_len = 0;
		}
		/* Reserve room for the optional trailing info. */
		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
			notif_len += abort_len;
		}
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			/* Retry with smaller value. */
			notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
			if (m_notify == NULL) {
				goto set_error;
			}
		}
		SCTP_BUF_NEXT(m_notify) = NULL;
		sac = mtod(m_notify, struct sctp_assoc_change *);
		memset(sac, 0, notif_len);
		sac->sac_type = SCTP_ASSOC_CHANGE;
		sac->sac_flags = 0;
		sac->sac_length = sizeof(struct sctp_assoc_change);
		sac->sac_state = state;
		sac->sac_error = error;
		/* XXX verify these stream counts */
		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
		sac->sac_inbound_streams = stcb->asoc.streamincnt;
		sac->sac_assoc_id = sctp_get_associd(stcb);
		if (notif_len > sizeof(struct sctp_assoc_change)) {
			/* Room reserved above: fill in the trailing info. */
			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
				i = 0;
				if (stcb->asoc.prsctp_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
				}
				if (stcb->asoc.auth_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
				}
				if (stcb->asoc.asconf_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
				}
				if (stcb->asoc.idata_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING;
				}
				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
				if (stcb->asoc.reconfig_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
				}
				sac->sac_length += i;
			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
				memcpy(sac->sac_info, abort, abort_len);
				sac->sac_length += abort_len;
			}
		}
		SCTP_BUF_LEN(m_notify) = sac->sac_length;
		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
		    0, 0, stcb->asoc.context, 0, 0, 0,
		    m_notify);
		if (control != NULL) {
			control->length = SCTP_BUF_LEN(m_notify);
			control->spec_flags = M_NOTIFICATION;
			/* not that we need this */
			control->tail_mbuf = m_notify;
			sctp_add_to_readq(stcb->sctp_ep, stcb,
			    control,
			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
			    so_locked);
		} else {
			sctp_m_freem(m_notify);
		}
	}
	/*
	 * For 1-to-1 style sockets, we send up and error when an ABORT
	 * comes in.
	 */
set_error:
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		SOCK_LOCK(stcb->sctp_socket);
		if (from_peer) {
			/* Peer aborted: refused vs. reset by peer. */
			if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
				stcb->sctp_socket->so_error = ECONNREFUSED;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
				stcb->sctp_socket->so_error = ECONNRESET;
			}
		} else {
			/* Local abort: setup timeout vs. local teardown. */
			if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
			    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
				stcb->sctp_socket->so_error = ETIMEDOUT;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
				stcb->sctp_socket->so_error = ECONNABORTED;
			}
		}
	}
	/* Wake ANY sleepers */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * On platforms with a separate socket lock we must drop the TCB
	 * lock to take it (lock ordering), holding a refcount so the
	 * stcb cannot go away; re-check for a closed socket afterwards.
	 */
	so = SCTP_INP_SO(stcb->sctp_ep);
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
	}
#endif
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		socantrcvmore_locked(stcb->sctp_socket);
	}
	sorwakeup(stcb->sctp_socket);
	sowwakeup(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
2913
/*
 * Deliver an SCTP_PEER_ADDR_CHANGE notification for destination 'sa'
 * (new state in 'state', cause in 'error') to the socket's receive
 * queue, if the application enabled the event.  The address is copied
 * into the notification, v4-mapped or scope-adjusted as needed.
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	memset(spc, 0, sizeof(struct sctp_paddr_change));
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	switch (sa->sa_family) {
#ifdef INET
	case AF_INET:
#ifdef INET6
		/* v6 socket wanting v4 addresses mapped: convert. */
		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
			    (struct sockaddr_in6 *)&spc->spc_aaddr);
		} else {
			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
		}
#else
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
#endif
		break;
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));

			/* Normalize link-local scope for the application. */
			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
				if (sin6->sin6_scope_id == 0) {
					/* recover scope_id for user */
					(void)sa6_recoverscope(sin6);
				} else {
					/* clear embedded scope_id for user */
					in6_clearscope(&sin6->sin6_addr);
				}
			}
			break;
		}
#endif
	default:
		/* TSNH */
		break;
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
3005
3006
/*
 * Deliver an SCTP_SEND_FAILED / SCTP_SEND_FAILED_EVENT notification
 * for chunk 'chk' that could not be (fully) delivered, if either
 * event is enabled.  The chunk's user data (with the DATA/I-DATA
 * chunk header and trailing padding stripped) is appended to the
 * notification, and ownership of chk->data is taken (set to NULL).
 * 'sent' selects SCTP_DATA_SENT vs. SCTP_DATA_UNSENT.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	struct sctp_chunkhdr *chkhdr;
	int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}

	/* New-style event uses a different (larger) header struct. */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		notifhdr_len = sizeof(struct sctp_send_failed_event);
	} else {
		notifhdr_len = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = notifhdr_len;
	if (stcb->asoc.idata_supported) {
		chkhdr_len = sizeof(struct sctp_idata_chunk);
	} else {
		chkhdr_len = sizeof(struct sctp_data_chunk);
	}
	/* Use some defaults in case we can't access the chunk header */
	if (chk->send_size >= chkhdr_len) {
		payload_len = chk->send_size - chkhdr_len;
	} else {
		payload_len = 0;
	}
	padding_len = 0;
	if (chk->data != NULL) {
		chkhdr = mtod(chk->data, struct sctp_chunkhdr *);
		if (chkhdr != NULL) {
			chk_len = ntohs(chkhdr->chunk_length);
			/* Derive padding (0..3) from the declared length. */
			if ((chk_len >= chkhdr_len) &&
			    (chk->send_size >= chk_len) &&
			    (chk->send_size - chk_len < 4)) {
				padding_len = chk->send_size - chk_len;
				payload_len = chk->send_size - chkhdr_len - padding_len;
			}
		}
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		/* Fill in the new-style sctp_send_failed_event. */
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, notifhdr_len);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		if (sent) {
			ssfe->ssfe_flags = SCTP_DATA_SENT;
		} else {
			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		}
		ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len);
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = chk->rec.data.sid;
		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
		ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid;
		ssfe->ssfe_info.snd_context = chk->rec.data.context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
	} else {
		/* Fill in the deprecated sctp_send_failed. */
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, notifhdr_len);
		ssf->ssf_type = SCTP_SEND_FAILED;
		if (sent) {
			ssf->ssf_flags = SCTP_DATA_SENT;
		} else {
			ssf->ssf_flags = SCTP_DATA_UNSENT;
		}
		ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len);
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssf->ssf_info.sinfo_stream = chk->rec.data.sid;
		ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid;
		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
		ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid;
		ssf->ssf_info.sinfo_context = chk->rec.data.context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
	}
	if (chk->data != NULL) {
		/* Trim off the sctp chunk header (it should be there) */
		if (chk->send_size == chkhdr_len + payload_len + padding_len) {
			m_adj(chk->data, chkhdr_len);
			m_adj(chk->data, -padding_len);
			sctp_mbuf_crush(chk->data);
			chk->send_size -= (chkhdr_len + padding_len);
		}
	}
	SCTP_BUF_NEXT(m_notify) = chk->data;
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
3141
3142
/*
 * Build and queue an SCTP_SEND_FAILED / SCTP_SEND_FAILED_EVENT
 * notification for a message that was still sitting on a stream send
 * queue (never turned into a DATA chunk) when the association failed.
 * The pending user data in sp->data is stolen and chained behind the
 * notification header so the caller must not free it afterwards.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int notifhdr_len;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	/* The newer *_EVENT feature selects the extended notification layout. */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		notifhdr_len = sizeof(struct sctp_send_failed_event);
	} else {
		notifhdr_len = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* no space left */
		return;
	}
	SCTP_BUF_LEN(m_notify) = notifhdr_len;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, notifhdr_len);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		/* The message never left the stream queue, so it is "unsent". */
		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length);
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = sp->sid;
		if (sp->some_taken) {
			/* part of the message was already chunked off */
			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
		}
		ssfe->ssfe_info.snd_ppid = sp->ppid;
		ssfe->ssfe_info.snd_context = sp->context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
	} else {
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, notifhdr_len);
		ssf->ssf_type = SCTP_SEND_FAILED;
		ssf->ssf_flags = SCTP_DATA_UNSENT;
		ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length);
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssf->ssf_info.sinfo_stream = sp->sid;
		ssf->ssf_info.sinfo_ssn = 0;
		if (sp->some_taken) {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
		}
		ssf->ssf_info.sinfo_ppid = sp->ppid;
		ssf->ssf_info.sinfo_context = sp->context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
	}
	/* Chain the undelivered user data behind the notification header. */
	SCTP_BUF_NEXT(m_notify) = sp->data;

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}
3242
3243
3244
3245 static void
3246 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3247 {
3248 struct mbuf *m_notify;
3249 struct sctp_adaptation_event *sai;
3250 struct sctp_queued_to_read *control;
3251
3252 if ((stcb == NULL) ||
3253 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3254 /* event not enabled */
3255 return;
3256 }
3257
3258 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA);
3259 if (m_notify == NULL)
3260 /* no space left */
3261 return;
3262 SCTP_BUF_LEN(m_notify) = 0;
3263 sai = mtod(m_notify, struct sctp_adaptation_event *);
3264 memset(sai, 0, sizeof(struct sctp_adaptation_event));
3265 sai->sai_type = SCTP_ADAPTATION_INDICATION;
3266 sai->sai_flags = 0;
3267 sai->sai_length = sizeof(struct sctp_adaptation_event);
3268 sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3269 sai->sai_assoc_id = sctp_get_associd(stcb);
3270
3271 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3272 SCTP_BUF_NEXT(m_notify) = NULL;
3273
3274 /* append to socket */
3275 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3276 0, 0, stcb->asoc.context, 0, 0, 0,
3277 m_notify);
3278 if (control == NULL) {
3279 /* no memory */
3280 sctp_m_freem(m_notify);
3281 return;
3282 }
3283 control->length = SCTP_BUF_LEN(m_notify);
3284 control->spec_flags = M_NOTIFICATION;
3285 /* not that we need this */
3286 control->tail_mbuf = m_notify;
3287 sctp_add_to_readq(stcb->sctp_ep, stcb,
3288 control,
3289 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3290 }
3291
/* This always must be called with the read-queue LOCKED in the INP */
/*
 * Queue an SCTP_PARTIAL_DELIVERY_EVENT.  Unlike the other notification
 * helpers this one does the socket-buffer accounting and read-queue
 * insertion by hand (instead of sctp_add_to_readq) because the caller
 * already holds the INP read-queue lock and the event must land
 * directly after the partially delivered message.
 */
static void
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
    uint32_t val, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_pdapi_event *pdapi;
	struct sctp_queued_to_read *control;
	struct sockbuf *sb;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
		/* event not enabled */
		return;
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* nobody can read the event anyway */
		return;
	}

	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
	pdapi->pdapi_flags = 0;
	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
	pdapi->pdapi_indication = error;
	/* val packs the stream id (high 16 bits) and sequence (low 16 bits) */
	pdapi->pdapi_stream = (val >> 16);
	pdapi->pdapi_seq = (val & 0x0000ffff);
	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	/* manual socket-buffer accounting for the notification mbuf */
	sb = &stcb->sctp_socket->so_rcv;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
	}
	sctp_sballoc(stcb, sb, m_notify);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
	}
	control->end_added = 1;
	/* insert right after the partially delivered message, if any */
	if (stcb->asoc.control_pdapi)
		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
	else {
		/* we really should not see this case */
		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
	}
	if (stcb->sctp_ep && stcb->sctp_socket) {
		/* This should always be the case */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/*
			 * Lock ordering: drop the TCB lock to take the
			 * socket lock; hold a refcount so the assoc
			 * cannot be freed in between.
			 */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* socket vanished while we were unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
3385
/*
 * Queue an SCTP_SHUTDOWN_EVENT notification.  For 1-to-1 style (TCP
 * model) and connected 1-to-many sockets the socket is additionally
 * marked as unable to send, waking up any blocked writers.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/*
		 * Lock ordering: release the TCB lock before taking the
		 * socket lock; a refcount keeps the assoc alive meanwhile.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* socket was closed while we were unlocked */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}

	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	memset(sse, 0, sizeof(struct sctp_shutdown_event));
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3455
3456 static void
3457 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3458 int so_locked
3459 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3460 SCTP_UNUSED
3461 #endif
3462 )
3463 {
3464 struct mbuf *m_notify;
3465 struct sctp_sender_dry_event *event;
3466 struct sctp_queued_to_read *control;
3467
3468 if ((stcb == NULL) ||
3469 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3470 /* event not enabled */
3471 return;
3472 }
3473
3474 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3475 if (m_notify == NULL) {
3476 /* no space left */
3477 return;
3478 }
3479 SCTP_BUF_LEN(m_notify) = 0;
3480 event = mtod(m_notify, struct sctp_sender_dry_event *);
3481 memset(event, 0, sizeof(struct sctp_sender_dry_event));
3482 event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3483 event->sender_dry_flags = 0;
3484 event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3485 event->sender_dry_assoc_id = sctp_get_associd(stcb);
3486
3487 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3488 SCTP_BUF_NEXT(m_notify) = NULL;
3489
3490 /* append to socket */
3491 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3492 0, 0, stcb->asoc.context, 0, 0, 0,
3493 m_notify);
3494 if (control == NULL) {
3495 /* no memory */
3496 sctp_m_freem(m_notify);
3497 return;
3498 }
3499 control->length = SCTP_BUF_LEN(m_notify);
3500 control->spec_flags = M_NOTIFICATION;
3501 /* not that we need this */
3502 control->tail_mbuf = m_notify;
3503 sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3504 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3505 }
3506
3507
3508 void
3509 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3510 {
3511 struct mbuf *m_notify;
3512 struct sctp_queued_to_read *control;
3513 struct sctp_stream_change_event *stradd;
3514
3515 if ((stcb == NULL) ||
3516 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3517 /* event not enabled */
3518 return;
3519 }
3520 if ((stcb->asoc.peer_req_out) && flag) {
3521 /* Peer made the request, don't tell the local user */
3522 stcb->asoc.peer_req_out = 0;
3523 return;
3524 }
3525 stcb->asoc.peer_req_out = 0;
3526 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3527 if (m_notify == NULL)
3528 /* no space left */
3529 return;
3530 SCTP_BUF_LEN(m_notify) = 0;
3531 stradd = mtod(m_notify, struct sctp_stream_change_event *);
3532 memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3533 stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3534 stradd->strchange_flags = flag;
3535 stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3536 stradd->strchange_assoc_id = sctp_get_associd(stcb);
3537 stradd->strchange_instrms = numberin;
3538 stradd->strchange_outstrms = numberout;
3539 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3540 SCTP_BUF_NEXT(m_notify) = NULL;
3541 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3542 /* no space */
3543 sctp_m_freem(m_notify);
3544 return;
3545 }
3546 /* append to socket */
3547 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3548 0, 0, stcb->asoc.context, 0, 0, 0,
3549 m_notify);
3550 if (control == NULL) {
3551 /* no memory */
3552 sctp_m_freem(m_notify);
3553 return;
3554 }
3555 control->length = SCTP_BUF_LEN(m_notify);
3556 control->spec_flags = M_NOTIFICATION;
3557 /* not that we need this */
3558 control->tail_mbuf = m_notify;
3559 sctp_add_to_readq(stcb->sctp_ep, stcb,
3560 control,
3561 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3562 }
3563
3564 void
3565 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3566 {
3567 struct mbuf *m_notify;
3568 struct sctp_queued_to_read *control;
3569 struct sctp_assoc_reset_event *strasoc;
3570
3571 if ((stcb == NULL) ||
3572 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3573 /* event not enabled */
3574 return;
3575 }
3576 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3577 if (m_notify == NULL)
3578 /* no space left */
3579 return;
3580 SCTP_BUF_LEN(m_notify) = 0;
3581 strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3582 memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3583 strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3584 strasoc->assocreset_flags = flag;
3585 strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3586 strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3587 strasoc->assocreset_local_tsn = sending_tsn;
3588 strasoc->assocreset_remote_tsn = recv_tsn;
3589 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3590 SCTP_BUF_NEXT(m_notify) = NULL;
3591 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3592 /* no space */
3593 sctp_m_freem(m_notify);
3594 return;
3595 }
3596 /* append to socket */
3597 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3598 0, 0, stcb->asoc.context, 0, 0, 0,
3599 m_notify);
3600 if (control == NULL) {
3601 /* no memory */
3602 sctp_m_freem(m_notify);
3603 return;
3604 }
3605 control->length = SCTP_BUF_LEN(m_notify);
3606 control->spec_flags = M_NOTIFICATION;
3607 /* not that we need this */
3608 control->tail_mbuf = m_notify;
3609 sctp_add_to_readq(stcb->sctp_ep, stcb,
3610 control,
3611 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3612 }
3613
3614
3615
3616 static void
3617 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3618 int number_entries, uint16_t *list, int flag)
3619 {
3620 struct mbuf *m_notify;
3621 struct sctp_queued_to_read *control;
3622 struct sctp_stream_reset_event *strreset;
3623 int len;
3624
3625 if ((stcb == NULL) ||
3626 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3627 /* event not enabled */
3628 return;
3629 }
3630
3631 m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3632 if (m_notify == NULL)
3633 /* no space left */
3634 return;
3635 SCTP_BUF_LEN(m_notify) = 0;
3636 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3637 if (len > M_TRAILINGSPACE(m_notify)) {
3638 /* never enough room */
3639 sctp_m_freem(m_notify);
3640 return;
3641 }
3642 strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3643 memset(strreset, 0, len);
3644 strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3645 strreset->strreset_flags = flag;
3646 strreset->strreset_length = len;
3647 strreset->strreset_assoc_id = sctp_get_associd(stcb);
3648 if (number_entries) {
3649 int i;
3650
3651 for (i = 0; i < number_entries; i++) {
3652 strreset->strreset_stream_list[i] = ntohs(list[i]);
3653 }
3654 }
3655 SCTP_BUF_LEN(m_notify) = len;
3656 SCTP_BUF_NEXT(m_notify) = NULL;
3657 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3658 /* no space */
3659 sctp_m_freem(m_notify);
3660 return;
3661 }
3662 /* append to socket */
3663 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3664 0, 0, stcb->asoc.context, 0, 0, 0,
3665 m_notify);
3666 if (control == NULL) {
3667 /* no memory */
3668 sctp_m_freem(m_notify);
3669 return;
3670 }
3671 control->length = SCTP_BUF_LEN(m_notify);
3672 control->spec_flags = M_NOTIFICATION;
3673 /* not that we need this */
3674 control->tail_mbuf = m_notify;
3675 sctp_add_to_readq(stcb->sctp_ep, stcb,
3676 control,
3677 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3678 }
3679
3680
3681 static void
3682 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
3683 {
3684 struct mbuf *m_notify;
3685 struct sctp_remote_error *sre;
3686 struct sctp_queued_to_read *control;
3687 unsigned int notif_len;
3688 uint16_t chunk_len;
3689
3690 if ((stcb == NULL) ||
3691 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
3692 return;
3693 }
3694 if (chunk != NULL) {
3695 chunk_len = ntohs(chunk->ch.chunk_length);
3696 /*
3697 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be
3698 * contiguous.
3699 */
3700 if (chunk_len > SCTP_CHUNK_BUFFER_SIZE) {
3701 chunk_len = SCTP_CHUNK_BUFFER_SIZE;
3702 }
3703 } else {
3704 chunk_len = 0;
3705 }
3706 notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len);
3707 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3708 if (m_notify == NULL) {
3709 /* Retry with smaller value. */
3710 notif_len = (unsigned int)sizeof(struct sctp_remote_error);
3711 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3712 if (m_notify == NULL) {
3713 return;
3714 }
3715 }
3716 SCTP_BUF_NEXT(m_notify) = NULL;
3717 sre = mtod(m_notify, struct sctp_remote_error *);
3718 memset(sre, 0, notif_len);
3719 sre->sre_type = SCTP_REMOTE_ERROR;
3720 sre->sre_flags = 0;
3721 sre->sre_length = sizeof(struct sctp_remote_error);
3722 sre->sre_error = error;
3723 sre->sre_assoc_id = sctp_get_associd(stcb);
3724 if (notif_len > sizeof(struct sctp_remote_error)) {
3725 memcpy(sre->sre_data, chunk, chunk_len);
3726 sre->sre_length += chunk_len;
3727 }
3728 SCTP_BUF_LEN(m_notify) = sre->sre_length;
3729 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3730 0, 0, stcb->asoc.context, 0, 0, 0,
3731 m_notify);
3732 if (control != NULL) {
3733 control->length = SCTP_BUF_LEN(m_notify);
3734 control->spec_flags = M_NOTIFICATION;
3735 /* not that we need this */
3736 control->tail_mbuf = m_notify;
3737 sctp_add_to_readq(stcb->sctp_ep, stcb,
3738 control,
3739 &stcb->sctp_socket->so_rcv, 1,
3740 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3741 } else {
3742 sctp_m_freem(m_notify);
3743 }
3744 }
3745
3746
/*
 * Central dispatcher for upper-layer-protocol notifications: routes a
 * notification code (SCTP_NOTIFY_*) to the specific builder function.
 * 'data' is a per-notification payload (a net, a chunk, a pending send,
 * a stream list, ...) interpreted by the selected case.  Silently does
 * nothing when the socket is gone, closed for reading, or the event is
 * inappropriate for the current handshake state.
 */
void
sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if ((stcb == NULL) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* If the socket is gone we are out of here */
		return;
	}
	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
		/* reader side shut down; nobody would see the event */
		return;
	}
	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
	    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
			/* Don't report these in front states */
			return;
		}
	}
	switch (notification) {
	case SCTP_NOTIFY_ASSOC_UP:
		if (stcb->asoc.assoc_up_sent == 0) {
			/* only ever report COMM_UP once per association */
			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
			stcb->asoc.assoc_up_sent = 1;
		}
		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
			sctp_notify_adaptation_layer(stcb);
		}
		if (stcb->asoc.auth_supported == 0) {
			/* recurse to report the lack of peer authentication */
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_DOWN:
		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
		break;
	case SCTP_NOTIFY_INTERFACE_DOWN:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_UP:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
		/* message still on a stream queue, never chunked */
		sctp_notify_send_failed2(stcb, error,
		    (struct sctp_stream_queue_pending *)data, so_locked);
		break;
	case SCTP_NOTIFY_SENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 1, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_UNSENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 0, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
		{
			uint32_t val;

			/* data points at a packed stream-id/seq uint32_t */
			val = *((uint32_t *)data);

			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
			break;
		}
	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
		    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
			/* never got established: report as setup failure */
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
		    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_RESTART:
		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
		if (stcb->asoc.auth_supported == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_STR_RESET_SEND:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_OUTGOING_SSN);
		break;
	case SCTP_NOTIFY_STR_RESET_RECV:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING);
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_ASCONF_ADD_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_ASCONF_DELETE_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_PEER_SHUTDOWN:
		sctp_notify_shutdown_event(stcb);
		break;
	case SCTP_NOTIFY_AUTH_NEW_KEY:
		/* data carries the key id squeezed through the pointer */
		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
		    (uint16_t)(uintptr_t)data,
		    so_locked);
		break;
	case SCTP_NOTIFY_AUTH_FREE_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
		    (uint16_t)(uintptr_t)data,
		    so_locked);
		break;
	case SCTP_NOTIFY_NO_PEER_AUTH:
		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
		    (uint16_t)(uintptr_t)data,
		    so_locked);
		break;
	case SCTP_NOTIFY_SENDER_DRY:
		sctp_notify_sender_dry_event(stcb, so_locked);
		break;
	case SCTP_NOTIFY_REMOTE_ERROR:
		sctp_notify_remote_error(stcb, error, data);
		break;
	default:
		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
		    __func__, notification, notification);
		break;
	}			/* end switch */
}
3926
/*
 * Drain every outbound queue of the association (sent queue, send
 * queue, and each stream's pending-message queue), notifying the user
 * of each failed message via SCTP_NOTIFY_{SENT,UNSENT}_DG_FAIL or
 * SCTP_NOTIFY_SPECIAL_SP_FAIL, then freeing the chunks/messages.
 * Called when the association is being torn down; no-ops if the assoc
 * or socket is already on its way out.
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		return;
	}
	/* now through all the gunk freeing chunks */
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
			/* keep per-stream chunk accounting in sync */
			if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
				asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
			} else {
				panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
#endif
			}
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			/* the notify may have stolen chk->data; re-check */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
			asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
		} else {
			panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
#endif
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			/* the notify may have stolen chk->data; re-check */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			atomic_subtract_int(&asoc->stream_queue_cnt, 1);
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, 1);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    error, (void *)sp, so_locked);
				/* the notify may have stolen sp->data; re-check */
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
					sp->tail_mbuf = NULL;
					sp->length = 0;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/* sa_ignore FREED_MEMORY */
		}
	}
}
4031
/*
 * Notify the ULP that the association has been aborted: flush all
 * outbound queues and deliver either a remote- or local-abort
 * notification depending on 'from_peer'.  'abort' may point at the
 * ABORT chunk that triggered this, or be NULL.  Nothing is delivered
 * once the socket is gone or already closed.
 */
void
sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
    struct sctp_abort_chunk *abort, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if (stcb == NULL) {
		return;
	}
	/*
	 * For 1-to-1 style (TCP model) sockets, remember the abort so a
	 * later socket operation can report the right error.
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
	}
	/* No one left to tell if the socket is gone or already closed. */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		return;
	}
	/* Flush pending sends under the send lock before notifying. */
	SCTP_TCB_SEND_LOCK(stcb);
	SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED);
	/* Tell them we lost the asoc */
	sctp_report_all_outbound(stcb, error, so_locked);
	SCTP_TCB_SEND_UNLOCK(stcb);
	if (from_peer) {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
	} else {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
	}
}
4064
/*
 * Send an ABORT on the wire for the given packet/association and, if a
 * TCB exists, notify the ULP and free the association.  When stcb is
 * NULL only the out-of-the-blue ABORT (vtag 0) is sent.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct mbuf *op_err,
    uint8_t mflowtype, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

	vtag = 0;
	if (stcb != NULL) {
		/* Use the peer's vtag and the association's VRF. */
		vtag = stcb->asoc.peer_vtag;
		vrf_id = stcb->asoc.vrf_id;
	}
	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
	    mflowtype, mflowid, inp->fibnum,
	    vrf_id, port);
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
		/* Ok, now lets free it */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Lock-order dance: hold a ref, drop the TCB lock, take
		 * the socket lock, then re-take the TCB lock.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		/* Only established associations count toward currestab. */
		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
#ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug dump of the per-association inbound and outbound TSN tracking
 * logs.  Each log is a circular buffer of SCTP_TSN_LOG_SIZE entries;
 * when wrapped, the oldest entries (index tsn_*_at .. end) are printed
 * first, then the newer ones (0 .. tsn_*_at).
 *
 * NOTE(review): the inner guard is spelled "NOSIY_PRINTS" (sic); unless
 * a build defines that exact symbol this function compiles to an empty
 * body -- confirm whether "NOISY_PRINTS" was intended.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	/* Nothing received yet: index at 0 and the log never wrapped. */
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	/* Older (wrapped) half first. */
	if (stcb->asoc.tsn_in_wrapped) {
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	/* Then the newer half up to the current write index. */
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
#endif
4172
/*
 * Abort an existing association: send an ABORT chunk to the peer,
 * notify the ULP (unless the socket is gone), and free the TCB.  If
 * stcb is NULL and the socket is already gone with no associations
 * left, the inp itself is freed instead.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	}
	/* notify the peer */
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	/* Only established associations count toward currestab. */
	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Lock-order dance: hold a ref, drop the TCB lock, take the
	 * socket lock, then re-take the TCB lock.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
4231
/*
 * Handle an "out of the blue" packet (one for which no association
 * exists).  Walks the chunk list to decide whether to stay silent
 * (PKT-DROPPED, ABORT, SHUTDOWN-COMPLETE), answer a SHUTDOWN-ACK with
 * SHUTDOWN-COMPLETE, or fall through and send an ABORT -- subject to
 * the sctp_blackhole sysctl (1 suppresses the ABORT only for packets
 * containing an INIT, 2 suppresses it always).
 */
void
sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct sctp_inpcb *inp,
    struct mbuf *cause,
    uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_chunkhdr *ch, chunk_buf;
	unsigned int chk_length;
	int contains_init_chunk;

	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
	/* Generate a TO address for future reference */
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
			/* Socket is gone and nothing left: free the inp. */
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
			    SCTP_CALLED_DIRECTLY_NOCMPSET);
		}
	}
	contains_init_chunk = 0;
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
	    sizeof(*ch), (uint8_t *)&chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* break to abort land */
			break;
		}
		switch (ch->chunk_type) {
		case SCTP_INIT:
			/* Remembered for the blackhole == 1 decision below. */
			contains_init_chunk = 1;
			break;
		case SCTP_PACKET_DROPPED:
			/* we don't respond to pkt-dropped */
			return;
		case SCTP_ABORT_ASSOCIATION:
			/* we don't respond with an ABORT to an ABORT */
			return;
		case SCTP_SHUTDOWN_COMPLETE:
			/*
			 * we ignore it since we are not waiting for it and
			 * peer is gone
			 */
			return;
		case SCTP_SHUTDOWN_ACK:
			sctp_send_shutdown_complete2(src, dst, sh,
			    mflowtype, mflowid, fibnum,
			    vrf_id, port);
			return;
		default:
			break;
		}
		/* Chunks are padded to 4-byte boundaries. */
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *)&chunk_buf);
	}
	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
	    (contains_init_chunk == 0))) {
		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
		    mflowtype, mflowid, fibnum,
		    vrf_id, port);
	}
}
4297
4298 /*
4299 * check the inbound datagram to make sure there is not an abort inside it,
4300 * if there is return 1, else return 0.
4301 */
/*
 * Scan the chunks of an inbound datagram.  Returns 1 if an ABORT chunk
 * is present, else 0.  As a side effect, if an INIT chunk is seen its
 * initiate_tag is stored through *vtagfill.
 */
int
sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtagfill)
{
	struct sctp_chunkhdr *ch;
	/* chunk_buf is large enough to hold either a chunkhdr or an init chunk */
	struct sctp_init_chunk *init_chk, chunk_buf;
	int offset;
	unsigned int chk_length;

	offset = iphlen + sizeof(struct sctphdr);
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
	    (uint8_t *)&chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* packet is probably corrupt */
			break;
		}
		/* we seem to be ok, is it an abort? */
		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
			/* yep, tell them */
			return (1);
		}
		if (ch->chunk_type == SCTP_INITIATION) {
			/* need to update the Vtag */
			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
			    offset, sizeof(*init_chk), (uint8_t *)&chunk_buf);
			if (init_chk != NULL) {
				*vtagfill = ntohl(init_chk->init.initiate_tag);
			}
		}
		/* Nope, move to the next chunk */
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *)&chunk_buf);
	}
	return (0);
}
4339
4340 /*
4341 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4342 * set (i.e. it's 0) so, create this function to compare link local scopes
4343 */
4344 #ifdef INET6
uint32_t
sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
{
	struct sockaddr_in6 lhs, rhs;

	/* Work on copies so the callers' sockaddrs are never modified. */
	lhs = *addr1;
	rhs = *addr2;

	/* Recover a missing (zero) scope id before comparing. */
	if ((lhs.sin6_scope_id == 0) && sa6_recoverscope(&lhs)) {
		/* can't get scope, so can't match */
		return (0);
	}
	if ((rhs.sin6_scope_id == 0) && sa6_recoverscope(&rhs)) {
		/* can't get scope, so can't match */
		return (0);
	}
	return ((lhs.sin6_scope_id == rhs.sin6_scope_id) ? 1 : 0);
}
4369
4370 /*
4371 * returns a sockaddr_in6 with embedded scope recovered and removed
4372 */
4373 struct sockaddr_in6 *
4374 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4375 {
4376 /* check and strip embedded scope junk */
4377 if (addr->sin6_family == AF_INET6) {
4378 if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4379 if (addr->sin6_scope_id == 0) {
4380 *store = *addr;
4381 if (!sa6_recoverscope(store)) {
4382 /* use the recovered scope */
4383 addr = store;
4384 }
4385 } else {
4386 /* else, return the original "to" addr */
4387 in6_clearscope(&addr->sin6_addr);
4388 }
4389 }
4390 }
4391 return (addr);
4392 }
4393 #endif
4394
4395 /*
4396 * are the two addresses the same? currently a "scopeless" check returns: 1
4397 * if same, 0 if not
4398 */
4399 int
4400 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4401 {
4402
4403 /* must be valid */
4404 if (sa1 == NULL || sa2 == NULL)
4405 return (0);
4406
4407 /* must be the same family */
4408 if (sa1->sa_family != sa2->sa_family)
4409 return (0);
4410
4411 switch (sa1->sa_family) {
4412 #ifdef INET6
4413 case AF_INET6:
4414 {
4415 /* IPv6 addresses */
4416 struct sockaddr_in6 *sin6_1, *sin6_2;
4417
4418 sin6_1 = (struct sockaddr_in6 *)sa1;
4419 sin6_2 = (struct sockaddr_in6 *)sa2;
4420 return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4421 sin6_2));
4422 }
4423 #endif
4424 #ifdef INET
4425 case AF_INET:
4426 {
4427 /* IPv4 addresses */
4428 struct sockaddr_in *sin_1, *sin_2;
4429
4430 sin_1 = (struct sockaddr_in *)sa1;
4431 sin_2 = (struct sockaddr_in *)sa2;
4432 return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4433 }
4434 #endif
4435 default:
4436 /* we don't do these... */
4437 return (0);
4438 }
4439 }
4440
/* Debug helper: print a sockaddr (IPv4/IPv6) in human-readable form. */
void
sctp_print_address(struct sockaddr *sa)
{
#ifdef INET6
	char ip6buf[INET6_ADDRSTRLEN];
#endif

	switch (sa->sa_family) {
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *a6 = (struct sockaddr_in6 *)sa;

			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
			    ip6_sprintf(ip6buf, &a6->sin6_addr),
			    ntohs(a6->sin6_port),
			    a6->sin6_scope_id);
			break;
		}
#endif
#ifdef INET
	case AF_INET:
		{
			struct sockaddr_in *a4 = (struct sockaddr_in *)sa;
			unsigned char *octets = (unsigned char *)&a4->sin_addr;

			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
			    octets[0], octets[1], octets[2], octets[3],
			    ntohs(a4->sin_port));
			break;
		}
#endif
	default:
		SCTP_PRINTF("?\n");
		break;
	}
}
4480
/*
 * Move all queued-to-read control structures belonging to 'stcb' from
 * old_inp's read queue to new_inp's (used by peeloff/accept).  Socket
 * buffer accounting (sb_cc etc.) is transferred along with the data.
 */
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move them to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for our target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* Debit every mbuf of this message from the old sb. */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* Credit every mbuf of this message to the new sb. */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4556
/*
 * Wake up any reader blocked on the endpoint's socket.  On Apple (or
 * with SCTP_SO_LOCK_TESTING) the socket lock must be taken first,
 * which requires temporarily dropping the TCB lock while holding a
 * refcount on the association.
 */
void
sctp_wakeup_the_read_socket(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if ((inp != NULL) && (inp->sctp_socket != NULL)) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(inp);
		if (!so_locked) {
			if (stcb) {
				/* Keep the asoc alive across the unlock. */
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
			}
			SCTP_SOCKET_LOCK(so, 1);
			if (stcb) {
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
			/* Socket may have vanished while unlocked. */
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
4595
/*
 * Append a queued-to-read control structure to the endpoint's read
 * queue, pruning zero-length mbufs from its chain and charging the
 * socket buffer for each remaining mbuf so select()/poll() see the
 * data.  'end' marks the message as complete; the read lock is taken
 * here unless the caller already holds it.
 */
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* Reader can never see it; free everything and bail. */
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		sctp_free_a_readq(stcb, control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	/* Notifications don't count as received user messages. */
	if (!(control->spec_flags & M_NOTIFICATION)) {
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	/* Walk the chain: drop empty mbufs, charge the sb for the rest. */
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		sctp_free_remote_addr(control->whoFrom);
		sctp_free_a_readq(stcb, control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	control->on_read_q = 1;
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	if (inp && inp->sctp_socket) {
		sctp_wakeup_the_read_socket(inp, stcb, so_locked);
	}
}
4693
4694 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4695 *************ALTERNATE ROUTING CODE
4696 */
4697
4698 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4699 *************ALTERNATE ROUTING CODE
4700 */
4701
4702 struct mbuf *
4703 sctp_generate_cause(uint16_t code, char *info)
4704 {
4705 struct mbuf *m;
4706 struct sctp_gen_error_cause *cause;
4707 size_t info_len;
4708 uint16_t len;
4709
4710 if ((code == 0) || (info == NULL)) {
4711 return (NULL);
4712 }
4713 info_len = strlen(info);
4714 if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) {
4715 return (NULL);
4716 }
4717 len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len);
4718 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4719 if (m != NULL) {
4720 SCTP_BUF_LEN(m) = len;
4721 cause = mtod(m, struct sctp_gen_error_cause *);
4722 cause->code = htons(code);
4723 cause->length = htons(len);
4724 memcpy(cause->info, info, info_len);
4725 }
4726 return (m);
4727 }
4728
4729 struct mbuf *
4730 sctp_generate_no_user_data_cause(uint32_t tsn)
4731 {
4732 struct mbuf *m;
4733 struct sctp_error_no_user_data *no_user_data_cause;
4734 uint16_t len;
4735
4736 len = (uint16_t)sizeof(struct sctp_error_no_user_data);
4737 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4738 if (m != NULL) {
4739 SCTP_BUF_LEN(m) = len;
4740 no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
4741 no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
4742 no_user_data_cause->cause.length = htons(len);
4743 no_user_data_cause->tsn = htonl(tsn);
4744 }
4745 return (m);
4746 }
4747
#ifdef SCTP_MBCNT_LOGGING
/*
 * Release the buffer-space accounting held by a transmit chunk:
 * decrement the association's chunk count and output-queue size, and
 * (for 1-to-1 style sockets) the socket send buffer byte count.
 * Clamps to zero rather than underflowing.
 */
void
sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *tp1, int chk_cnt)
{
	if (tp1->data == NULL) {
		return;
	}
	asoc->chunks_on_out_queue -= chk_cnt;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
		    asoc->total_output_queue_size,
		    tp1->book_size,
		    0,
		    tp1->mbcnt);
	}
	/* Guard against underflow of the queue-size gauge. */
	if (asoc->total_output_queue_size >= tp1->book_size) {
		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
	} else {
		asoc->total_output_queue_size = 0;
	}

	/* Only TCP-model sockets track this in the send buffer. */
	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
		} else {
			stcb->sctp_socket->so_snd.sb_cc = 0;

		}
	}
}

#endif
4782
4783 int
4784 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4785 uint8_t sent, int so_locked
4786 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4787 SCTP_UNUSED
4788 #endif
4789 )
4790 {
4791 struct sctp_stream_out *strq;
4792 struct sctp_tmit_chunk *chk = NULL, *tp2;
4793 struct sctp_stream_queue_pending *sp;
4794 uint32_t mid;
4795 uint16_t sid;
4796 uint8_t foundeom = 0;
4797 int ret_sz = 0;
4798 int notdone;
4799 int do_wakeup_routine = 0;
4800
4801 sid = tp1->rec.data.sid;
4802 mid = tp1->rec.data.mid;
4803 if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
4804 stcb->asoc.abandoned_sent[0]++;
4805 stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4806 stcb->asoc.strmout[sid].abandoned_sent[0]++;
4807 #if defined(SCTP_DETAILED_STR_STATS)
4808 stcb->asoc.strmout[stream].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4809 #endif
4810 } else {
4811 stcb->asoc.abandoned_unsent[0]++;
4812 stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4813 stcb->asoc.strmout[sid].abandoned_unsent[0]++;
4814 #if defined(SCTP_DETAILED_STR_STATS)
4815 stcb->asoc.strmout[stream].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4816 #endif
4817 }
4818 do {
4819 ret_sz += tp1->book_size;
4820 if (tp1->data != NULL) {
4821 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4822 sctp_flight_size_decrease(tp1);
4823 sctp_total_flight_decrease(stcb, tp1);
4824 }
4825 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4826 stcb->asoc.peers_rwnd += tp1->send_size;
4827 stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4828 if (sent) {
4829 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4830 } else {
4831 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4832 }
4833 if (tp1->data) {
4834 sctp_m_freem(tp1->data);
4835 tp1->data = NULL;
4836 }
4837 do_wakeup_routine = 1;
4838 if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4839 stcb->asoc.sent_queue_cnt_removeable--;
4840 }
4841 }
4842 tp1->sent = SCTP_FORWARD_TSN_SKIP;
4843 if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4844 SCTP_DATA_NOT_FRAG) {
4845 /* not frag'ed we ae done */
4846 notdone = 0;
4847 foundeom = 1;
4848 } else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4849 /* end of frag, we are done */
4850 notdone = 0;
4851 foundeom = 1;
4852 } else {
4853 /*
4854 * Its a begin or middle piece, we must mark all of
4855 * it
4856 */
4857 notdone = 1;
4858 tp1 = TAILQ_NEXT(tp1, sctp_next);
4859 }
4860 } while (tp1 && notdone);
4861 if (foundeom == 0) {
4862 /*
4863 * The multi-part message was scattered across the send and
4864 * sent queue.
4865 */
4866 TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4867 if ((tp1->rec.data.sid != sid) ||
4868 (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) {
4869 break;
4870 }
4871 /*
4872 * save to chk in case we have some on stream out
4873 * queue. If so and we have an un-transmitted one we
4874 * don't have to fudge the TSN.
4875 */
4876 chk = tp1;
4877 ret_sz += tp1->book_size;
4878 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4879 if (sent) {
4880 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4881 } else {
4882 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4883 }
4884 if (tp1->data) {
4885 sctp_m_freem(tp1->data);
4886 tp1->data = NULL;
4887 }
4888 /* No flight involved here book the size to 0 */
4889 tp1->book_size = 0;
4890 if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4891 foundeom = 1;
4892 }
4893 do_wakeup_routine = 1;
4894 tp1->sent = SCTP_FORWARD_TSN_SKIP;
4895 TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4896 /*
4897 * on to the sent queue so we can wait for it to be
4898 * passed by.
4899 */
4900 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4901 sctp_next);
4902 stcb->asoc.send_queue_cnt--;
4903 stcb->asoc.sent_queue_cnt++;
4904 }
4905 }
4906 if (foundeom == 0) {
4907 /*
4908 * Still no eom found. That means there is stuff left on the
4909 * stream out queue.. yuck.
4910 */
4911 SCTP_TCB_SEND_LOCK(stcb);
4912 strq = &stcb->asoc.strmout[sid];
4913 sp = TAILQ_FIRST(&strq->outqueue);
4914 if (sp != NULL) {
4915 sp->discard_rest = 1;
4916 /*
4917 * We may need to put a chunk on the queue that
4918 * holds the TSN that would have been sent with the
4919 * LAST bit.
4920 */
4921 if (chk == NULL) {
4922 /* Yep, we have to */
4923 sctp_alloc_a_chunk(stcb, chk);
4924 if (chk == NULL) {
4925 /*
4926 * we are hosed. All we can do is
4927 * nothing.. which will cause an
4928 * abort if the peer is paying
4929 * attention.
4930 */
4931 goto oh_well;
4932 }
4933 memset(chk, 0, sizeof(*chk));
4934 chk->rec.data.rcv_flags = 0;
4935 chk->sent = SCTP_FORWARD_TSN_SKIP;
4936 chk->asoc = &stcb->asoc;
4937 if (stcb->asoc.idata_supported == 0) {
4938 if (sp->sinfo_flags & SCTP_UNORDERED) {
4939 chk->rec.data.mid = 0;
4940 } else {
4941 chk->rec.data.mid = strq->next_mid_ordered;
4942 }
4943 } else {
4944 if (sp->sinfo_flags & SCTP_UNORDERED) {
4945 chk->rec.data.mid = strq->next_mid_unordered;
4946 } else {
4947 chk->rec.data.mid = strq->next_mid_ordered;
4948 }
4949 }
4950 chk->rec.data.sid = sp->sid;
4951 chk->rec.data.ppid = sp->ppid;
4952 chk->rec.data.context = sp->context;
4953 chk->flags = sp->act_flags;
4954 chk->whoTo = NULL;
4955 chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4956 strq->chunks_on_queues++;
4957 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4958 stcb->asoc.sent_queue_cnt++;
4959 stcb->asoc.pr_sctp_cnt++;
4960 }
4961 chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4962 if (sp->sinfo_flags & SCTP_UNORDERED) {
4963 chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED;
4964 }
4965 if (stcb->asoc.idata_supported == 0) {
4966 if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) {
4967 strq->next_mid_ordered++;
4968 }
4969 } else {
4970 if (sp->sinfo_flags & SCTP_UNORDERED) {
4971 strq->next_mid_unordered++;
4972 } else {
4973 strq->next_mid_ordered++;
4974 }
4975 }
4976 oh_well:
4977 if (sp->data) {
4978 /*
4979 * Pull any data to free up the SB and allow
4980 * sender to "add more" while we will throw
4981 * away :-)
4982 */
4983 sctp_free_spbufspace(stcb, &stcb->asoc, sp);
4984 ret_sz += sp->length;
4985 do_wakeup_routine = 1;
4986 sp->some_taken = 1;
4987 sctp_m_freem(sp->data);
4988 sp->data = NULL;
4989 sp->tail_mbuf = NULL;
4990 sp->length = 0;
4991 }
4992 }
4993 SCTP_TCB_SEND_UNLOCK(stcb);
4994 }
4995 if (do_wakeup_routine) {
4996 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4997 struct socket *so;
4998
4999 so = SCTP_INP_SO(stcb->sctp_ep);
5000 if (!so_locked) {
5001 atomic_add_int(&stcb->asoc.refcnt, 1);
5002 SCTP_TCB_UNLOCK(stcb);
5003 SCTP_SOCKET_LOCK(so, 1);
5004 SCTP_TCB_LOCK(stcb);
5005 atomic_subtract_int(&stcb->asoc.refcnt, 1);
5006 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
5007 /* assoc was freed while we were unlocked */
5008 SCTP_SOCKET_UNLOCK(so, 1);
5009 return (ret_sz);
5010 }
5011 }
5012 #endif
5013 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
5014 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5015 if (!so_locked) {
5016 SCTP_SOCKET_UNLOCK(so, 1);
5017 }
5018 #endif
5019 }
5020 return (ret_sz);
5021 }
5022
5023 /*
5024 * checks to see if the given address, sa, is one that is currently known by
5025 * the kernel note: can't distinguish the same address on multiple interfaces
5026 * and doesn't handle multiple addresses with different zone/scope id's note:
5027 * ifa_ifwithaddr() compares the entire sockaddr struct
5028 */
5029 struct sctp_ifa *
5030 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
5031 int holds_lock)
5032 {
5033 struct sctp_laddr *laddr;
5034
5035 if (holds_lock == 0) {
5036 SCTP_INP_RLOCK(inp);
5037 }
5038
5039 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
5040 if (laddr->ifa == NULL)
5041 continue;
5042 if (addr->sa_family != laddr->ifa->address.sa.sa_family)
5043 continue;
5044 #ifdef INET
5045 if (addr->sa_family == AF_INET) {
5046 if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5047 laddr->ifa->address.sin.sin_addr.s_addr) {
5048 /* found him. */
5049 if (holds_lock == 0) {
5050 SCTP_INP_RUNLOCK(inp);
5051 }
5052 return (laddr->ifa);
5053 break;
5054 }
5055 }
5056 #endif
5057 #ifdef INET6
5058 if (addr->sa_family == AF_INET6) {
5059 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5060 &laddr->ifa->address.sin6)) {
5061 /* found him. */
5062 if (holds_lock == 0) {
5063 SCTP_INP_RUNLOCK(inp);
5064 }
5065 return (laddr->ifa);
5066 break;
5067 }
5068 }
5069 #endif
5070 }
5071 if (holds_lock == 0) {
5072 SCTP_INP_RUNLOCK(inp);
5073 }
5074 return (NULL);
5075 }
5076
/*
 * Compute the address-hash bucket value for a sockaddr.  IPv4 folds the
 * 32-bit address with its upper half; IPv6 sums the four 32-bit words
 * and folds likewise.  Unsupported families hash to 0.
 */
uint32_t
sctp_get_ifa_hash_val(struct sockaddr *addr)
{
	switch (addr->sa_family) {
#ifdef INET
	case AF_INET:
		{
			struct sockaddr_in *sin = (struct sockaddr_in *)addr;
			uint32_t v4 = sin->sin_addr.s_addr;

			return (v4 ^ (v4 >> 16));
		}
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;
			uint32_t folded;

			folded = sin6->sin6_addr.s6_addr32[0] +
			    sin6->sin6_addr.s6_addr32[1] +
			    sin6->sin6_addr.s6_addr32[2] +
			    sin6->sin6_addr.s6_addr32[3];
			return (folded ^ (folded >> 16));
		}
#endif
	default:
		break;
	}
	return (0);
}
5110
5111 struct sctp_ifa *
5112 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
5113 {
5114 struct sctp_ifa *sctp_ifap;
5115 struct sctp_vrf *vrf;
5116 struct sctp_ifalist *hash_head;
5117 uint32_t hash_of_addr;
5118
5119 if (holds_lock == 0)
5120 SCTP_IPI_ADDR_RLOCK();
5121
5122 vrf = sctp_find_vrf(vrf_id);
5123 if (vrf == NULL) {
5124 if (holds_lock == 0)
5125 SCTP_IPI_ADDR_RUNLOCK();
5126 return (NULL);
5127 }
5128
5129 hash_of_addr = sctp_get_ifa_hash_val(addr);
5130
5131 hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5132 if (hash_head == NULL) {
5133 SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5134 hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark,
5135 (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark));
5136 sctp_print_address(addr);
5137 SCTP_PRINTF("No such bucket for address\n");
5138 if (holds_lock == 0)
5139 SCTP_IPI_ADDR_RUNLOCK();
5140
5141 return (NULL);
5142 }
5143 LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5144 if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5145 continue;
5146 #ifdef INET
5147 if (addr->sa_family == AF_INET) {
5148 if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5149 sctp_ifap->address.sin.sin_addr.s_addr) {
5150 /* found him. */
5151 if (holds_lock == 0)
5152 SCTP_IPI_ADDR_RUNLOCK();
5153 return (sctp_ifap);
5154 break;
5155 }
5156 }
5157 #endif
5158 #ifdef INET6
5159 if (addr->sa_family == AF_INET6) {
5160 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5161 &sctp_ifap->address.sin6)) {
5162 /* found him. */
5163 if (holds_lock == 0)
5164 SCTP_IPI_ADDR_RUNLOCK();
5165 return (sctp_ifap);
5166 break;
5167 }
5168 }
5169 #endif
5170 }
5171 if (holds_lock == 0)
5172 SCTP_IPI_ADDR_RUNLOCK();
5173 return (NULL);
5174 }
5175
/*
 * The user has consumed data from the socket: decide whether our
 * receive window has opened enough (>= rwnd_req bytes since the last
 * report) to be worth sending a window-update SACK to the peer, and
 * send it (plus any pending output) if so.
 *
 * stcb         - association data was read from (NULL is tolerated).
 * freed_so_far - in/out: bytes freed since last call; folded into
 *                stcb->freed_by_sorcv_sincelast and zeroed here.
 * hold_rlock   - non-zero if the caller holds the inp read-queue lock;
 *                it is dropped around the SACK work and re-taken.
 * rwnd_req     - minimum window growth (bytes) to trigger an update.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;	/* non-zero once we dropped the read lock */
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* Pin the association so it cannot be freed under us. */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
	    (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED | SCTP_STATE_SHUTDOWN_RECEIVED))) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		if (hold_rlock) {
			/* Drop caller's read lock before taking the TCB lock. */
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	if (so && r_unlocked && hold_rlock) {
		/* Restore the read lock for the caller. */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}

	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5258
/*
 * The SCTP-specific guts of soreceive().  Copies queued message data
 * (or a notification) from the endpoint's read queue either into the
 * caller's uio (mp == NULL) or out as a whole mbuf chain (*mp set),
 * honoring MSG_PEEK / MSG_DONTWAIT / MSG_OOB semantics, blocking when
 * allowed, filling in sinfo/from if requested, and triggering
 * window-update SACKs via sctp_user_rcvd() as receive space is freed.
 *
 * Returns 0 or an errno; if msg_flags is non-NULL it receives the
 * output flags (MSG_EOR, MSG_NOTIFICATION, MSG_TRUNC).
 */
int
sctp_sorecvmsg(struct socket *so,
    struct uio *uio,
    struct mbuf **mp,
    struct sockaddr *from,
    int fromlen,
    int *msg_flags,
    struct sctp_sndrcvinfo *sinfo,
    int filling_sinfo)
{
	/*
	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
	 * On the way out we may send out any combination of:
	 * MSG_NOTIFICATION MSG_EOR
	 *
	 */
	struct sctp_inpcb *inp = NULL;
	ssize_t my_len = 0;
	ssize_t cp_len = 0;
	int error = 0;
	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
	struct mbuf *m = NULL;
	struct sctp_tcb *stcb = NULL;
	int wakeup_read_socket = 0;
	int freecnt_applied = 0;	/* non-zero once we hold a refcnt on stcb */
	int out_flags = 0, in_flags = 0;
	int block_allowed = 1;
	uint32_t freed_so_far = 0;
	ssize_t copied_so_far = 0;
	int in_eeor_mode = 0;
	int no_rcv_needed = 0;
	uint32_t rwnd_req = 0;
	int hold_sblock = 0;	/* non-zero iff we hold SOCKBUF_LOCK(&so->so_rcv) */
	int hold_rlock = 0;	/* non-zero iff we hold the inp read-queue lock */
	ssize_t slen = 0;
	uint32_t held_length = 0;
	int sockbuf_lock = 0;	/* non-zero iff we hold the sblock() reader lock */

	if (uio == NULL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		return (EINVAL);
	}

	if (msg_flags) {
		in_flags = *msg_flags;
		if (in_flags & MSG_PEEK)
			SCTP_STAT_INCR(sctps_read_peeks);
	} else {
		in_flags = 0;
	}
	slen = uio->uio_resid;

	/* Pull in and set up our int flags */
	if (in_flags & MSG_OOB) {
		/* Out of band's NOT supported */
		return (EOPNOTSUPP);
	}
	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		return (EINVAL);
	}
	if ((in_flags & (MSG_DONTWAIT
	    | MSG_NBIO
	    )) ||
	    SCTP_SO_IS_NBIO(so)) {
		block_allowed = 0;
	}
	/* setup the endpoint */
	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
		return (EFAULT);
	}
	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
	/* Must be at least a MTU's worth */
	if (rwnd_req < SCTP_MIN_RWND)
		rwnd_req = SCTP_MIN_RWND;
	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
		sctp_misc_ints(SCTP_SORECV_ENTER,
		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
		sctp_misc_ints(SCTP_SORECV_ENTERPL,
		    rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
	}


	/* Serialize against other readers of this socket buffer. */
	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
	if (error) {
		goto release_unlocked;
	}
	sockbuf_lock = 1;
restart:


restart_nosblocks:
	if (hold_sblock == 0) {
		SOCKBUF_LOCK(&so->so_rcv);
		hold_sblock = 1;
	}
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
		if (so->so_error) {
			error = so->so_error;
			if ((in_flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto out;
		} else {
			if (so->so_rcv.sb_cc == 0) {
				/* indicate EOF */
				error = 0;
				goto out;
			}
		}
	}
	if (so->so_rcv.sb_cc <= held_length) {
		/* Nothing new to read beyond what we already accounted for. */
		if (so->so_error) {
			error = so->so_error;
			if ((in_flags & MSG_PEEK) == 0) {
				so->so_error = 0;
			}
			goto out;
		}
		if ((so->so_rcv.sb_cc == 0) &&
		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
				/*
				 * For active open side clear flags for
				 * re-use passive open is blocked by
				 * connect.
				 */
				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
					/*
					 * You were aborted, passive side
					 * always hits here
					 */
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
					error = ECONNRESET;
				}
				so->so_state &= ~(SS_ISCONNECTING |
				    SS_ISDISCONNECTING |
				    SS_ISCONFIRMING |
				    SS_ISCONNECTED);
				if (error == 0) {
					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
						error = ENOTCONN;
					}
				}
				goto out;
			}
		}
		if (block_allowed) {
			/* Sleep until data arrives, then re-evaluate. */
			error = sbwait(&so->so_rcv);
			if (error) {
				goto out;
			}
			held_length = 0;
			goto restart_nosblocks;
		} else {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
			error = EWOULDBLOCK;
			goto out;
		}
	}
	if (hold_sblock == 1) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	/* we possibly have data we can read */
	/* sa_ignore FREED_MEMORY */
	control = TAILQ_FIRST(&inp->read_queue);
	if (control == NULL) {
		/*
		 * This could be happening since the appender did the
		 * increment but as not yet did the tailq insert onto the
		 * read_queue
		 */
		if (hold_rlock == 0) {
			SCTP_INP_READ_LOCK(inp);
		}
		control = TAILQ_FIRST(&inp->read_queue);
		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
#ifdef INVARIANTS
			panic("Huh, its non zero and nothing on control?");
#endif
			so->so_rcv.sb_cc = 0;
		}
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
		goto restart;
	}

	if ((control->length == 0) &&
	    (control->do_not_ref_stcb)) {
		/*
		 * Clean up code for freeing assoc that left behind a
		 * pdapi.. maybe a peer in EEOR that just closed after
		 * sending and never indicated a EOR.
		 */
		if (hold_rlock == 0) {
			hold_rlock = 1;
			SCTP_INP_READ_LOCK(inp);
		}
		control->held_length = 0;
		if (control->data) {
			/* Hmm there is data here .. fix */
			struct mbuf *m_tmp;
			int cnt = 0;

			m_tmp = control->data;
			while (m_tmp) {
				cnt += SCTP_BUF_LEN(m_tmp);
				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
					control->tail_mbuf = m_tmp;
					control->end_added = 1;
				}
				m_tmp = SCTP_BUF_NEXT(m_tmp);
			}
			control->length = cnt;
		} else {
			/* remove it */
			TAILQ_REMOVE(&inp->read_queue, control, next);
			/* Add back any hiddend data */
			sctp_free_remote_addr(control->whoFrom);
			sctp_free_a_readq(stcb, control);
		}
		if (hold_rlock) {
			hold_rlock = 0;
			SCTP_INP_READ_UNLOCK(inp);
		}
		goto restart;
	}
	if ((control->length == 0) &&
	    (control->end_added == 1)) {
		/*
		 * Do we also need to check for (control->pdapi_aborted ==
		 * 1)?
		 */
		if (hold_rlock == 0) {
			hold_rlock = 1;
			SCTP_INP_READ_LOCK(inp);
		}
		TAILQ_REMOVE(&inp->read_queue, control, next);
		if (control->data) {
#ifdef INVARIANTS
			panic("control->data not null but control->length == 0");
#else
			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
			sctp_m_freem(control->data);
			control->data = NULL;
#endif
		}
		if (control->aux_data) {
			sctp_m_free(control->aux_data);
			control->aux_data = NULL;
		}
#ifdef INVARIANTS
		if (control->on_strm_q) {
			panic("About to free ctl:%p so:%p and its in %d",
			    control, so, control->on_strm_q);
		}
#endif
		sctp_free_remote_addr(control->whoFrom);
		sctp_free_a_readq(stcb, control);
		if (hold_rlock) {
			hold_rlock = 0;
			SCTP_INP_READ_UNLOCK(inp);
		}
		goto restart;
	}
	if (control->length == 0) {
		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
		    (filling_sinfo)) {
			/* find a more suitable one then this */
			ctl = TAILQ_NEXT(control, next);
			while (ctl) {
				if ((ctl->stcb != control->stcb) && (ctl->length) &&
				    (ctl->some_taken ||
				    (ctl->spec_flags & M_NOTIFICATION) ||
				    ((ctl->do_not_ref_stcb == 0) &&
				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
				    ) {
					/*-
					 * If we have a different TCB next, and there is data
					 * present. If we have already taken some (pdapi), OR we can
					 * ref the tcb and no delivery as started on this stream, we
					 * take it. Note we allow a notification on a different
					 * assoc to be delivered..
					 */
					control = ctl;
					goto found_one;
				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
					    (ctl->length) &&
					    ((ctl->some_taken) ||
					    ((ctl->do_not_ref_stcb == 0) &&
					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
					/*-
					 * If we have the same tcb, and there is data present, and we
					 * have the strm interleave feature present. Then if we have
					 * taken some (pdapi) or we can refer to tht tcb AND we have
					 * not started a delivery for this stream, we can take it.
					 * Note we do NOT allow a notificaiton on the same assoc to
					 * be delivered.
					 */
					control = ctl;
					goto found_one;
				}
				ctl = TAILQ_NEXT(ctl, next);
			}
		}
		/*
		 * if we reach here, not suitable replacement is available
		 * <or> fragment interleave is NOT on. So stuff the sb_cc
		 * into the our held count, and its time to sleep again.
		 */
		held_length = so->so_rcv.sb_cc;
		control->held_length = so->so_rcv.sb_cc;
		goto restart;
	}
	/* Clear the held length since there is something to read */
	control->held_length = 0;
found_one:
	/*
	 * If we reach here, control has a some data for us to read off.
	 * Note that stcb COULD be NULL.
	 */
	if (hold_rlock == 0) {
		hold_rlock = 1;
		SCTP_INP_READ_LOCK(inp);
	}
	control->some_taken++;
	stcb = control->stcb;
	if (stcb) {
		if ((control->do_not_ref_stcb == 0) &&
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
			if (freecnt_applied == 0)
				stcb = NULL;
		} else if (control->do_not_ref_stcb == 0) {
			/* you can't free it on me please */
			/*
			 * The lock on the socket buffer protects us so the
			 * free code will stop. But since we used the
			 * socketbuf lock and the sender uses the tcb_lock
			 * to increment, we need to use the atomic add to
			 * the refcnt
			 */
			if (freecnt_applied) {
#ifdef INVARIANTS
				panic("refcnt already incremented");
#else
				SCTP_PRINTF("refcnt already incremented?\n");
#endif
			} else {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				freecnt_applied = 1;
			}
			/*
			 * Setup to remember how much we have not yet told
			 * the peer our rwnd has opened up. Note we grab the
			 * value from the tcb from last time. Note too that
			 * sack sending clears this when a sack is sent,
			 * which is fine. Once we hit the rwnd_req, we then
			 * will go to the sctp_user_rcvd() that will not
			 * lock until it KNOWs it MUST send a WUP-SACK.
			 */
			freed_so_far = (uint32_t)stcb->freed_by_sorcv_sincelast;
			stcb->freed_by_sorcv_sincelast = 0;
		}
	}
	if (stcb &&
	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
	    control->do_not_ref_stcb == 0) {
		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
	}

	/* First lets get off the sinfo and sockaddr info */
	if ((sinfo != NULL) && (filling_sinfo != 0)) {
		sinfo->sinfo_stream = control->sinfo_stream;
		sinfo->sinfo_ssn = (uint16_t)control->mid;
		sinfo->sinfo_flags = control->sinfo_flags;
		sinfo->sinfo_ppid = control->sinfo_ppid;
		sinfo->sinfo_context = control->sinfo_context;
		sinfo->sinfo_timetolive = control->sinfo_timetolive;
		sinfo->sinfo_tsn = control->sinfo_tsn;
		sinfo->sinfo_cumtsn = control->sinfo_cumtsn;
		sinfo->sinfo_assoc_id = control->sinfo_assoc_id;
		nxt = TAILQ_NEXT(control, next);
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
			/* Fill the extended info describing the NEXT queued message. */
			struct sctp_extrcvinfo *s_extra;

			s_extra = (struct sctp_extrcvinfo *)sinfo;
			if ((nxt) &&
			    (nxt->length)) {
				s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
				if (nxt->sinfo_flags & SCTP_UNORDERED) {
					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
				}
				if (nxt->spec_flags & M_NOTIFICATION) {
					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
				}
				s_extra->serinfo_next_aid = nxt->sinfo_assoc_id;
				s_extra->serinfo_next_length = nxt->length;
				s_extra->serinfo_next_ppid = nxt->sinfo_ppid;
				s_extra->serinfo_next_stream = nxt->sinfo_stream;
				if (nxt->tail_mbuf != NULL) {
					if (nxt->end_added) {
						s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
					}
				}
			} else {
				/*
				 * we explicitly 0 this, since the memcpy
				 * got some other things beyond the older
				 * sinfo_ that is on the control's structure
				 * :-D
				 */
				nxt = NULL;
				s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
				s_extra->serinfo_next_aid = 0;
				s_extra->serinfo_next_length = 0;
				s_extra->serinfo_next_ppid = 0;
				s_extra->serinfo_next_stream = 0;
			}
		}
		/*
		 * update off the real current cum-ack, if we have an stcb.
		 */
		if ((control->do_not_ref_stcb == 0) && stcb)
			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
		/*
		 * mask off the high bits, we keep the actual chunk bits in
		 * there.
		 */
		sinfo->sinfo_flags &= 0x00ff;
		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
			sinfo->sinfo_flags |= SCTP_UNORDERED;
		}
	}
#ifdef SCTP_ASOCLOG_OF_TSNS
	{
		/* Record this read in the endpoint's circular read log. */
		int index, newindex;
		struct sctp_pcbtsn_rlog *entry;

		do {
			index = inp->readlog_index;
			newindex = index + 1;
			if (newindex >= SCTP_READ_LOG_SIZE) {
				newindex = 0;
			}
		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
		entry = &inp->readlog[index];
		entry->vtag = control->sinfo_assoc_id;
		entry->strm = control->sinfo_stream;
		entry->seq = (uint16_t)control->mid;
		entry->sz = control->length;
		entry->flgs = control->sinfo_flags;
	}
#endif
	if ((fromlen > 0) && (from != NULL)) {
		/* Copy out the peer's address, mapping v4 to v6 if requested. */
		union sctp_sockstore store;
		size_t len;

		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
#ifdef INET6
		case AF_INET6:
			len = sizeof(struct sockaddr_in6);
			store.sin6 = control->whoFrom->ro._l_addr.sin6;
			store.sin6.sin6_port = control->port_from;
			break;
#endif
#ifdef INET
		case AF_INET:
#ifdef INET6
			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
				len = sizeof(struct sockaddr_in6);
				in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
				    &store.sin6);
				store.sin6.sin6_port = control->port_from;
			} else {
				len = sizeof(struct sockaddr_in);
				store.sin = control->whoFrom->ro._l_addr.sin;
				store.sin.sin_port = control->port_from;
			}
#else
			len = sizeof(struct sockaddr_in);
			store.sin = control->whoFrom->ro._l_addr.sin;
			store.sin.sin_port = control->port_from;
#endif
			break;
#endif
		default:
			len = 0;
			break;
		}
		memcpy(from, &store, min((size_t)fromlen, len));
#ifdef INET6
		{
			struct sockaddr_in6 lsa6, *from6;

			from6 = (struct sockaddr_in6 *)from;
			sctp_recover_scope_mac(from6, (&lsa6));
		}
#endif
	}
	if (hold_rlock) {
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
	}
	if (hold_sblock) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	/* now copy out what data we can */
	if (mp == NULL) {
		/* copy out each mbuf in the chain up to length */
get_more_data:
		m = control->data;
		while (m) {
			/* Move out all we can */
			cp_len = uio->uio_resid;
			my_len = SCTP_BUF_LEN(m);
			if (cp_len > my_len) {
				/* not enough in this buf */
				cp_len = my_len;
			}
			if (hold_rlock) {
				/* uiomove may sleep; cannot hold the read lock. */
				SCTP_INP_READ_UNLOCK(inp);
				hold_rlock = 0;
			}
			if (cp_len > 0)
				error = uiomove(mtod(m, char *), (int)cp_len, uio);
			/* re-read */
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				goto release;
			}

			if ((control->do_not_ref_stcb == 0) && stcb &&
			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
				no_rcv_needed = 1;
			}
			if (error) {
				/* error we are out of here */
				goto release;
			}
			SCTP_INP_READ_LOCK(inp);
			hold_rlock = 1;
			if (cp_len == SCTP_BUF_LEN(m)) {
				/* We consumed this whole mbuf. */
				if ((SCTP_BUF_NEXT(m) == NULL) &&
				    (control->end_added)) {
					out_flags |= MSG_EOR;
					if ((control->do_not_ref_stcb == 0) &&
					    (control->stcb != NULL) &&
					    ((control->spec_flags & M_NOTIFICATION) == 0))
						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
				}
				if (control->spec_flags & M_NOTIFICATION) {
					out_flags |= MSG_NOTIFICATION;
				}
				/* we ate up the mbuf */
				if (in_flags & MSG_PEEK) {
					/* just looking */
					m = SCTP_BUF_NEXT(m);
					copied_so_far += cp_len;
				} else {
					/* dispose of the mbuf */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv,
						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
					}
					sctp_sbfree(control, stcb, &so->so_rcv, m);
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv,
						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
					}
					copied_so_far += cp_len;
					freed_so_far += (uint32_t)cp_len;
					freed_so_far += MSIZE;
					atomic_subtract_int(&control->length, cp_len);
					control->data = sctp_m_free(m);
					m = control->data;
					/*
					 * been through it all, must hold sb
					 * lock ok to null tail
					 */
					if (control->data == NULL) {
#ifdef INVARIANTS
						if ((control->end_added == 0) ||
						    (TAILQ_NEXT(control, next) == NULL)) {
							/*
							 * If the end is not
							 * added, OR the
							 * next is NOT null
							 * we MUST have the
							 * lock.
							 */
							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
								panic("Hmm we don't own the lock?");
							}
						}
#endif
						control->tail_mbuf = NULL;
#ifdef INVARIANTS
						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
							panic("end_added, nothing left and no MSG_EOR");
						}
#endif
					}
				}
			} else {
				/* Do we need to trim the mbuf? */
				if (control->spec_flags & M_NOTIFICATION) {
					out_flags |= MSG_NOTIFICATION;
				}
				if ((in_flags & MSG_PEEK) == 0) {
					SCTP_BUF_RESV_UF(m, cp_len);
					SCTP_BUF_LEN(m) -= (int)cp_len;
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, (int)cp_len);
					}
					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
					if ((control->do_not_ref_stcb == 0) &&
					    stcb) {
						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
					}
					copied_so_far += cp_len;
					freed_so_far += (uint32_t)cp_len;
					freed_so_far += MSIZE;
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
						    SCTP_LOG_SBRESULT, 0);
					}
					atomic_subtract_int(&control->length, cp_len);
				} else {
					copied_so_far += cp_len;
				}
			}
			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
				break;
			}
			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
			    (control->do_not_ref_stcb == 0) &&
			    (freed_so_far >= rwnd_req)) {
				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
			}
		}		/* end while(m) */
		/*
		 * At this point we have looked at it all and we either have
		 * a MSG_EOR/or read all the user wants... <OR>
		 * control->length == 0.
		 */
		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
			/* we are done with this control */
			if (control->length == 0) {
				if (control->data) {
#ifdef INVARIANTS
					panic("control->data not null at read eor?");
#else
					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
					sctp_m_freem(control->data);
					control->data = NULL;
#endif
				}
		done_with_control:
				if (hold_rlock == 0) {
					SCTP_INP_READ_LOCK(inp);
					hold_rlock = 1;
				}
				TAILQ_REMOVE(&inp->read_queue, control, next);
				/* Add back any hiddend data */
				if (control->held_length) {
					held_length = 0;
					control->held_length = 0;
					wakeup_read_socket = 1;
				}
				if (control->aux_data) {
					sctp_m_free(control->aux_data);
					control->aux_data = NULL;
				}
				no_rcv_needed = control->do_not_ref_stcb;
				sctp_free_remote_addr(control->whoFrom);
				control->data = NULL;
#ifdef INVARIANTS
				if (control->on_strm_q) {
					panic("About to free ctl:%p so:%p and its in %d",
					    control, so, control->on_strm_q);
				}
#endif
				sctp_free_a_readq(stcb, control);
				control = NULL;
				if ((freed_so_far >= rwnd_req) &&
				    (no_rcv_needed == 0))
					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);

			} else {
				/*
				 * The user did not read all of this
				 * message, turn off the returned MSG_EOR
				 * since we are leaving more behind on the
				 * control to read.
				 */
#ifdef INVARIANTS
				if (control->end_added &&
				    (control->data == NULL) &&
				    (control->tail_mbuf == NULL)) {
					panic("Gak, control->length is corrupt?");
				}
#endif
				no_rcv_needed = control->do_not_ref_stcb;
				out_flags &= ~MSG_EOR;
			}
		}
		if (out_flags & MSG_EOR) {
			goto release;
		}
		if ((uio->uio_resid == 0) ||
		    ((in_eeor_mode) &&
		    (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))) {
			goto release;
		}
		/*
		 * If I hit here the receiver wants more and this message is
		 * NOT done (pd-api). So two questions. Can we block? if not
		 * we are done. Did the user NOT set MSG_WAITALL?
		 */
		if (block_allowed == 0) {
			goto release;
		}
		/*
		 * We need to wait for more data a few things: - We don't
		 * sbunlock() so we don't get someone else reading. - We
		 * must be sure to account for the case where what is added
		 * is NOT to our control when we wakeup.
		 */

		/*
		 * Do we need to tell the transport a rwnd update might be
		 * needed before we go to sleep?
		 */
		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
		    ((freed_so_far >= rwnd_req) &&
		    (control->do_not_ref_stcb == 0) &&
		    (no_rcv_needed == 0))) {
			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
		}
wait_some_more:
		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
			goto release;
		}

		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
			goto release;

		if (hold_rlock == 1) {
			SCTP_INP_READ_UNLOCK(inp);
			hold_rlock = 0;
		}
		if (hold_sblock == 0) {
			SOCKBUF_LOCK(&so->so_rcv);
			hold_sblock = 1;
		}
		if ((copied_so_far) && (control->length == 0) &&
		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
			goto release;
		}
		if (so->so_rcv.sb_cc <= control->held_length) {
			error = sbwait(&so->so_rcv);
			if (error) {
				goto release;
			}
			control->held_length = 0;
		}
		if (hold_sblock) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			hold_sblock = 0;
		}
		if (control->length == 0) {
			/* still nothing here */
			if (control->end_added == 1) {
				/* he aborted, or is done i.e.did a shutdown */
				out_flags |= MSG_EOR;
				if (control->pdapi_aborted) {
					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;

					out_flags |= MSG_TRUNC;
				} else {
					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
				}
				goto done_with_control;
			}
			if (so->so_rcv.sb_cc > held_length) {
				control->held_length = so->so_rcv.sb_cc;
				held_length = 0;
			}
			goto wait_some_more;
		} else if (control->data == NULL) {
			/*
			 * we must re-sync since data is probably being
			 * added
			 */
			SCTP_INP_READ_LOCK(inp);
			if ((control->length > 0) && (control->data == NULL)) {
				/*
				 * big trouble.. we have the lock and its
				 * corrupt?
				 */
#ifdef INVARIANTS
				panic("Impossible data==NULL length !=0");
#endif
				out_flags |= MSG_EOR;
				out_flags |= MSG_TRUNC;
				control->length = 0;
				SCTP_INP_READ_UNLOCK(inp);
				goto done_with_control;
			}
			SCTP_INP_READ_UNLOCK(inp);
			/* We will fall around to get more data */
		}
		goto get_more_data;
	} else {
		/*-
		 * Give caller back the mbuf chain,
		 * store in uio_resid the length
		 */
		wakeup_read_socket = 0;
		if ((control->end_added == 0) ||
		    (TAILQ_NEXT(control, next) == NULL)) {
			/* Need to get rlock */
			if (hold_rlock == 0) {
				SCTP_INP_READ_LOCK(inp);
				hold_rlock = 1;
			}
		}
		if (control->end_added) {
			out_flags |= MSG_EOR;
			if ((control->do_not_ref_stcb == 0) &&
			    (control->stcb != NULL) &&
			    ((control->spec_flags & M_NOTIFICATION) == 0))
				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
		}
		if (control->spec_flags & M_NOTIFICATION) {
			out_flags |= MSG_NOTIFICATION;
		}
		uio->uio_resid = control->length;
		*mp = control->data;
		m = control->data;
		while (m) {
			/* Account every mbuf handed out as freed sockbuf space. */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&so->so_rcv,
				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
			}
			sctp_sbfree(control, stcb, &so->so_rcv, m);
			freed_so_far += (uint32_t)SCTP_BUF_LEN(m);
			freed_so_far += MSIZE;
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&so->so_rcv,
				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
		control->data = control->tail_mbuf = NULL;
		control->length = 0;
		if (out_flags & MSG_EOR) {
			/* Done with this control */
			goto done_with_control;
		}
	}
release:
	if (hold_rlock == 1) {
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
	}
	if (hold_sblock == 1) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}

	sbunlock(&so->so_rcv);
	sockbuf_lock = 0;

release_unlocked:
	if (hold_sblock) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
		if ((freed_so_far >= rwnd_req) &&
		    (control && (control->do_not_ref_stcb == 0)) &&
		    (no_rcv_needed == 0))
			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
	}
out:
	if (msg_flags) {
		*msg_flags = out_flags;
	}
	if (((out_flags & MSG_EOR) == 0) &&
	    ((in_flags & MSG_PEEK) == 0) &&
	    (sinfo) &&
	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
		struct sctp_extrcvinfo *s_extra;

		s_extra = (struct sctp_extrcvinfo *)sinfo;
		s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
	}
	if (hold_rlock == 1) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	if (hold_sblock) {
		SOCKBUF_UNLOCK(&so->so_rcv);
	}
	if (sockbuf_lock) {
		sbunlock(&so->so_rcv);
	}

	if (freecnt_applied) {
		/*
		 * The lock on the socket buffer protects us so the free
		 * code will stop. But since we used the socketbuf lock and
		 * the sender uses the tcb_lock to increment, we need to use
		 * the atomic add to the refcnt.
		 */
		if (stcb == NULL) {
#ifdef INVARIANTS
			panic("stcb for refcnt has gone NULL?");
			goto stage_left;
#else
			goto stage_left;
#endif
		}
		/* Save the value back for next time */
		stcb->freed_by_sorcv_sincelast = freed_so_far;
		atomic_add_int(&stcb->asoc.refcnt, -1);
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
		if (stcb) {
			sctp_misc_ints(SCTP_SORECV_DONE,
			    freed_so_far,
			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
			    stcb->asoc.my_rwnd,
			    so->so_rcv.sb_cc);
		} else {
			sctp_misc_ints(SCTP_SORECV_DONE,
			    freed_so_far,
			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
			    0,
			    so->so_rcv.sb_cc);
		}
	}
stage_left:
	if (wakeup_read_socket) {
		sctp_sorwakeup(inp, so);
	}
	return (error);
}
6225
6226
6227 #ifdef SCTP_MBUF_LOGGING
6228 struct mbuf *
6229 sctp_m_free(struct mbuf *m)
6230 {
6231 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6232 sctp_log_mb(m, SCTP_MBUF_IFREE);
6233 }
6234 return (m_free(m));
6235 }
6236
6237 void
6238 sctp_m_freem(struct mbuf *mb)
6239 {
6240 while (mb != NULL)
6241 mb = sctp_m_free(mb);
6242 }
6243
6244 #endif
6245
/*
 * Queue a peer-set-primary request for a local address.  The request is
 * handed to the address work queue; the iterator later asks every
 * association holding the address to make it primary.
 *
 * Returns 0 on success, EADDRNOTAVAIL if the address is not a known
 * local address in the given VRF, or ENOMEM if the work-queue entry
 * cannot be allocated.
 */
int
sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
{
    /*
     * Given a local address. For all associations that holds the
     * address, request a peer-set-primary.
     */
    struct sctp_ifa *ifa;
    struct sctp_laddr *wi;

    ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
    if (ifa == NULL) {
        SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
        return (EADDRNOTAVAIL);
    }
    /*
     * Now that we have the ifa we must awaken the iterator with this
     * message.
     */
    wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
    if (wi == NULL) {
        SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
        return (ENOMEM);
    }
    /* Now incr the count and int wi structure */
    SCTP_INCR_LADDR_COUNT();
    memset(wi, 0, sizeof(*wi));
    (void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
    wi->ifa = ifa;
    wi->action = SCTP_SET_PRIM_ADDR;
    /* Hold a reference on the ifa while it sits on the work queue. */
    atomic_add_int(&ifa->refcount, 1);

    /* Now add it to the work queue */
    SCTP_WQ_ADDR_LOCK();
    /*
     * Should this really be a tailq? As it is we will process the
     * newest first :-0
     */
    LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
    /* Kick the address work-queue timer so the request gets serviced. */
    sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
        (struct sctp_inpcb *)NULL,
        (struct sctp_tcb *)NULL,
        (struct sctp_nets *)NULL);
    SCTP_WQ_ADDR_UNLOCK();
    return (0);
}
6292
6293
/*
 * Protocol-specific soreceive() entry point for SCTP sockets.
 *
 * Translates the generic socket-layer receive arguments into a
 * sctp_sorecvmsg() call.  On return it converts the filled-in
 * sctp_extrcvinfo into a control-message mbuf chain (unless the
 * message was a notification) and duplicates the peer address for
 * the caller.  Returns 0 or an errno value.
 */
int
sctp_soreceive(struct socket *so,
    struct sockaddr **psa,
    struct uio *uio,
    struct mbuf **mp0,
    struct mbuf **controlp,
    int *flagsp)
{
    int error, fromlen;
    uint8_t sockbuf[256];	/* scratch storage for the peer address */
    struct sockaddr *from;
    struct sctp_extrcvinfo sinfo;
    int filling_sinfo = 1;
    int flags;
    struct sctp_inpcb *inp;

    inp = (struct sctp_inpcb *)so->so_pcb;
    /* pickup the assoc we are reading from */
    if (inp == NULL) {
        SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
        return (EINVAL);
    }
    /*
     * Only fill in sinfo when the user asked for ancillary data via one
     * of the rcvinfo features and supplied somewhere to put it.
     */
    if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
        sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
        sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
        (controlp == NULL)) {
        /* user does not want the sndrcv ctl */
        filling_sinfo = 0;
    }
    if (psa) {
        from = (struct sockaddr *)sockbuf;
        fromlen = sizeof(sockbuf);
        /* sa_len == 0 marks "no address recorded yet". */
        from->sa_len = 0;
    } else {
        from = NULL;
        fromlen = 0;
    }

    if (filling_sinfo) {
        memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
    }
    if (flagsp != NULL) {
        flags = *flagsp;
    } else {
        flags = 0;
    }
    error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, &flags,
        (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
    if (flagsp != NULL) {
        *flagsp = flags;
    }
    if (controlp != NULL) {
        /* copy back the sinfo in a CMSG format */
        if (filling_sinfo && ((flags & MSG_NOTIFICATION) == 0)) {
            *controlp = sctp_build_ctl_nchunk(inp,
                (struct sctp_sndrcvinfo *)&sinfo);
        } else {
            *controlp = NULL;
        }
    }
    if (psa) {
        /* copy back the address info */
        if (from && from->sa_len) {
            *psa = sodupsockaddr(from, M_NOWAIT);
        } else {
            *psa = NULL;
        }
    }
    return (error);
}
6364
6365
6366
6367
6368
/*
 * Add a packed list of totaddr addresses (as supplied by sctp_connectx())
 * to an existing association as remote, confirmed destinations.
 *
 * On any invalid address or on failure to add one, the association is
 * freed, *error is set (EINVAL or ENOBUFS), and the count of addresses
 * added so far is returned.  Returns the number of addresses added.
 *
 * NOTE(review): an unrecognized sa_family leaves 'incr' at its previous
 * value (0 on the first iteration), so 'sa' is not advanced for that
 * entry; callers are expected to have validated the list first via
 * sctp_connectx_helper_find() -- confirm.
 */
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
    int added = 0;
    int i;
    struct sctp_inpcb *inp;
    struct sockaddr *sa;
    size_t incr = 0;
#ifdef INET
    struct sockaddr_in *sin;
#endif
#ifdef INET6
    struct sockaddr_in6 *sin6;
#endif

    sa = addr;
    inp = stcb->sctp_ep;
    *error = 0;
    for (i = 0; i < totaddr; i++) {
        switch (sa->sa_family) {
#ifdef INET
        case AF_INET:
            incr = sizeof(struct sockaddr_in);
            sin = (struct sockaddr_in *)sa;
            /* Reject wildcard, broadcast and multicast destinations. */
            if ((sin->sin_addr.s_addr == INADDR_ANY) ||
                (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
                IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
                SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
                (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
                    SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
                *error = EINVAL;
                goto out_now;
            }
            if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
                SCTP_DONOT_SETSCOPE,
                SCTP_ADDR_IS_CONFIRMED)) {
                /* assoc gone no un-lock */
                SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
                (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
                    SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
                *error = ENOBUFS;
                goto out_now;
            }
            added++;
            break;
#endif
#ifdef INET6
        case AF_INET6:
            incr = sizeof(struct sockaddr_in6);
            sin6 = (struct sockaddr_in6 *)sa;
            /* Reject unspecified and multicast destinations. */
            if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
                IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
                SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
                (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
                    SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
                *error = EINVAL;
                goto out_now;
            }
            if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
                SCTP_DONOT_SETSCOPE,
                SCTP_ADDR_IS_CONFIRMED)) {
                /* assoc gone no un-lock */
                SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
                (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
                    SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
                *error = ENOBUFS;
                goto out_now;
            }
            added++;
            break;
#endif
        default:
            break;
        }
        /* Advance to the next packed sockaddr in the caller's buffer. */
        sa = (struct sockaddr *)((caddr_t)sa + incr);
    }
out_now:
    return (added);
}
6449
/*
 * Validate a packed list of totaddr addresses destined for
 * sctp_connectx() without modifying any state.
 *
 * Counts the IPv4 and IPv6 addresses into *num_v4/*num_v6, checks that
 * each sockaddr's sa_len matches its family, that no IPv6 entry is
 * v4-mapped, and that the walk never reads past 'limit' bytes of the
 * caller's buffer.  Returns 0 on success, EINVAL on any malformed
 * entry, or EALREADY if an association already exists for one of the
 * addresses.
 */
int
sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
    unsigned int totaddr,
    unsigned int *num_v4, unsigned int *num_v6,
    unsigned int limit)
{
    struct sockaddr *sa;
    struct sctp_tcb *stcb;
    unsigned int incr, at, i;

    at = 0;
    sa = addr;
    *num_v6 = *num_v4 = 0;
    /* account and validate addresses */
    if (totaddr == 0) {
        return (EINVAL);
    }
    for (i = 0; i < totaddr; i++) {
        /* There must be room for at least a generic sockaddr header. */
        if (at + sizeof(struct sockaddr) > limit) {
            return (EINVAL);
        }
        switch (sa->sa_family) {
#ifdef INET
        case AF_INET:
            incr = (unsigned int)sizeof(struct sockaddr_in);
            if (sa->sa_len != incr) {
                return (EINVAL);
            }
            (*num_v4) += 1;
            break;
#endif
#ifdef INET6
        case AF_INET6:
            {
                struct sockaddr_in6 *sin6;

                sin6 = (struct sockaddr_in6 *)sa;
                if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
                    /* Must be non-mapped for connectx */
                    return (EINVAL);
                }
                incr = (unsigned int)sizeof(struct sockaddr_in6);
                if (sa->sa_len != incr) {
                    return (EINVAL);
                }
                (*num_v6) += 1;
                break;
            }
#endif
        default:
            return (EINVAL);
        }
        /* The full family-sized sockaddr must also fit in the buffer. */
        if ((at + incr) > limit) {
            return (EINVAL);
        }
        /* Hold the inp across the lookup, which may sleep/unlock. */
        SCTP_INP_INCR_REF(inp);
        stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
        if (stcb != NULL) {
            SCTP_TCB_UNLOCK(stcb);
            return (EALREADY);
        } else {
            SCTP_INP_DECR_REF(inp);
        }
        at += incr;
        sa = (struct sockaddr *)((caddr_t)sa + incr);
    }
    return (0);
}
6518
6519 /*
6520 * sctp_bindx(ADD) for one address.
6521 * assumes all arguments are valid/checked by caller.
6522 */
/*
 * sctp_bindx(ADD) for one address.
 * assumes all arguments are valid/checked by caller.
 *
 * Validates the address against the socket's address-family flags,
 * translating a v4-mapped IPv6 address to plain IPv4 where permitted,
 * then either performs the initial bind (if the endpoint is still
 * unbound) or adds the address to the already-bound endpoint.
 * Errors are reported through *error (0 on success).
 */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
    struct sockaddr *addr_touse;
#if defined(INET) && defined(INET6)
    struct sockaddr_in sin;	/* holds the un-mapped form of a v4-mapped address */
#endif

    /* see if we're bound all already! */
    if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
        SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
        *error = EINVAL;
        return;
    }
    addr_touse = sa;
#ifdef INET6
    if (sa->sa_family == AF_INET6) {
#ifdef INET
        struct sockaddr_in6 *sin6;

#endif
        if (sa->sa_len != sizeof(struct sockaddr_in6)) {
            SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
            *error = EINVAL;
            return;
        }
        if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
            /* can only bind v6 on PF_INET6 sockets */
            SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
            *error = EINVAL;
            return;
        }
#ifdef INET
        sin6 = (struct sockaddr_in6 *)addr_touse;
        if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
            if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
                SCTP_IPV6_V6ONLY(inp)) {
                /* can't bind v4-mapped on PF_INET sockets */
                SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
                *error = EINVAL;
                return;
            }
            /* Strip the mapping and work with the embedded IPv4 address. */
            in6_sin6_2_sin(&sin, sin6);
            addr_touse = (struct sockaddr *)&sin;
        }
#endif
    }
#endif
#ifdef INET
    if (sa->sa_family == AF_INET) {
        if (sa->sa_len != sizeof(struct sockaddr_in)) {
            SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
            *error = EINVAL;
            return;
        }
        if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
            SCTP_IPV6_V6ONLY(inp)) {
            /* can't bind v4 on PF_INET sockets */
            SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
            *error = EINVAL;
            return;
        }
    }
#endif
    if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
        if (p == NULL) {
            /* Can't get proc for Net/Open BSD */
            SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
            *error = EINVAL;
            return;
        }
        /* First address on an unbound endpoint: do a normal bind. */
        *error = sctp_inpcb_bind(so, addr_touse, NULL, p);
        return;
    }
    /*
     * No locks required here since bind and mgmt_ep_sa all do their own
     * locking. If we do something for the FIX: below we may need to
     * lock in that case.
     */
    if (assoc_id == 0) {
        /* add the address */
        struct sctp_inpcb *lep;
        struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

        /* validate the incoming port */
        if ((lsin->sin_port != 0) &&
            (lsin->sin_port != inp->sctp_lport)) {
            SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
            *error = EINVAL;
            return;
        } else {
            /* user specified 0 port, set it to existing port */
            lsin->sin_port = inp->sctp_lport;
        }

        /* Check whether some endpoint already owns this address/port. */
        lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
        if (lep != NULL) {
            /*
             * We must decrement the refcount since we have the
             * ep already and are binding. No remove going on
             * here.
             */
            SCTP_INP_DECR_REF(lep);
        }
        if (lep == inp) {
            /* already bound to it.. ok */
            return;
        } else if (lep == NULL) {
            /* Address is free: add it to this endpoint. */
            ((struct sockaddr_in *)addr_touse)->sin_port = 0;
            *error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
                SCTP_ADD_IP_ADDRESS,
                vrf_id, NULL);
        } else {
            *error = EADDRINUSE;
        }
        if (*error)
            return;
    } else {
        /*
         * FIX: decide whether we allow assoc based bindx
         */
    }
}
6648
6649 /*
6650 * sctp_bindx(DELETE) for one address.
6651 * assumes all arguments are valid/checked by caller.
6652 */
/*
 * sctp_bindx(DELETE) for one address.
 * assumes all arguments are valid/checked by caller.
 *
 * Mirrors sctp_bindx_add_address(): validates the address against the
 * socket's address-family flags (un-mapping a v4-mapped IPv6 address
 * where permitted) and then removes it from the endpoint.  Errors are
 * reported through *error.
 */
void
sctp_bindx_delete_address(struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
    struct sockaddr *addr_touse;
#if defined(INET) && defined(INET6)
    struct sockaddr_in sin;	/* holds the un-mapped form of a v4-mapped address */
#endif

    /* see if we're bound all already! */
    if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
        SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
        *error = EINVAL;
        return;
    }
    addr_touse = sa;
#ifdef INET6
    if (sa->sa_family == AF_INET6) {
#ifdef INET
        struct sockaddr_in6 *sin6;
#endif

        if (sa->sa_len != sizeof(struct sockaddr_in6)) {
            SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
            *error = EINVAL;
            return;
        }
        if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
            /* can only bind v6 on PF_INET6 sockets */
            SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
            *error = EINVAL;
            return;
        }
#ifdef INET
        sin6 = (struct sockaddr_in6 *)addr_touse;
        if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
            if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
                SCTP_IPV6_V6ONLY(inp)) {
                /* can't bind mapped-v4 on PF_INET sockets */
                SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
                *error = EINVAL;
                return;
            }
            /* Strip the mapping and work with the embedded IPv4 address. */
            in6_sin6_2_sin(&sin, sin6);
            addr_touse = (struct sockaddr *)&sin;
        }
#endif
    }
#endif
#ifdef INET
    if (sa->sa_family == AF_INET) {
        if (sa->sa_len != sizeof(struct sockaddr_in)) {
            SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
            *error = EINVAL;
            return;
        }
        if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
            SCTP_IPV6_V6ONLY(inp)) {
            /* can't bind v4 on PF_INET sockets */
            SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
            *error = EINVAL;
            return;
        }
    }
#endif
    /*
     * No lock required mgmt_ep_sa does its own locking. If the FIX:
     * below is ever changed we may need to lock before calling
     * association level binding.
     */
    if (assoc_id == 0) {
        /* delete the address */
        *error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
            SCTP_DEL_IP_ADDRESS,
            vrf_id, NULL);
    } else {
        /*
         * FIX: decide whether we allow assoc based bindx
         */
    }
}
6735
6736 /*
6737 * returns the valid local address count for an assoc, taking into account
6738 * all scoping rules
6739 */
/*
 * returns the valid local address count for an assoc, taking into account
 * all scoping rules
 */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
    int loopback_scope;
#if defined(INET)
    int ipv4_local_scope, ipv4_addr_legal;
#endif
#if defined (INET6)
    int local_scope, site_scope, ipv6_addr_legal;
#endif
    struct sctp_vrf *vrf;
    struct sctp_ifn *sctp_ifn;
    struct sctp_ifa *sctp_ifa;
    int count = 0;

    /* Turn on all the appropriate scopes */
    loopback_scope = stcb->asoc.scope.loopback_scope;
#if defined(INET)
    ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
    ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
#endif
#if defined(INET6)
    local_scope = stcb->asoc.scope.local_scope;
    site_scope = stcb->asoc.scope.site_scope;
    ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
#endif
    SCTP_IPI_ADDR_RLOCK();
    vrf = sctp_find_vrf(stcb->asoc.vrf_id);
    if (vrf == NULL) {
        /* no vrf, no addresses */
        SCTP_IPI_ADDR_RUNLOCK();
        return (0);
    }

    if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
        /*
         * bound all case: go through all ifns on the vrf
         */
        LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
            if ((loopback_scope == 0) &&
                SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
                continue;
            }
            LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
                /* Skip addresses the peer may not use. */
                if (sctp_is_addr_restricted(stcb, sctp_ifa))
                    continue;
                switch (sctp_ifa->address.sa.sa_family) {
#ifdef INET
                case AF_INET:
                    if (ipv4_addr_legal) {
                        struct sockaddr_in *sin;

                        sin = &sctp_ifa->address.sin;
                        if (sin->sin_addr.s_addr == 0) {
                            /* skip unspecified addrs */
                            continue;
                        }
                        /* skip addresses not visible in our jail */
                        if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
                            &sin->sin_addr) != 0) {
                            continue;
                        }
                        if ((ipv4_local_scope == 0) &&
                            (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
                            continue;
                        }
                        /* count this one */
                        count++;
                    } else {
                        continue;
                    }
                    break;
#endif
#ifdef INET6
                case AF_INET6:
                    if (ipv6_addr_legal) {
                        struct sockaddr_in6 *sin6;

                        sin6 = &sctp_ifa->address.sin6;
                        if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
                            continue;
                        }
                        /* skip addresses not visible in our jail */
                        if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
                            &sin6->sin6_addr) != 0) {
                            continue;
                        }
                        if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
                            if (local_scope == 0)
                                continue;
                            if (sin6->sin6_scope_id == 0) {
                                if (sa6_recoverscope(sin6) != 0)
                                    /* bad link local address */
                                    continue;
                            }
                        }
                        if ((site_scope == 0) &&
                            (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
                            continue;
                        }
                        /* count this one */
                        count++;
                    }
                    break;
#endif
                default:
                    /* TSNH */
                    break;
                }
            }
        }
    } else {
        /*
         * subset bound case: only the endpoint's explicit address
         * list matters.
         */
        struct sctp_laddr *laddr;

        LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
            sctp_nxt_addr) {
            if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
                continue;
            }
            /* count this one */
            count++;
        }
    }
    SCTP_IPI_ADDR_RUNLOCK();
    return (count);
}
6878
6879 #if defined(SCTP_LOCAL_TRACE_BUF)
6880
/*
 * Append one entry to the global circular SCTP trace log.  A slot index
 * is claimed with an atomic compare-and-swap so concurrent tracers never
 * share a slot; the entry itself is then filled in without a lock.
 */
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
    uint32_t saveindex, newindex;

    /* Reserve the next slot, wrapping the shared index at the buffer end. */
    do {
        saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
        if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
            newindex = 1;
        } else {
            newindex = saveindex + 1;
        }
    } while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
    /* An out-of-range claimed index maps to slot 0 (the wrap point). */
    if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
        saveindex = 0;
    }
    SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
    SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
    SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
    SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
    SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
    SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
    SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
    SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}
6906
6907 #endif
/*
 * Receive callback for the SCTP-over-UDP tunneling sockets.  Strips the
 * UDP header from the packet, adjusts the IP length field, and hands the
 * remaining SCTP packet to the normal input path, carrying the UDP source
 * port so replies can be encapsulated back.  Consumes 'm' on error.
 */
static void
sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
    const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
{
    struct ip *iph;
#ifdef INET6
    struct ip6_hdr *ip6;
#endif
    struct mbuf *sp, *last;
    struct udphdr *uhdr;
    uint16_t port;

    if ((m->m_flags & M_PKTHDR) == 0) {
        /* Can't handle one that is not a pkt hdr */
        goto out;
    }
    /* Pull the src port */
    iph = mtod(m, struct ip *);
    uhdr = (struct udphdr *)((caddr_t)iph + off);
    port = uhdr->uh_sport;
    /*
     * Split out the mbuf chain. Leave the IP header in m, place the
     * rest in the sp.
     */
    sp = m_split(m, off, M_NOWAIT);
    if (sp == NULL) {
        /* Gak, drop packet, we can't do a split */
        goto out;
    }
    if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
        /* Gak, packet can't have an SCTP header in it - too small */
        m_freem(sp);
        goto out;
    }
    /* Now pull up the UDP header and SCTP header together */
    sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
    if (sp == NULL) {
        /* Gak pullup failed */
        goto out;
    }
    /* Trim out the UDP header */
    m_adj(sp, sizeof(struct udphdr));

    /* Now reconstruct the mbuf chain */
    for (last = m; last->m_next; last = last->m_next);
    last->m_next = sp;
    m->m_pkthdr.len += sp->m_pkthdr.len;
    /*
     * The CSUM_DATA_VALID flags indicates that the HW checked the UDP
     * checksum and it was valid. Since CSUM_DATA_VALID ==
     * CSUM_SCTP_VALID this would imply that the HW also verified the
     * SCTP checksum. Therefore, clear the bit.
     */
    SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
        "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n",
        m->m_pkthdr.len,
        if_name(m->m_pkthdr.rcvif),
        (int)m->m_pkthdr.csum_flags, CSUM_BITS);
    m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
    iph = mtod(m, struct ip *);
    switch (iph->ip_v) {
#ifdef INET
    case IPVERSION:
        /* Shrink the IP total length to account for the removed UDP header. */
        iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
        sctp_input_with_port(m, off, port);
        break;
#endif
#ifdef INET6
    case IPV6_VERSION >> 4:
        ip6 = mtod(m, struct ip6_hdr *);
        /* Same adjustment for the IPv6 payload length. */
        ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
        sctp6_input_with_port(&m, &off, port);
        break;
#endif
    default:
        goto out;
        break;
    }
    return;
out:
    m_freem(m);
}
6990
6991 #ifdef INET
/*
 * ICMP error callback for the SCTP-over-UDP (IPv4) tunneling socket.
 * Locates the association for the embedded packet, validates the UDP
 * ports and the SCTP verification tag (or, for a zero tag, the INIT
 * chunk's initiate tag) to reject spoofed errors, then forwards the
 * ICMP type/code and next-hop MTU to sctp_notify().
 */
static void
sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED)
{
    struct ip *outer_ip, *inner_ip;
    struct sctphdr *sh;
    struct icmp *icmp;
    struct udphdr *udp;
    struct sctp_inpcb *inp;
    struct sctp_tcb *stcb;
    struct sctp_nets *net;
    struct sctp_init_chunk *ch;
    struct sockaddr_in src, dst;
    uint8_t type, code;

    /* Recover the ICMP and outer IP headers from the inner-IP pointer. */
    inner_ip = (struct ip *)vip;
    icmp = (struct icmp *)((caddr_t)inner_ip -
        (sizeof(struct icmp) - sizeof(struct ip)));
    outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip));
    /* Make sure the quoted packet covers UDP + 8 bytes of SCTP header. */
    if (ntohs(outer_ip->ip_len) <
        sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) {
        return;
    }
    udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2));
    sh = (struct sctphdr *)(udp + 1);
    memset(&src, 0, sizeof(struct sockaddr_in));
    src.sin_family = AF_INET;
    src.sin_len = sizeof(struct sockaddr_in);
    src.sin_port = sh->src_port;
    src.sin_addr = inner_ip->ip_src;
    memset(&dst, 0, sizeof(struct sockaddr_in));
    dst.sin_family = AF_INET;
    dst.sin_len = sizeof(struct sockaddr_in);
    dst.sin_port = sh->dest_port;
    dst.sin_addr = inner_ip->ip_dst;
    /*
     * 'dst' holds the dest of the packet that failed to be sent. 'src'
     * holds our local endpoint address. Thus we reverse the dst and the
     * src in the lookup.
     */
    inp = NULL;
    net = NULL;
    stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
        (struct sockaddr *)&src,
        &inp, &net, 1,
        SCTP_DEFAULT_VRFID);
    if ((stcb != NULL) &&
        (net != NULL) &&
        (inp != NULL)) {
        /* Check the UDP port numbers */
        if ((udp->uh_dport != net->port) ||
            (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
            SCTP_TCB_UNLOCK(stcb);
            return;
        }
        /* Check the verification tag */
        if (ntohl(sh->v_tag) != 0) {
            /*
             * This must be the verification tag used for
             * sending out packets. We don't consider packets
             * reflecting the verification tag.
             */
            if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) {
                SCTP_TCB_UNLOCK(stcb);
                return;
            }
        } else {
            if (ntohs(outer_ip->ip_len) >=
                sizeof(struct ip) +
                8 + (inner_ip->ip_hl << 2) + 8 + 20) {
                /*
                 * In this case we can check if we got an
                 * INIT chunk and if the initiate tag
                 * matches.
                 */
                ch = (struct sctp_init_chunk *)(sh + 1);
                if ((ch->ch.chunk_type != SCTP_INITIATION) ||
                    (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) {
                    SCTP_TCB_UNLOCK(stcb);
                    return;
                }
            } else {
                SCTP_TCB_UNLOCK(stcb);
                return;
            }
        }
        type = icmp->icmp_type;
        code = icmp->icmp_code;
        /*
         * UDP port unreachable means the tunnel endpoint is gone;
         * report it as protocol unreachable to the SCTP layer.
         */
        if ((type == ICMP_UNREACH) &&
            (code == ICMP_UNREACH_PORT)) {
            code = ICMP_UNREACH_PROTOCOL;
        }
        sctp_notify(inp, stcb, net, type, code,
            ntohs(inner_ip->ip_len),
            (uint32_t)ntohs(icmp->icmp_nextmtu));
    } else {
        if ((stcb == NULL) && (inp != NULL)) {
            /* reduce ref-count */
            SCTP_INP_WLOCK(inp);
            SCTP_INP_DECR_REF(inp);
            SCTP_INP_WUNLOCK(inp);
        }
        if (stcb) {
            SCTP_TCB_UNLOCK(stcb);
        }
    }
    return;
}
7099 #endif
7100
7101 #ifdef INET6
/*
 * ICMPv6 error callback for the SCTP-over-UDP (IPv6) tunneling socket.
 * Copies the quoted UDP and SCTP headers out of the mbuf, locates the
 * association, validates the UDP ports and the SCTP verification tag
 * (or, for a zero tag, the quoted INIT chunk's initiate tag) to reject
 * spoofed errors, then forwards the ICMPv6 type/code and MTU to
 * sctp6_notify().
 */
static void
sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED)
{
    struct ip6ctlparam *ip6cp;
    struct sctp_inpcb *inp;
    struct sctp_tcb *stcb;
    struct sctp_nets *net;
    struct sctphdr sh;
    struct udphdr udp;
    struct sockaddr_in6 src, dst;
    uint8_t type, code;

    ip6cp = (struct ip6ctlparam *)d;
    /*
     * XXX: We assume that when IPV6 is non NULL, M and OFF are valid.
     */
    if (ip6cp->ip6c_m == NULL) {
        return;
    }
    /*
     * Check if we can safely examine the ports and the verification tag
     * of the SCTP common header.
     */
    if (ip6cp->ip6c_m->m_pkthdr.len <
        ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) {
        return;
    }
    /* Copy out the UDP header. */
    memset(&udp, 0, sizeof(struct udphdr));
    m_copydata(ip6cp->ip6c_m,
        ip6cp->ip6c_off,
        sizeof(struct udphdr),
        (caddr_t)&udp);
    /* Copy out the port numbers and the verification tag. */
    memset(&sh, 0, sizeof(struct sctphdr));
    m_copydata(ip6cp->ip6c_m,
        ip6cp->ip6c_off + sizeof(struct udphdr),
        sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t),
        (caddr_t)&sh);
    memset(&src, 0, sizeof(struct sockaddr_in6));
    src.sin6_family = AF_INET6;
    src.sin6_len = sizeof(struct sockaddr_in6);
    src.sin6_port = sh.src_port;
    src.sin6_addr = ip6cp->ip6c_ip6->ip6_src;
    /* Re-attach the embedded scope zone before doing the lookup. */
    if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
        return;
    }
    memset(&dst, 0, sizeof(struct sockaddr_in6));
    dst.sin6_family = AF_INET6;
    dst.sin6_len = sizeof(struct sockaddr_in6);
    dst.sin6_port = sh.dest_port;
    dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst;
    if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
        return;
    }
    /*
     * 'dst' is the destination of the failed packet, 'src' our local
     * endpoint, so the lookup reverses them.
     */
    inp = NULL;
    net = NULL;
    stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
        (struct sockaddr *)&src,
        &inp, &net, 1, SCTP_DEFAULT_VRFID);
    if ((stcb != NULL) &&
        (net != NULL) &&
        (inp != NULL)) {
        /* Check the UDP port numbers */
        if ((udp.uh_dport != net->port) ||
            (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
            SCTP_TCB_UNLOCK(stcb);
            return;
        }
        /* Check the verification tag */
        if (ntohl(sh.v_tag) != 0) {
            /*
             * This must be the verification tag used for
             * sending out packets. We don't consider packets
             * reflecting the verification tag.
             */
            if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) {
                SCTP_TCB_UNLOCK(stcb);
                return;
            }
        } else {
            if (ip6cp->ip6c_m->m_pkthdr.len >=
                ip6cp->ip6c_off + sizeof(struct udphdr) +
                sizeof(struct sctphdr) +
                sizeof(struct sctp_chunkhdr) +
                offsetof(struct sctp_init, a_rwnd)) {
                /*
                 * In this case we can check if we got an
                 * INIT chunk and if the initiate tag
                 * matches.
                 */
                uint32_t initiate_tag;
                uint8_t chunk_type;

                m_copydata(ip6cp->ip6c_m,
                    ip6cp->ip6c_off +
                    sizeof(struct udphdr) +
                    sizeof(struct sctphdr),
                    sizeof(uint8_t),
                    (caddr_t)&chunk_type);
                m_copydata(ip6cp->ip6c_m,
                    ip6cp->ip6c_off +
                    sizeof(struct udphdr) +
                    sizeof(struct sctphdr) +
                    sizeof(struct sctp_chunkhdr),
                    sizeof(uint32_t),
                    (caddr_t)&initiate_tag);
                if ((chunk_type != SCTP_INITIATION) ||
                    (ntohl(initiate_tag) != stcb->asoc.my_vtag)) {
                    SCTP_TCB_UNLOCK(stcb);
                    return;
                }
            } else {
                SCTP_TCB_UNLOCK(stcb);
                return;
            }
        }
        type = ip6cp->ip6c_icmp6->icmp6_type;
        code = ip6cp->ip6c_icmp6->icmp6_code;
        /*
         * UDP port unreachable means the tunnel endpoint is gone;
         * report it as a next-header parameter problem to the SCTP
         * layer.
         */
        if ((type == ICMP6_DST_UNREACH) &&
            (code == ICMP6_DST_UNREACH_NOPORT)) {
            type = ICMP6_PARAM_PROB;
            code = ICMP6_PARAMPROB_NEXTHEADER;
        }
        sctp6_notify(inp, stcb, net, type, code,
            ntohl(ip6cp->ip6c_icmp6->icmp6_mtu));
    } else {
        if ((stcb == NULL) && (inp != NULL)) {
            /* reduce inp's ref-count */
            SCTP_INP_WLOCK(inp);
            SCTP_INP_DECR_REF(inp);
            SCTP_INP_WUNLOCK(inp);
        }
        if (stcb) {
            SCTP_TCB_UNLOCK(stcb);
        }
    }
}
7240 #endif
7241
/*
 * Tear down the SCTP-over-UDP tunneling sockets (IPv4 and/or IPv6),
 * closing and clearing whichever ones are open.
 */
void
sctp_over_udp_stop(void)
{
    /*
     * This function assumes sysctl caller holds sctp_sysctl_info_lock()
     * for writing!
     */
#ifdef INET
    if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
        soclose(SCTP_BASE_INFO(udp4_tun_socket));
        SCTP_BASE_INFO(udp4_tun_socket) = NULL;
    }
#endif
#ifdef INET6
    if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
        soclose(SCTP_BASE_INFO(udp6_tun_socket));
        SCTP_BASE_INFO(udp6_tun_socket) = NULL;
    }
#endif
}
7262
/*
 * Bring up the SCTP-over-UDP tunneling sockets on the sysctl-configured
 * port: create a kernel UDP socket per address family, register the
 * tunneling receive/ICMP callbacks, and bind to the port.  On any
 * failure everything already set up is torn down via
 * sctp_over_udp_stop() and an errno value is returned.
 */
int
sctp_over_udp_start(void)
{
    uint16_t port;
    int ret;
#ifdef INET
    struct sockaddr_in sin;
#endif
#ifdef INET6
    struct sockaddr_in6 sin6;
#endif
    /*
     * This function assumes sysctl caller holds sctp_sysctl_info_lock()
     * for writing!
     */
    port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
    if (ntohs(port) == 0) {
        /* Must have a port set */
        return (EINVAL);
    }
#ifdef INET
    if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
        /* Already running -- must stop first */
        return (EALREADY);
    }
#endif
#ifdef INET6
    if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
        /* Already running -- must stop first */
        return (EALREADY);
    }
#endif
#ifdef INET
    if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
        SOCK_DGRAM, IPPROTO_UDP,
        curthread->td_ucred, curthread))) {
        sctp_over_udp_stop();
        return (ret);
    }
    /* Call the special UDP hook. */
    if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
        sctp_recv_udp_tunneled_packet,
        sctp_recv_icmp_tunneled_packet,
        NULL))) {
        sctp_over_udp_stop();
        return (ret);
    }
    /* Ok, we have a socket, bind it to the port. */
    memset(&sin, 0, sizeof(struct sockaddr_in));
    sin.sin_len = sizeof(struct sockaddr_in);
    sin.sin_family = AF_INET;
    sin.sin_port = htons(port);
    if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
        (struct sockaddr *)&sin, curthread))) {
        sctp_over_udp_stop();
        return (ret);
    }
#endif
#ifdef INET6
    if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
        SOCK_DGRAM, IPPROTO_UDP,
        curthread->td_ucred, curthread))) {
        sctp_over_udp_stop();
        return (ret);
    }
    /* Call the special UDP hook. */
    if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
        sctp_recv_udp_tunneled_packet,
        sctp_recv_icmp6_tunneled_packet,
        NULL))) {
        sctp_over_udp_stop();
        return (ret);
    }
    /* Ok, we have a socket, bind it to the port. */
    memset(&sin6, 0, sizeof(struct sockaddr_in6));
    sin6.sin6_len = sizeof(struct sockaddr_in6);
    sin6.sin6_family = AF_INET6;
    sin6.sin6_port = htons(port);
    if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
        (struct sockaddr *)&sin6, curthread))) {
        sctp_over_udp_stop();
        return (ret);
    }
#endif
    return (0);
}
7349
7350 #if defined(INET6) || defined(INET)
7351
7352 /*
7353 * sctp_min_mtu ()returns the minimum of all non-zero arguments.
7354 * If all arguments are zero, zero is returned.
7355 */
7356 uint32_t
7357 sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
7358 {
7359 if (mtu1 > 0) {
7360 if (mtu2 > 0) {
7361 if (mtu3 > 0) {
7362 return (min(mtu1, min(mtu2, mtu3)));
7363 } else {
7364 return (min(mtu1, mtu2));
7365 }
7366 } else {
7367 if (mtu3 > 0) {
7368 return (min(mtu1, mtu3));
7369 } else {
7370 return (mtu1);
7371 }
7372 }
7373 } else {
7374 if (mtu2 > 0) {
7375 if (mtu3 > 0) {
7376 return (min(mtu2, mtu3));
7377 } else {
7378 return (mtu2);
7379 }
7380 } else {
7381 return (mtu3);
7382 }
7383 }
7384 }
7385
7386 void
7387 sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
7388 {
7389 struct in_conninfo inc;
7390
7391 memset(&inc, 0, sizeof(struct in_conninfo));
7392 inc.inc_fibnum = fibnum;
7393 switch (addr->sa.sa_family) {
7394 #ifdef INET
7395 case AF_INET:
7396 inc.inc_faddr = addr->sin.sin_addr;
7397 break;
7398 #endif
7399 #ifdef INET6
7400 case AF_INET6:
7401 inc.inc_flags |= INC_ISIPV6;
7402 inc.inc6_faddr = addr->sin6.sin6_addr;
7403 break;
7404 #endif
7405 default:
7406 return;
7407 }
7408 tcp_hc_updatemtu(&inc, (u_long)mtu);
7409 }
7410
7411 uint32_t
7412 sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
7413 {
7414 struct in_conninfo inc;
7415
7416 memset(&inc, 0, sizeof(struct in_conninfo));
7417 inc.inc_fibnum = fibnum;
7418 switch (addr->sa.sa_family) {
7419 #ifdef INET
7420 case AF_INET:
7421 inc.inc_faddr = addr->sin.sin_addr;
7422 break;
7423 #endif
7424 #ifdef INET6
7425 case AF_INET6:
7426 inc.inc_flags |= INC_ISIPV6;
7427 inc.inc6_faddr = addr->sin6.sin6_addr;
7428 break;
7429 #endif
7430 default:
7431 return (0);
7432 }
7433 return ((uint32_t)tcp_hc_getmtu(&inc));
7434 }
7435 #endif
Cache object: 3582d859cecfb332cfd526488d17d4a3
|