1 /*-
2 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 *
9 * a) Redistributions of source code must retain the above copyright notice,
10 * this list of conditions and the following disclaimer.
11 *
12 * b) Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the distribution.
15 *
16 * c) Neither the name of Cisco Systems, Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD: releng/11.2/sys/netinet/sctputil.c 334441 2018-05-31 16:14:45Z tuexen $");
35
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #include <netinet6/sctp6_var.h>
43 #endif
44 #include <netinet/sctp_header.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_timer.h>
48 #include <netinet/sctp_indata.h>
49 #include <netinet/sctp_auth.h>
50 #include <netinet/sctp_asconf.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #if defined(INET6) || defined(INET)
53 #include <netinet/tcp_var.h>
54 #endif
55 #include <netinet/udp.h>
56 #include <netinet/udp_var.h>
57 #include <sys/proc.h>
58 #ifdef INET6
59 #include <netinet/icmp6.h>
60 #endif
61
62
63 #ifndef KTR_SCTP
64 #define KTR_SCTP KTR_SUBSYS
65 #endif
66
67 extern const struct sctp_cc_functions sctp_cc_functions[];
68 extern const struct sctp_ss_functions sctp_ss_functions[];
69
70 void
71 sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
72 {
73 struct sctp_cwnd_log sctp_clog;
74
75 sctp_clog.x.sb.stcb = stcb;
76 sctp_clog.x.sb.so_sbcc = sb->sb_cc;
77 if (stcb)
78 sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
79 else
80 sctp_clog.x.sb.stcb_sbcc = 0;
81 sctp_clog.x.sb.incr = incr;
82 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
83 SCTP_LOG_EVENT_SB,
84 from,
85 sctp_clog.x.misc.log1,
86 sctp_clog.x.misc.log2,
87 sctp_clog.x.misc.log3,
88 sctp_clog.x.misc.log4);
89 }
90
91 void
92 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
93 {
94 struct sctp_cwnd_log sctp_clog;
95
96 sctp_clog.x.close.inp = (void *)inp;
97 sctp_clog.x.close.sctp_flags = inp->sctp_flags;
98 if (stcb) {
99 sctp_clog.x.close.stcb = (void *)stcb;
100 sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
101 } else {
102 sctp_clog.x.close.stcb = 0;
103 sctp_clog.x.close.state = 0;
104 }
105 sctp_clog.x.close.loc = loc;
106 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
107 SCTP_LOG_EVENT_CLOSE,
108 0,
109 sctp_clog.x.misc.log1,
110 sctp_clog.x.misc.log2,
111 sctp_clog.x.misc.log3,
112 sctp_clog.x.misc.log4);
113 }
114
115 void
116 rto_logging(struct sctp_nets *net, int from)
117 {
118 struct sctp_cwnd_log sctp_clog;
119
120 memset(&sctp_clog, 0, sizeof(sctp_clog));
121 sctp_clog.x.rto.net = (void *)net;
122 sctp_clog.x.rto.rtt = net->rtt / 1000;
123 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
124 SCTP_LOG_EVENT_RTT,
125 from,
126 sctp_clog.x.misc.log1,
127 sctp_clog.x.misc.log2,
128 sctp_clog.x.misc.log3,
129 sctp_clog.x.misc.log4);
130 }
131
132 void
133 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
134 {
135 struct sctp_cwnd_log sctp_clog;
136
137 sctp_clog.x.strlog.stcb = stcb;
138 sctp_clog.x.strlog.n_tsn = tsn;
139 sctp_clog.x.strlog.n_sseq = sseq;
140 sctp_clog.x.strlog.e_tsn = 0;
141 sctp_clog.x.strlog.e_sseq = 0;
142 sctp_clog.x.strlog.strm = stream;
143 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
144 SCTP_LOG_EVENT_STRM,
145 from,
146 sctp_clog.x.misc.log1,
147 sctp_clog.x.misc.log2,
148 sctp_clog.x.misc.log3,
149 sctp_clog.x.misc.log4);
150 }
151
152 void
153 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
154 {
155 struct sctp_cwnd_log sctp_clog;
156
157 sctp_clog.x.nagle.stcb = (void *)stcb;
158 sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
159 sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
160 sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
161 sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
162 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
163 SCTP_LOG_EVENT_NAGLE,
164 action,
165 sctp_clog.x.misc.log1,
166 sctp_clog.x.misc.log2,
167 sctp_clog.x.misc.log3,
168 sctp_clog.x.misc.log4);
169 }
170
171 void
172 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
173 {
174 struct sctp_cwnd_log sctp_clog;
175
176 sctp_clog.x.sack.cumack = cumack;
177 sctp_clog.x.sack.oldcumack = old_cumack;
178 sctp_clog.x.sack.tsn = tsn;
179 sctp_clog.x.sack.numGaps = gaps;
180 sctp_clog.x.sack.numDups = dups;
181 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
182 SCTP_LOG_EVENT_SACK,
183 from,
184 sctp_clog.x.misc.log1,
185 sctp_clog.x.misc.log2,
186 sctp_clog.x.misc.log3,
187 sctp_clog.x.misc.log4);
188 }
189
190 void
191 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
192 {
193 struct sctp_cwnd_log sctp_clog;
194
195 memset(&sctp_clog, 0, sizeof(sctp_clog));
196 sctp_clog.x.map.base = map;
197 sctp_clog.x.map.cum = cum;
198 sctp_clog.x.map.high = high;
199 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
200 SCTP_LOG_EVENT_MAP,
201 from,
202 sctp_clog.x.misc.log1,
203 sctp_clog.x.misc.log2,
204 sctp_clog.x.misc.log3,
205 sctp_clog.x.misc.log4);
206 }
207
208 void
209 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
210 {
211 struct sctp_cwnd_log sctp_clog;
212
213 memset(&sctp_clog, 0, sizeof(sctp_clog));
214 sctp_clog.x.fr.largest_tsn = biggest_tsn;
215 sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
216 sctp_clog.x.fr.tsn = tsn;
217 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
218 SCTP_LOG_EVENT_FR,
219 from,
220 sctp_clog.x.misc.log1,
221 sctp_clog.x.misc.log2,
222 sctp_clog.x.misc.log3,
223 sctp_clog.x.misc.log4);
224 }
225
226 #ifdef SCTP_MBUF_LOGGING
227 void
228 sctp_log_mb(struct mbuf *m, int from)
229 {
230 struct sctp_cwnd_log sctp_clog;
231
232 sctp_clog.x.mb.mp = m;
233 sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
234 sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
235 sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
236 if (SCTP_BUF_IS_EXTENDED(m)) {
237 sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
238 sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
239 } else {
240 sctp_clog.x.mb.ext = 0;
241 sctp_clog.x.mb.refcnt = 0;
242 }
243 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
244 SCTP_LOG_EVENT_MBUF,
245 from,
246 sctp_clog.x.misc.log1,
247 sctp_clog.x.misc.log2,
248 sctp_clog.x.misc.log3,
249 sctp_clog.x.misc.log4);
250 }
251
252 void
253 sctp_log_mbc(struct mbuf *m, int from)
254 {
255 struct mbuf *mat;
256
257 for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
258 sctp_log_mb(mat, from);
259 }
260 }
261 #endif
262
263 void
264 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
265 {
266 struct sctp_cwnd_log sctp_clog;
267
268 if (control == NULL) {
269 SCTP_PRINTF("Gak log of NULL?\n");
270 return;
271 }
272 sctp_clog.x.strlog.stcb = control->stcb;
273 sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
274 sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
275 sctp_clog.x.strlog.strm = control->sinfo_stream;
276 if (poschk != NULL) {
277 sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
278 sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
279 } else {
280 sctp_clog.x.strlog.e_tsn = 0;
281 sctp_clog.x.strlog.e_sseq = 0;
282 }
283 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
284 SCTP_LOG_EVENT_STRM,
285 from,
286 sctp_clog.x.misc.log1,
287 sctp_clog.x.misc.log2,
288 sctp_clog.x.misc.log3,
289 sctp_clog.x.misc.log4);
290 }
291
292 void
293 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
294 {
295 struct sctp_cwnd_log sctp_clog;
296
297 sctp_clog.x.cwnd.net = net;
298 if (stcb->asoc.send_queue_cnt > 255)
299 sctp_clog.x.cwnd.cnt_in_send = 255;
300 else
301 sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
302 if (stcb->asoc.stream_queue_cnt > 255)
303 sctp_clog.x.cwnd.cnt_in_str = 255;
304 else
305 sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
306
307 if (net) {
308 sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
309 sctp_clog.x.cwnd.inflight = net->flight_size;
310 sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
311 sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
312 sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
313 }
314 if (SCTP_CWNDLOG_PRESEND == from) {
315 sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
316 }
317 sctp_clog.x.cwnd.cwnd_augment = augment;
318 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
319 SCTP_LOG_EVENT_CWND,
320 from,
321 sctp_clog.x.misc.log1,
322 sctp_clog.x.misc.log2,
323 sctp_clog.x.misc.log3,
324 sctp_clog.x.misc.log4);
325 }
326
/*
 * Snapshot the ownership state of every mutex relevant to this inp/stcb
 * pair and emit one trace record.  Both 'inp' and 'stcb' may be NULL;
 * unknown states are recorded as SCTP_LOCK_UNKNOWN.
 */
void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;

	} else {
		sctp_clog.x.lock.sock = (void *)NULL;
	}
	sctp_clog.x.lock.inp = (void *)inp;
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	/* Global endpoint-info lock: write-owned check on the rwlock. */
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
	if (inp && (inp->sctp_socket)) {
		/*
		 * NOTE(review): sock_lock and sockrcvbuf_lock both test
		 * so_rcv.sb_mtx here — confirm whether sock_lock was meant
		 * to test the socket's own lock instead.
		 */
		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
370
371 void
372 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
373 {
374 struct sctp_cwnd_log sctp_clog;
375
376 memset(&sctp_clog, 0, sizeof(sctp_clog));
377 sctp_clog.x.cwnd.net = net;
378 sctp_clog.x.cwnd.cwnd_new_value = error;
379 sctp_clog.x.cwnd.inflight = net->flight_size;
380 sctp_clog.x.cwnd.cwnd_augment = burst;
381 if (stcb->asoc.send_queue_cnt > 255)
382 sctp_clog.x.cwnd.cnt_in_send = 255;
383 else
384 sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
385 if (stcb->asoc.stream_queue_cnt > 255)
386 sctp_clog.x.cwnd.cnt_in_str = 255;
387 else
388 sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
389 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
390 SCTP_LOG_EVENT_MAXBURST,
391 from,
392 sctp_clog.x.misc.log1,
393 sctp_clog.x.misc.log2,
394 sctp_clog.x.misc.log3,
395 sctp_clog.x.misc.log4);
396 }
397
398 void
399 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
400 {
401 struct sctp_cwnd_log sctp_clog;
402
403 sctp_clog.x.rwnd.rwnd = peers_rwnd;
404 sctp_clog.x.rwnd.send_size = snd_size;
405 sctp_clog.x.rwnd.overhead = overhead;
406 sctp_clog.x.rwnd.new_rwnd = 0;
407 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
408 SCTP_LOG_EVENT_RWND,
409 from,
410 sctp_clog.x.misc.log1,
411 sctp_clog.x.misc.log2,
412 sctp_clog.x.misc.log3,
413 sctp_clog.x.misc.log4);
414 }
415
416 void
417 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
418 {
419 struct sctp_cwnd_log sctp_clog;
420
421 sctp_clog.x.rwnd.rwnd = peers_rwnd;
422 sctp_clog.x.rwnd.send_size = flight_size;
423 sctp_clog.x.rwnd.overhead = overhead;
424 sctp_clog.x.rwnd.new_rwnd = a_rwndval;
425 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
426 SCTP_LOG_EVENT_RWND,
427 from,
428 sctp_clog.x.misc.log1,
429 sctp_clog.x.misc.log2,
430 sctp_clog.x.misc.log3,
431 sctp_clog.x.misc.log4);
432 }
433
434 #ifdef SCTP_MBCNT_LOGGING
435 static void
436 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
437 {
438 struct sctp_cwnd_log sctp_clog;
439
440 sctp_clog.x.mbcnt.total_queue_size = total_oq;
441 sctp_clog.x.mbcnt.size_change = book;
442 sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
443 sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
444 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
445 SCTP_LOG_EVENT_MBCNT,
446 from,
447 sctp_clog.x.misc.log1,
448 sctp_clog.x.misc.log2,
449 sctp_clog.x.misc.log3,
450 sctp_clog.x.misc.log4);
451 }
452 #endif
453
/*
 * Emit a generic trace record of four caller-supplied 32-bit values,
 * tagged with the 'from' location code.
 */
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
}
462
463 void
464 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
465 {
466 struct sctp_cwnd_log sctp_clog;
467
468 sctp_clog.x.wake.stcb = (void *)stcb;
469 sctp_clog.x.wake.wake_cnt = wake_cnt;
470 sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
471 sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
472 sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
473
474 if (stcb->asoc.stream_queue_cnt < 0xff)
475 sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt;
476 else
477 sctp_clog.x.wake.stream_qcnt = 0xff;
478
479 if (stcb->asoc.chunks_on_out_queue < 0xff)
480 sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue;
481 else
482 sctp_clog.x.wake.chunks_on_oque = 0xff;
483
484 sctp_clog.x.wake.sctpflags = 0;
485 /* set in the defered mode stuff */
486 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
487 sctp_clog.x.wake.sctpflags |= 1;
488 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
489 sctp_clog.x.wake.sctpflags |= 2;
490 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
491 sctp_clog.x.wake.sctpflags |= 4;
492 /* what about the sb */
493 if (stcb->sctp_socket) {
494 struct socket *so = stcb->sctp_socket;
495
496 sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
497 } else {
498 sctp_clog.x.wake.sbflags = 0xff;
499 }
500 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
501 SCTP_LOG_EVENT_WAKE,
502 from,
503 sctp_clog.x.misc.log1,
504 sctp_clog.x.misc.log2,
505 sctp_clog.x.misc.log3,
506 sctp_clog.x.misc.log4);
507 }
508
509 void
510 sctp_log_block(uint8_t from, struct sctp_association *asoc, size_t sendlen)
511 {
512 struct sctp_cwnd_log sctp_clog;
513
514 sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
515 sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
516 sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
517 sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt;
518 sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue;
519 sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024);
520 sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
521 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
522 SCTP_LOG_EVENT_BLOCK,
523 from,
524 sctp_clog.x.misc.log1,
525 sctp_clog.x.misc.log2,
526 sctp_clog.x.misc.log3,
527 sctp_clog.x.misc.log4);
528 }
529
/*
 * Stub for the SCTP_GET_STAT_LOG socket option: statistics are collected
 * via the KTR trace buffer instead, so there is nothing to copy out.
 * Always returns 0 (success).
 */
int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
536
537 #ifdef SCTP_AUDITING_ENABLED
/* Ring buffer of (event, detail) byte pairs recorded by the audit code. */
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
/* Next write slot in sctp_audit_data; wraps to 0 at SCTP_AUDIT_SIZE. */
static int sctp_audit_indx = 0;
540
541 static
542 void
543 sctp_print_audit_report(void)
544 {
545 int i;
546 int cnt;
547
548 cnt = 0;
549 for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
550 if ((sctp_audit_data[i][0] == 0xe0) &&
551 (sctp_audit_data[i][1] == 0x01)) {
552 cnt = 0;
553 SCTP_PRINTF("\n");
554 } else if (sctp_audit_data[i][0] == 0xf0) {
555 cnt = 0;
556 SCTP_PRINTF("\n");
557 } else if ((sctp_audit_data[i][0] == 0xc0) &&
558 (sctp_audit_data[i][1] == 0x01)) {
559 SCTP_PRINTF("\n");
560 cnt = 0;
561 }
562 SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
563 (uint32_t)sctp_audit_data[i][1]);
564 cnt++;
565 if ((cnt % 14) == 0)
566 SCTP_PRINTF("\n");
567 }
568 for (i = 0; i < sctp_audit_indx; i++) {
569 if ((sctp_audit_data[i][0] == 0xe0) &&
570 (sctp_audit_data[i][1] == 0x01)) {
571 cnt = 0;
572 SCTP_PRINTF("\n");
573 } else if (sctp_audit_data[i][0] == 0xf0) {
574 cnt = 0;
575 SCTP_PRINTF("\n");
576 } else if ((sctp_audit_data[i][0] == 0xc0) &&
577 (sctp_audit_data[i][1] == 0x01)) {
578 SCTP_PRINTF("\n");
579 cnt = 0;
580 }
581 SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
582 (uint32_t)sctp_audit_data[i][1]);
583 cnt++;
584 if ((cnt % 14) == 0)
585 SCTP_PRINTF("\n");
586 }
587 SCTP_PRINTF("\n");
588 }
589
/*
 * Cross-check (and repair) the association's retransmit count, total
 * flight size/count, and per-destination flight sizes against the actual
 * contents of the sent queue.  Every step and every discrepancy is
 * recorded in the audit ring buffer; if any discrepancy was found a full
 * audit report is printed at the end.  'inp' or 'stcb' being NULL is
 * recorded and aborts the audit.
 */
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	/* 0xAA marks entry into the auditor; low byte is the caller code. */
	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		/* 0xAF/0x01: audit aborted, no endpoint. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		/* 0xAF/0x02: audit aborted, no association. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	/* 0xA1: record the association's current retransmit count. */
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	/* Recount retransmits and in-flight bytes/chunks from the queue. */
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		/* 0xAF/0xA1: retransmit-count mismatch; fix it up. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA2: total-flight mismatch; correct it. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		/* 0xAF/0xA5: flight chunk-count mismatch; correct it. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	/* Second pass: sum per-destination flight sizes. */
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA3: per-net sums disagree with the total. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    (void *)lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}
719
720 void
721 sctp_audit_log(uint8_t ev, uint8_t fd)
722 {
723
724 sctp_audit_data[sctp_audit_indx][0] = ev;
725 sctp_audit_data[sctp_audit_indx][1] = fd;
726 sctp_audit_indx++;
727 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
728 sctp_audit_indx = 0;
729 }
730 }
731
732 #endif
733
734 /*
735 * sctp_stop_timers_for_shutdown() should be called
736 * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
737 * state to make sure that all timers are stopped.
738 */
739 void
740 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
741 {
742 struct sctp_association *asoc;
743 struct sctp_nets *net;
744
745 asoc = &stcb->asoc;
746
747 (void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
748 (void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
749 (void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
750 (void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
751 (void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
752 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
753 (void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
754 (void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
755 }
756 }
757
758 /*
759 * a list of sizes based on typical mtu's, used only if next hop size not
760 * returned.
761 */
/*
 * Must remain sorted in strictly ascending order:
 * sctp_get_prev_mtu()/sctp_get_next_mtu() rely on the ordering when
 * scanning for a neighboring entry.
 */
static uint32_t sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};
782
783 /*
784 * Return the largest MTU smaller than val. If there is no
785 * entry, just return val.
786 */
787 uint32_t
788 sctp_get_prev_mtu(uint32_t val)
789 {
790 uint32_t i;
791
792 if (val <= sctp_mtu_sizes[0]) {
793 return (val);
794 }
795 for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
796 if (val <= sctp_mtu_sizes[i]) {
797 break;
798 }
799 }
800 return (sctp_mtu_sizes[i - 1]);
801 }
802
803 /*
804 * Return the smallest MTU larger than val. If there is no
805 * entry, just return val.
806 */
807 uint32_t
808 sctp_get_next_mtu(uint32_t val)
809 {
810 /* select another MTU that is just bigger than this one */
811 uint32_t i;
812
813 for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
814 if (val < sctp_mtu_sizes[i]) {
815 return (sctp_mtu_sizes[i]);
816 }
817 }
818 return (val);
819 }
820
/*
 * Refill the endpoint's random-number store by HMACing the seed material
 * with a monotonically increasing counter, and reset the read cursor.
 * Deliberately lockless; see the comment below.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
	    sizeof(m->random_counter), (uint8_t *)m->random_store);
	m->random_counter++;
}
839
/*
 * Return the next 32-bit value from the endpoint's random store,
 * claiming a slot with a lockless compare-and-swap on the read cursor
 * and refilling the store when the cursor wraps.  If the sysctl debug
 * sequence is set, return a simple increasing sequence instead.
 */
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use random selection process to get
	 * the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		/* Deterministic mode for debugging/testing. */
		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	store_at = inp->store_at;
	/* Claim the next 4-byte slot; wrap before running off the end. */
	new_store = store_at + sizeof(uint32_t);
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	/* CAS claims the slot; on contention another thread won, retry. */
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	/*
	 * NOTE(review): the bytes at store_at are read after the CAS, so a
	 * concurrent wrap+refill could rewrite them first — presumably
	 * acceptable here since any value is "random enough"; confirm.
	 */
	p = &inp->random_store[store_at];
	xp = (uint32_t *)p;
	x = *xp;
	return (x);
}
877
878 uint32_t
879 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
880 {
881 uint32_t x;
882 struct timeval now;
883
884 if (check) {
885 (void)SCTP_GETTIME_TIMEVAL(&now);
886 }
887 for (;;) {
888 x = sctp_select_initial_TSN(&inp->sctp_ep);
889 if (x == 0) {
890 /* we never use 0 */
891 continue;
892 }
893 if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
894 break;
895 }
896 }
897 return (x);
898 }
899
900 int32_t
901 sctp_map_assoc_state(int kernel_state)
902 {
903 int32_t user_state;
904
905 if (kernel_state & SCTP_STATE_WAS_ABORTED) {
906 user_state = SCTP_CLOSED;
907 } else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
908 user_state = SCTP_SHUTDOWN_PENDING;
909 } else {
910 switch (kernel_state & SCTP_STATE_MASK) {
911 case SCTP_STATE_EMPTY:
912 user_state = SCTP_CLOSED;
913 break;
914 case SCTP_STATE_INUSE:
915 user_state = SCTP_CLOSED;
916 break;
917 case SCTP_STATE_COOKIE_WAIT:
918 user_state = SCTP_COOKIE_WAIT;
919 break;
920 case SCTP_STATE_COOKIE_ECHOED:
921 user_state = SCTP_COOKIE_ECHOED;
922 break;
923 case SCTP_STATE_OPEN:
924 user_state = SCTP_ESTABLISHED;
925 break;
926 case SCTP_STATE_SHUTDOWN_SENT:
927 user_state = SCTP_SHUTDOWN_SENT;
928 break;
929 case SCTP_STATE_SHUTDOWN_RECEIVED:
930 user_state = SCTP_SHUTDOWN_RECEIVED;
931 break;
932 case SCTP_STATE_SHUTDOWN_ACK_SENT:
933 user_state = SCTP_SHUTDOWN_ACK_SENT;
934 break;
935 default:
936 user_state = SCTP_CLOSED;
937 break;
938 }
939 }
940 return (user_state);
941 }
942
943 int
944 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
945 uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
946 {
947 struct sctp_association *asoc;
948
949 /*
950 * Anything set to zero is taken care of by the allocation routine's
951 * bzero
952 */
953
954 /*
955 * Up front select what scoping to apply on addresses I tell my peer
956 * Not sure what to do with these right now, we will need to come up
957 * with a way to set them. We may need to pass them through from the
958 * caller in the sctp_aloc_assoc() function.
959 */
960 int i;
961 #if defined(SCTP_DETAILED_STR_STATS)
962 int j;
963 #endif
964
965 asoc = &stcb->asoc;
966 /* init all variables to a known value. */
967 SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
968 asoc->max_burst = inp->sctp_ep.max_burst;
969 asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
970 asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
971 asoc->cookie_life = inp->sctp_ep.def_cookie_life;
972 asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
973 asoc->ecn_supported = inp->ecn_supported;
974 asoc->prsctp_supported = inp->prsctp_supported;
975 asoc->idata_supported = inp->idata_supported;
976 asoc->auth_supported = inp->auth_supported;
977 asoc->asconf_supported = inp->asconf_supported;
978 asoc->reconfig_supported = inp->reconfig_supported;
979 asoc->nrsack_supported = inp->nrsack_supported;
980 asoc->pktdrop_supported = inp->pktdrop_supported;
981 asoc->idata_supported = inp->idata_supported;
982 asoc->sctp_cmt_pf = (uint8_t)0;
983 asoc->sctp_frag_point = inp->sctp_frag_point;
984 asoc->sctp_features = inp->sctp_features;
985 asoc->default_dscp = inp->sctp_ep.default_dscp;
986 asoc->max_cwnd = inp->max_cwnd;
987 #ifdef INET6
988 if (inp->sctp_ep.default_flowlabel) {
989 asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
990 } else {
991 if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
992 asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
993 asoc->default_flowlabel &= 0x000fffff;
994 asoc->default_flowlabel |= 0x80000000;
995 } else {
996 asoc->default_flowlabel = 0;
997 }
998 }
999 #endif
1000 asoc->sb_send_resv = 0;
1001 if (override_tag) {
1002 asoc->my_vtag = override_tag;
1003 } else {
1004 asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
1005 }
1006 /* Get the nonce tags */
1007 asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1008 asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1009 asoc->vrf_id = vrf_id;
1010
1011 #ifdef SCTP_ASOCLOG_OF_TSNS
1012 asoc->tsn_in_at = 0;
1013 asoc->tsn_out_at = 0;
1014 asoc->tsn_in_wrapped = 0;
1015 asoc->tsn_out_wrapped = 0;
1016 asoc->cumack_log_at = 0;
1017 asoc->cumack_log_atsnt = 0;
1018 #endif
1019 #ifdef SCTP_FS_SPEC_LOG
1020 asoc->fs_index = 0;
1021 #endif
1022 asoc->refcnt = 0;
1023 asoc->assoc_up_sent = 0;
1024 asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
1025 sctp_select_initial_TSN(&inp->sctp_ep);
1026 asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1027 /* we are optimisitic here */
1028 asoc->peer_supports_nat = 0;
1029 asoc->sent_queue_retran_cnt = 0;
1030
1031 /* for CMT */
1032 asoc->last_net_cmt_send_started = NULL;
1033
1034 /* This will need to be adjusted */
1035 asoc->last_acked_seq = asoc->init_seq_number - 1;
1036 asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1037 asoc->asconf_seq_in = asoc->last_acked_seq;
1038
1039 /* here we are different, we hold the next one we expect */
1040 asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
1041
1042 asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
1043 asoc->initial_rto = inp->sctp_ep.initial_rto;
1044
1045 asoc->default_mtu = inp->sctp_ep.default_mtu;
1046 asoc->max_init_times = inp->sctp_ep.max_init_times;
1047 asoc->max_send_times = inp->sctp_ep.max_send_times;
1048 asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1049 asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1050 asoc->free_chunk_cnt = 0;
1051
1052 asoc->iam_blocking = 0;
1053 asoc->context = inp->sctp_context;
1054 asoc->local_strreset_support = inp->local_strreset_support;
1055 asoc->def_send = inp->def_send;
1056 asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1057 asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1058 asoc->pr_sctp_cnt = 0;
1059 asoc->total_output_queue_size = 0;
1060
1061 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1062 asoc->scope.ipv6_addr_legal = 1;
1063 if (SCTP_IPV6_V6ONLY(inp) == 0) {
1064 asoc->scope.ipv4_addr_legal = 1;
1065 } else {
1066 asoc->scope.ipv4_addr_legal = 0;
1067 }
1068 } else {
1069 asoc->scope.ipv6_addr_legal = 0;
1070 asoc->scope.ipv4_addr_legal = 1;
1071 }
1072
1073 asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1074 asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1075
1076 asoc->smallest_mtu = inp->sctp_frag_point;
1077 asoc->minrto = inp->sctp_ep.sctp_minrto;
1078 asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1079
1080 asoc->stream_locked_on = 0;
1081 asoc->ecn_echo_cnt_onq = 0;
1082 asoc->stream_locked = 0;
1083
1084 asoc->send_sack = 1;
1085
1086 LIST_INIT(&asoc->sctp_restricted_addrs);
1087
1088 TAILQ_INIT(&asoc->nets);
1089 TAILQ_INIT(&asoc->pending_reply_queue);
1090 TAILQ_INIT(&asoc->asconf_ack_sent);
1091 /* Setup to fill the hb random cache at first HB */
1092 asoc->hb_random_idx = 4;
1093
1094 asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1095
1096 stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1097 stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1098
1099 stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1100 stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1101
1102 /*
1103 * Now the stream parameters, here we allocate space for all streams
1104 * that we request by default.
1105 */
1106 asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1107 o_strms;
1108 SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1109 asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1110 SCTP_M_STRMO);
1111 if (asoc->strmout == NULL) {
1112 /* big trouble no memory */
1113 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1114 return (ENOMEM);
1115 }
1116 for (i = 0; i < asoc->streamoutcnt; i++) {
1117 /*
1118 * inbound side must be set to 0xffff, also NOTE when we get
1119 * the INIT-ACK back (for INIT sender) we MUST reduce the
1120 * count (streamoutcnt) but first check if we sent to any of
1121 * the upper streams that were dropped (if some were). Those
1122 * that were dropped must be notified to the upper layer as
1123 * failed to send.
1124 */
1125 asoc->strmout[i].next_mid_ordered = 0;
1126 asoc->strmout[i].next_mid_unordered = 0;
1127 TAILQ_INIT(&asoc->strmout[i].outqueue);
1128 asoc->strmout[i].chunks_on_queues = 0;
1129 #if defined(SCTP_DETAILED_STR_STATS)
1130 for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1131 asoc->strmout[i].abandoned_sent[j] = 0;
1132 asoc->strmout[i].abandoned_unsent[j] = 0;
1133 }
1134 #else
1135 asoc->strmout[i].abandoned_sent[0] = 0;
1136 asoc->strmout[i].abandoned_unsent[0] = 0;
1137 #endif
1138 asoc->strmout[i].sid = i;
1139 asoc->strmout[i].last_msg_incomplete = 0;
1140 asoc->strmout[i].state = SCTP_STREAM_OPENING;
1141 asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
1142 }
1143 asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1144
1145 /* Now the mapping array */
1146 asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1147 SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1148 SCTP_M_MAP);
1149 if (asoc->mapping_array == NULL) {
1150 SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1151 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1152 return (ENOMEM);
1153 }
1154 memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1155 SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1156 SCTP_M_MAP);
1157 if (asoc->nr_mapping_array == NULL) {
1158 SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1159 SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1160 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1161 return (ENOMEM);
1162 }
1163 memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1164
1165 /* Now the init of the other outqueues */
1166 TAILQ_INIT(&asoc->free_chunks);
1167 TAILQ_INIT(&asoc->control_send_queue);
1168 TAILQ_INIT(&asoc->asconf_send_queue);
1169 TAILQ_INIT(&asoc->send_queue);
1170 TAILQ_INIT(&asoc->sent_queue);
1171 TAILQ_INIT(&asoc->resetHead);
1172 asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1173 TAILQ_INIT(&asoc->asconf_queue);
1174 /* authentication fields */
1175 asoc->authinfo.random = NULL;
1176 asoc->authinfo.active_keyid = 0;
1177 asoc->authinfo.assoc_key = NULL;
1178 asoc->authinfo.assoc_keyid = 0;
1179 asoc->authinfo.recv_key = NULL;
1180 asoc->authinfo.recv_keyid = 0;
1181 LIST_INIT(&asoc->shared_keys);
1182 asoc->marked_retrans = 0;
1183 asoc->port = inp->sctp_ep.port;
1184 asoc->timoinit = 0;
1185 asoc->timodata = 0;
1186 asoc->timosack = 0;
1187 asoc->timoshutdown = 0;
1188 asoc->timoheartbeat = 0;
1189 asoc->timocookie = 0;
1190 asoc->timoshutdownack = 0;
1191 (void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1192 asoc->discontinuity_time = asoc->start_time;
1193 for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1194 asoc->abandoned_unsent[i] = 0;
1195 asoc->abandoned_sent[i] = 0;
1196 }
1197 /*
1198 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1199 * freed later when the association is freed.
1200 */
1201 return (0);
1202 }
1203
1204 void
1205 sctp_print_mapping_array(struct sctp_association *asoc)
1206 {
1207 unsigned int i, limit;
1208
1209 SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1210 asoc->mapping_array_size,
1211 asoc->mapping_array_base_tsn,
1212 asoc->cumulative_tsn,
1213 asoc->highest_tsn_inside_map,
1214 asoc->highest_tsn_inside_nr_map);
1215 for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1216 if (asoc->mapping_array[limit - 1] != 0) {
1217 break;
1218 }
1219 }
1220 SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1221 for (i = 0; i < limit; i++) {
1222 SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1223 }
1224 if (limit % 16)
1225 SCTP_PRINTF("\n");
1226 for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1227 if (asoc->nr_mapping_array[limit - 1]) {
1228 break;
1229 }
1230 }
1231 SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1232 for (i = 0; i < limit; i++) {
1233 SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1234 }
1235 if (limit % 16)
1236 SCTP_PRINTF("\n");
1237 }
1238
1239 int
1240 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1241 {
1242 /* mapping array needs to grow */
1243 uint8_t *new_array1, *new_array2;
1244 uint32_t new_size;
1245
1246 new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1247 SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1248 SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1249 if ((new_array1 == NULL) || (new_array2 == NULL)) {
1250 /* can't get more, forget it */
1251 SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1252 if (new_array1) {
1253 SCTP_FREE(new_array1, SCTP_M_MAP);
1254 }
1255 if (new_array2) {
1256 SCTP_FREE(new_array2, SCTP_M_MAP);
1257 }
1258 return (-1);
1259 }
1260 memset(new_array1, 0, new_size);
1261 memset(new_array2, 0, new_size);
1262 memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1263 memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1264 SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1265 SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1266 asoc->mapping_array = new_array1;
1267 asoc->nr_mapping_array = new_array2;
1268 asoc->mapping_array_size = new_size;
1269 return (0);
1270 }
1271
1272
/*
 * Core of the asynchronous iterator: walk endpoints (and the associations
 * hanging off each) starting at it->inp, invoking the caller-supplied
 * callbacks (function_inp, function_assoc, function_inp_end, function_atend)
 * on each matching object.  Endpoint flag/feature filters and the
 * association-state filter in "it" decide what matches.  The lock order
 * here (INP_INFO read lock -> ITERATOR lock -> per-INP lock -> per-TCB
 * lock) and the periodic pause/resume are delicate; do not reorder.
 * Consumes "it": it is always freed before returning.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	sctp_it_ctl.cur_it = it;
	if (it->inp) {
		SCTP_INP_RLOCK(it->inp);
		/* Drop the reference taken when the iterator was queued. */
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		/*
		 * Common exit: clear the global cursor, release the locks,
		 * run the completion callback, and free the iterator.
		 */
		sctp_it_ctl.cur_it = NULL;
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	/* On re-entry from no_stcb the new inp is not yet locked. */
	if (first_in) {
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		/* Hand-over-hand: grab the next inp before unlocking this one. */
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		/* Caller asked to skip this inp, or it has no associations. */
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/*
			 * Pin the stcb/inp with refcounts, drop every lock,
			 * then reacquire.  While unlocked, another thread
			 * may have flagged this iterator to stop.
			 */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1422
1423 void
1424 sctp_iterator_worker(void)
1425 {
1426 struct sctp_iterator *it, *nit;
1427
1428 /* This function is called with the WQ lock in place */
1429
1430 sctp_it_ctl.iterator_running = 1;
1431 TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
1432 /* now lets work on this one */
1433 TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1434 SCTP_IPI_ITERATOR_WQ_UNLOCK();
1435 CURVNET_SET(it->vn);
1436 sctp_iterator_work(it);
1437 CURVNET_RESTORE();
1438 SCTP_IPI_ITERATOR_WQ_LOCK();
1439 /* sa_ignore FREED_MEMORY */
1440 }
1441 sctp_it_ctl.iterator_running = 0;
1442 return;
1443 }
1444
1445
1446 static void
1447 sctp_handle_addr_wq(void)
1448 {
1449 /* deal with the ADDR wq from the rtsock calls */
1450 struct sctp_laddr *wi, *nwi;
1451 struct sctp_asconf_iterator *asc;
1452
1453 SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1454 sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1455 if (asc == NULL) {
1456 /* Try later, no memory */
1457 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1458 (struct sctp_inpcb *)NULL,
1459 (struct sctp_tcb *)NULL,
1460 (struct sctp_nets *)NULL);
1461 return;
1462 }
1463 LIST_INIT(&asc->list_of_work);
1464 asc->cnt = 0;
1465
1466 LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1467 LIST_REMOVE(wi, sctp_nxt_addr);
1468 LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1469 asc->cnt++;
1470 }
1471
1472 if (asc->cnt == 0) {
1473 SCTP_FREE(asc, SCTP_M_ASC_IT);
1474 } else {
1475 int ret;
1476
1477 ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
1478 sctp_asconf_iterator_stcb,
1479 NULL, /* No ep end for boundall */
1480 SCTP_PCB_FLAGS_BOUNDALL,
1481 SCTP_PCB_ANY_FEATURES,
1482 SCTP_ASOC_ANY_STATE,
1483 (void *)asc, 0,
1484 sctp_asconf_iterator_end, NULL, 0);
1485 if (ret) {
1486 SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
1487 /*
1488 * Freeing if we are stopping or put back on the
1489 * addr_wq.
1490 */
1491 if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
1492 sctp_asconf_iterator_end(asc, 0);
1493 } else {
1494 LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
1495 LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
1496 }
1497 SCTP_FREE(asc, SCTP_M_ASC_IT);
1498 }
1499 }
1500 }
1501 }
1502
/*
 * Single callout entry point for every SCTP timer type.  The sctp_timer
 * argument carries opaque back-pointers to the endpoint (ep), association
 * (tcb) and destination (net) the timer was armed for; which of them must
 * be non-NULL depends on tmr->type.  The function validates the timer,
 * takes references/locks appropriate to the objects involved, dispatches
 * on the timer type, and releases everything it acquired on the way out.
 * tmr->stopped_from is a debugging breadcrumb recording how far the
 * handler progressed before bailing out (or which type it serviced).
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;
	struct mbuf *op_err;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif
	int did_output;
	int type;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t)tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	if (tmr->self != (void *)tmr) {
		/* Self-pointer mismatch: timer memory was freed or reused. */
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * (void *)tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	/* Every type except ADDR_WQ requires an endpoint. */
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		CURVNET_RESTORE();
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		/*
		 * A socket-less endpoint is only serviced for the timer
		 * types that must still run during teardown.
		 */
		if ((inp->sctp_socket == NULL) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))) {
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* Pin the association while we inspect its state. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			/* Association already dismantled; nothing to do. */
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	type = tmr->type;
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/* Timer was stopped while this callout was in flight. */
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	/*
	 * Lock exactly one of: the TCB, the INP (write), or the global
	 * address work-queue — matching what the serviced type touches.
	 * get_out below unlocks the same one.
	 */
	if (stcb) {
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	} else if (inp != NULL) {
		if (type != SCTP_TIMER_TYPE_INPKILL) {
			SCTP_INP_WLOCK(inp);
		}
	} else {
		SCTP_WQ_ADDR_LOCK();
	}
	/* record in stopped what t-o occurred */
	tmr->stopped_from = type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (type) {
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/* T1-init retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* Delayed-SACK timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timosack);
		stcb->asoc.timosack++;
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		/* Heartbeat timer also requires a valid destination. */
		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoheartbeat);
		stcb->asoc.timoheartbeat++;
		if (sctp_heartbeat_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
			/* Re-arm only while HBs remain enabled on this path. */
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		{
			/* Rotate the endpoint's cookie secret keys. */
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/* Guard expired: abort the association outright. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
		    "Shutdown guard timer expired");
		sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/* Time to free the association itself. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Socket lock must be taken before the TCB lock; pin the
		 * stcb with a refcount across the re-lock.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_FROM_INPKILL_TIMER);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    type);
		break;
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t)type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	/* Release whichever lock was taken before the dispatch. */
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	} else if (inp != NULL) {
		SCTP_INP_WUNLOCK(inp);
	} else {
		SCTP_WQ_ADDR_UNLOCK();
	}

out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type = %d)\n", type);
	CURVNET_RESTORE();
}
1938
1939 void
1940 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1941 struct sctp_nets *net)
1942 {
1943 uint32_t to_ticks;
1944 struct sctp_timer *tmr;
1945
1946 if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1947 return;
1948
1949 tmr = NULL;
1950 if (stcb) {
1951 SCTP_TCB_LOCK_ASSERT(stcb);
1952 }
1953 switch (t_type) {
1954 case SCTP_TIMER_TYPE_ADDR_WQ:
1955 /* Only 1 tick away :-) */
1956 tmr = &SCTP_BASE_INFO(addr_wq_timer);
1957 to_ticks = SCTP_ADDRESS_TICK_DELAY;
1958 break;
1959 case SCTP_TIMER_TYPE_SEND:
1960 /* Here we use the RTO timer */
1961 {
1962 int rto_val;
1963
1964 if ((stcb == NULL) || (net == NULL)) {
1965 return;
1966 }
1967 tmr = &net->rxt_timer;
1968 if (net->RTO == 0) {
1969 rto_val = stcb->asoc.initial_rto;
1970 } else {
1971 rto_val = net->RTO;
1972 }
1973 to_ticks = MSEC_TO_TICKS(rto_val);
1974 }
1975 break;
1976 case SCTP_TIMER_TYPE_INIT:
1977 /*
1978 * Here we use the INIT timer default usually about 1
1979 * minute.
1980 */
1981 if ((stcb == NULL) || (net == NULL)) {
1982 return;
1983 }
1984 tmr = &net->rxt_timer;
1985 if (net->RTO == 0) {
1986 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1987 } else {
1988 to_ticks = MSEC_TO_TICKS(net->RTO);
1989 }
1990 break;
1991 case SCTP_TIMER_TYPE_RECV:
1992 /*
1993 * Here we use the Delayed-Ack timer value from the inp
1994 * ususually about 200ms.
1995 */
1996 if (stcb == NULL) {
1997 return;
1998 }
1999 tmr = &stcb->asoc.dack_timer;
2000 to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
2001 break;
2002 case SCTP_TIMER_TYPE_SHUTDOWN:
2003 /* Here we use the RTO of the destination. */
2004 if ((stcb == NULL) || (net == NULL)) {
2005 return;
2006 }
2007 if (net->RTO == 0) {
2008 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2009 } else {
2010 to_ticks = MSEC_TO_TICKS(net->RTO);
2011 }
2012 tmr = &net->rxt_timer;
2013 break;
2014 case SCTP_TIMER_TYPE_HEARTBEAT:
2015 /*
2016 * the net is used here so that we can add in the RTO. Even
2017 * though we use a different timer. We also add the HB timer
2018 * PLUS a random jitter.
2019 */
2020 if ((stcb == NULL) || (net == NULL)) {
2021 return;
2022 } else {
2023 uint32_t rndval;
2024 uint32_t jitter;
2025
2026 if ((net->dest_state & SCTP_ADDR_NOHB) &&
2027 !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
2028 return;
2029 }
2030 if (net->RTO == 0) {
2031 to_ticks = stcb->asoc.initial_rto;
2032 } else {
2033 to_ticks = net->RTO;
2034 }
2035 rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2036 jitter = rndval % to_ticks;
2037 if (jitter >= (to_ticks >> 1)) {
2038 to_ticks = to_ticks + (jitter - (to_ticks >> 1));
2039 } else {
2040 to_ticks = to_ticks - jitter;
2041 }
2042 if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2043 !(net->dest_state & SCTP_ADDR_PF)) {
2044 to_ticks += net->heart_beat_delay;
2045 }
2046 /*
2047 * Now we must convert the to_ticks that are now in
2048 * ms to ticks.
2049 */
2050 to_ticks = MSEC_TO_TICKS(to_ticks);
2051 tmr = &net->hb_timer;
2052 }
2053 break;
2054 case SCTP_TIMER_TYPE_COOKIE:
2055 /*
2056 * Here we can use the RTO timer from the network since one
2057 * RTT was compelete. If a retran happened then we will be
2058 * using the RTO initial value.
2059 */
2060 if ((stcb == NULL) || (net == NULL)) {
2061 return;
2062 }
2063 if (net->RTO == 0) {
2064 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2065 } else {
2066 to_ticks = MSEC_TO_TICKS(net->RTO);
2067 }
2068 tmr = &net->rxt_timer;
2069 break;
2070 case SCTP_TIMER_TYPE_NEWCOOKIE:
2071 /*
2072 * nothing needed but the endpoint here ususually about 60
2073 * minutes.
2074 */
2075 tmr = &inp->sctp_ep.signature_change;
2076 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2077 break;
2078 case SCTP_TIMER_TYPE_ASOCKILL:
2079 if (stcb == NULL) {
2080 return;
2081 }
2082 tmr = &stcb->asoc.strreset_timer;
2083 to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2084 break;
2085 case SCTP_TIMER_TYPE_INPKILL:
2086 /*
2087 * The inp is setup to die. We re-use the signature_chage
2088 * timer since that has stopped and we are in the GONE
2089 * state.
2090 */
2091 tmr = &inp->sctp_ep.signature_change;
2092 to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2093 break;
2094 case SCTP_TIMER_TYPE_PATHMTURAISE:
2095 /*
2096 * Here we use the value found in the EP for PMTU ususually
2097 * about 10 minutes.
2098 */
2099 if ((stcb == NULL) || (net == NULL)) {
2100 return;
2101 }
2102 if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2103 return;
2104 }
2105 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2106 tmr = &net->pmtu_timer;
2107 break;
2108 case SCTP_TIMER_TYPE_SHUTDOWNACK:
2109 /* Here we use the RTO of the destination */
2110 if ((stcb == NULL) || (net == NULL)) {
2111 return;
2112 }
2113 if (net->RTO == 0) {
2114 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2115 } else {
2116 to_ticks = MSEC_TO_TICKS(net->RTO);
2117 }
2118 tmr = &net->rxt_timer;
2119 break;
2120 case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2121 /*
2122 * Here we use the endpoints shutdown guard timer usually
2123 * about 3 minutes.
2124 */
2125 if (stcb == NULL) {
2126 return;
2127 }
2128 if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
2129 to_ticks = 5 * MSEC_TO_TICKS(stcb->asoc.maxrto);
2130 } else {
2131 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2132 }
2133 tmr = &stcb->asoc.shut_guard_timer;
2134 break;
2135 case SCTP_TIMER_TYPE_STRRESET:
2136 /*
2137 * Here the timer comes from the stcb but its value is from
2138 * the net's RTO.
2139 */
2140 if ((stcb == NULL) || (net == NULL)) {
2141 return;
2142 }
2143 if (net->RTO == 0) {
2144 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2145 } else {
2146 to_ticks = MSEC_TO_TICKS(net->RTO);
2147 }
2148 tmr = &stcb->asoc.strreset_timer;
2149 break;
2150 case SCTP_TIMER_TYPE_ASCONF:
2151 /*
2152 * Here the timer comes from the stcb but its value is from
2153 * the net's RTO.
2154 */
2155 if ((stcb == NULL) || (net == NULL)) {
2156 return;
2157 }
2158 if (net->RTO == 0) {
2159 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2160 } else {
2161 to_ticks = MSEC_TO_TICKS(net->RTO);
2162 }
2163 tmr = &stcb->asoc.asconf_timer;
2164 break;
2165 case SCTP_TIMER_TYPE_PRIM_DELETED:
2166 if ((stcb == NULL) || (net != NULL)) {
2167 return;
2168 }
2169 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2170 tmr = &stcb->asoc.delete_prim_timer;
2171 break;
2172 case SCTP_TIMER_TYPE_AUTOCLOSE:
2173 if (stcb == NULL) {
2174 return;
2175 }
2176 if (stcb->asoc.sctp_autoclose_ticks == 0) {
2177 /*
2178 * Really an error since stcb is NOT set to
2179 * autoclose
2180 */
2181 return;
2182 }
2183 to_ticks = stcb->asoc.sctp_autoclose_ticks;
2184 tmr = &stcb->asoc.autoclose_timer;
2185 break;
2186 default:
2187 SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2188 __func__, t_type);
2189 return;
2190 break;
2191 }
2192 if ((to_ticks <= 0) || (tmr == NULL)) {
2193 SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2194 __func__, t_type, to_ticks, (void *)tmr);
2195 return;
2196 }
2197 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2198 /*
2199 * we do NOT allow you to have it already running. if it is
2200 * we leave the current one up unchanged
2201 */
2202 return;
2203 }
2204 /* At this point we can proceed */
2205 if (t_type == SCTP_TIMER_TYPE_SEND) {
2206 stcb->asoc.num_send_timers_up++;
2207 }
2208 tmr->stopped_from = 0;
2209 tmr->type = t_type;
2210 tmr->ep = (void *)inp;
2211 tmr->tcb = (void *)stcb;
2212 tmr->net = (void *)net;
2213 tmr->self = (void *)tmr;
2214 tmr->vnet = (void *)curvnet;
2215 tmr->ticks = sctp_get_tick_count();
2216 (void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2217 return;
2218 }
2219
2220 void
2221 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2222 struct sctp_nets *net, uint32_t from)
2223 {
2224 struct sctp_timer *tmr;
2225
2226 if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
2227 (inp == NULL))
2228 return;
2229
2230 tmr = NULL;
2231 if (stcb) {
2232 SCTP_TCB_LOCK_ASSERT(stcb);
2233 }
2234 switch (t_type) {
2235 case SCTP_TIMER_TYPE_ADDR_WQ:
2236 tmr = &SCTP_BASE_INFO(addr_wq_timer);
2237 break;
2238 case SCTP_TIMER_TYPE_SEND:
2239 if ((stcb == NULL) || (net == NULL)) {
2240 return;
2241 }
2242 tmr = &net->rxt_timer;
2243 break;
2244 case SCTP_TIMER_TYPE_INIT:
2245 if ((stcb == NULL) || (net == NULL)) {
2246 return;
2247 }
2248 tmr = &net->rxt_timer;
2249 break;
2250 case SCTP_TIMER_TYPE_RECV:
2251 if (stcb == NULL) {
2252 return;
2253 }
2254 tmr = &stcb->asoc.dack_timer;
2255 break;
2256 case SCTP_TIMER_TYPE_SHUTDOWN:
2257 if ((stcb == NULL) || (net == NULL)) {
2258 return;
2259 }
2260 tmr = &net->rxt_timer;
2261 break;
2262 case SCTP_TIMER_TYPE_HEARTBEAT:
2263 if ((stcb == NULL) || (net == NULL)) {
2264 return;
2265 }
2266 tmr = &net->hb_timer;
2267 break;
2268 case SCTP_TIMER_TYPE_COOKIE:
2269 if ((stcb == NULL) || (net == NULL)) {
2270 return;
2271 }
2272 tmr = &net->rxt_timer;
2273 break;
2274 case SCTP_TIMER_TYPE_NEWCOOKIE:
2275 /* nothing needed but the endpoint here */
2276 tmr = &inp->sctp_ep.signature_change;
2277 /*
2278 * We re-use the newcookie timer for the INP kill timer. We
2279 * must assure that we do not kill it by accident.
2280 */
2281 break;
2282 case SCTP_TIMER_TYPE_ASOCKILL:
2283 /*
2284 * Stop the asoc kill timer.
2285 */
2286 if (stcb == NULL) {
2287 return;
2288 }
2289 tmr = &stcb->asoc.strreset_timer;
2290 break;
2291
2292 case SCTP_TIMER_TYPE_INPKILL:
2293 /*
2294 * The inp is setup to die. We re-use the signature_chage
2295 * timer since that has stopped and we are in the GONE
2296 * state.
2297 */
2298 tmr = &inp->sctp_ep.signature_change;
2299 break;
2300 case SCTP_TIMER_TYPE_PATHMTURAISE:
2301 if ((stcb == NULL) || (net == NULL)) {
2302 return;
2303 }
2304 tmr = &net->pmtu_timer;
2305 break;
2306 case SCTP_TIMER_TYPE_SHUTDOWNACK:
2307 if ((stcb == NULL) || (net == NULL)) {
2308 return;
2309 }
2310 tmr = &net->rxt_timer;
2311 break;
2312 case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2313 if (stcb == NULL) {
2314 return;
2315 }
2316 tmr = &stcb->asoc.shut_guard_timer;
2317 break;
2318 case SCTP_TIMER_TYPE_STRRESET:
2319 if (stcb == NULL) {
2320 return;
2321 }
2322 tmr = &stcb->asoc.strreset_timer;
2323 break;
2324 case SCTP_TIMER_TYPE_ASCONF:
2325 if (stcb == NULL) {
2326 return;
2327 }
2328 tmr = &stcb->asoc.asconf_timer;
2329 break;
2330 case SCTP_TIMER_TYPE_PRIM_DELETED:
2331 if (stcb == NULL) {
2332 return;
2333 }
2334 tmr = &stcb->asoc.delete_prim_timer;
2335 break;
2336 case SCTP_TIMER_TYPE_AUTOCLOSE:
2337 if (stcb == NULL) {
2338 return;
2339 }
2340 tmr = &stcb->asoc.autoclose_timer;
2341 break;
2342 default:
2343 SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2344 __func__, t_type);
2345 break;
2346 }
2347 if (tmr == NULL) {
2348 return;
2349 }
2350 if ((tmr->type != t_type) && tmr->type) {
2351 /*
2352 * Ok we have a timer that is under joint use. Cookie timer
2353 * per chance with the SEND timer. We therefore are NOT
2354 * running the timer that the caller wants stopped. So just
2355 * return.
2356 */
2357 return;
2358 }
2359 if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2360 stcb->asoc.num_send_timers_up--;
2361 if (stcb->asoc.num_send_timers_up < 0) {
2362 stcb->asoc.num_send_timers_up = 0;
2363 }
2364 }
2365 tmr->self = NULL;
2366 tmr->stopped_from = from;
2367 (void)SCTP_OS_TIMER_STOP(&tmr->timer);
2368 return;
2369 }
2370
2371 uint32_t
2372 sctp_calculate_len(struct mbuf *m)
2373 {
2374 uint32_t tlen = 0;
2375 struct mbuf *at;
2376
2377 at = m;
2378 while (at) {
2379 tlen += SCTP_BUF_LEN(at);
2380 at = SCTP_BUF_NEXT(at);
2381 }
2382 return (tlen);
2383 }
2384
2385 void
2386 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2387 struct sctp_association *asoc, uint32_t mtu)
2388 {
2389 /*
2390 * Reset the P-MTU size on this association, this involves changing
2391 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2392 * allow the DF flag to be cleared.
2393 */
2394 struct sctp_tmit_chunk *chk;
2395 unsigned int eff_mtu, ovh;
2396
2397 asoc->smallest_mtu = mtu;
2398 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2399 ovh = SCTP_MIN_OVERHEAD;
2400 } else {
2401 ovh = SCTP_MIN_V4_OVERHEAD;
2402 }
2403 eff_mtu = mtu - ovh;
2404 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2405 if (chk->send_size > eff_mtu) {
2406 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2407 }
2408 }
2409 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2410 if (chk->send_size > eff_mtu) {
2411 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2412 }
2413 }
2414 }
2415
2416
/*
 * Given an association and the starting time of the current RTT period,
 * return the RTO in number of msecs; 'net' should point to the current
 * network.
 */
2421
uint32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *old,
    int rtt_from_sack)
{
	/*-
	 * given an association and the starting time of the current RTT
	 * period (in value1/value2) return RTO in number of msecs.
	 */
	int32_t rtt;		/* RTT in ms */
	uint32_t new_rto;
	int first_measure = 0;
	struct timeval now;

	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	if (stcb->asoc.use_precise_time) {
		(void)SCTP_GETPTIME_TIMEVAL(&now);
	} else {
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	/* elapsed time since 'old' is the measured round-trip time */
	timevalsub(&now, old);
	/* store the current RTT in us */
	net->rtt = (uint64_t)1000000 *(uint64_t)now.tv_sec +
	    (uint64_t)now.tv_usec;

	/* compute rtt in ms */
	rtt = (int32_t)(net->rtt / 1000);
	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
		/*
		 * Tell the CC module that a new update has just occurred
		 * from a sack
		 */
		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
	}
	/*
	 * Do we need to determine the lan? We do this only on sacks i.e.
	 * RTT being determined from data not non-data (HB/INIT->INITACK).
	 */
	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
			net->lan_type = SCTP_LAN_INTERNET;
		} else {
			net->lan_type = SCTP_LAN_LOCAL;
		}
	}
	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	/*-
	 * Compute the scaled average lastsa and the
	 * scaled variance lastsv as described in van Jacobson
	 * Paper "Congestion Avoidance and Control", Annex A.
	 *
	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
	 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar
	 */
	if (net->RTO_measured) {
		/* Fold the new sample into the running srtt/rttvar. */
		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
		net->lastsa += rtt;
		if (rtt < 0) {
			rtt = -rtt;
		}
		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
		net->lastsv += rtt;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
	} else {
		/* First RTO measurment: seed srtt with the sample. */
		net->RTO_measured = 1;
		first_measure = 1;
		net->lastsa = rtt << SCTP_RTT_SHIFT;
		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
	/* never let the variance term collapse to zero */
	if (net->lastsv == 0) {
		net->lastsv = SCTP_CLOCK_GRANULARITY;
	}
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTO */
	return (new_rto);
}
2526
/*
 * return a pointer to a contiguous piece of data from the given mbuf chain
 * starting at 'off' for 'len' bytes. If the desired piece spans more than
 * one mbuf, a copy is made at 'in_ptr'. caller must ensure that the buffer
 * size is >= 'len'. returns NULL if there isn't 'len' bytes in the chain.
 */
2533 caddr_t
2534 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr)
2535 {
2536 uint32_t count;
2537 uint8_t *ptr;
2538
2539 ptr = in_ptr;
2540 if ((off < 0) || (len <= 0))
2541 return (NULL);
2542
2543 /* find the desired start location */
2544 while ((m != NULL) && (off > 0)) {
2545 if (off < SCTP_BUF_LEN(m))
2546 break;
2547 off -= SCTP_BUF_LEN(m);
2548 m = SCTP_BUF_NEXT(m);
2549 }
2550 if (m == NULL)
2551 return (NULL);
2552
2553 /* is the current mbuf large enough (eg. contiguous)? */
2554 if ((SCTP_BUF_LEN(m) - off) >= len) {
2555 return (mtod(m, caddr_t)+off);
2556 } else {
2557 /* else, it spans more than one mbuf, so save a temp copy... */
2558 while ((m != NULL) && (len > 0)) {
2559 count = min(SCTP_BUF_LEN(m) - off, len);
2560 memcpy(ptr, mtod(m, caddr_t)+off, count);
2561 len -= count;
2562 ptr += count;
2563 off = 0;
2564 m = SCTP_BUF_NEXT(m);
2565 }
2566 if ((m == NULL) && (len > 0))
2567 return (NULL);
2568 else
2569 return ((caddr_t)in_ptr);
2570 }
2571 }
2572
2573
2574
2575 struct sctp_paramhdr *
2576 sctp_get_next_param(struct mbuf *m,
2577 int offset,
2578 struct sctp_paramhdr *pull,
2579 int pull_limit)
2580 {
2581 /* This just provides a typed signature to Peter's Pull routine */
2582 return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2583 (uint8_t *)pull));
2584 }
2585
2586
2587 struct mbuf *
2588 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2589 {
2590 struct mbuf *m_last;
2591 caddr_t dp;
2592
2593 if (padlen > 3) {
2594 return (NULL);
2595 }
2596 if (padlen <= M_TRAILINGSPACE(m)) {
2597 /*
2598 * The easy way. We hope the majority of the time we hit
2599 * here :)
2600 */
2601 m_last = m;
2602 } else {
2603 /* Hard way we must grow the mbuf chain */
2604 m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2605 if (m_last == NULL) {
2606 return (NULL);
2607 }
2608 SCTP_BUF_LEN(m_last) = 0;
2609 SCTP_BUF_NEXT(m_last) = NULL;
2610 SCTP_BUF_NEXT(m) = m_last;
2611 }
2612 dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
2613 SCTP_BUF_LEN(m_last) += padlen;
2614 memset(dp, 0, padlen);
2615 return (m_last);
2616 }
2617
2618 struct mbuf *
2619 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2620 {
2621 /* find the last mbuf in chain and pad it */
2622 struct mbuf *m_at;
2623
2624 if (last_mbuf != NULL) {
2625 return (sctp_add_pad_tombuf(last_mbuf, padval));
2626 } else {
2627 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2628 if (SCTP_BUF_NEXT(m_at) == NULL) {
2629 return (sctp_add_pad_tombuf(m_at, padval));
2630 }
2631 }
2632 }
2633 return (NULL);
2634 }
2635
/*
 * Deliver an SCTP_ASSOC_CHANGE notification for the given association
 * state transition.  If the association-change event is enabled, a
 * notification (optionally carrying the peer's ABORT chunk, or the list
 * of supported features on COMM_UP/RESTART) is appended to the socket
 * read queue.  For 1-to-1 style sockets, a COMM_LOST/CANT_STR_ASSOC
 * transition also sets so_error and wakes any sleepers.
 */
static void
sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
    uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;
	unsigned int notif_len;
	uint16_t abort_len;
	unsigned int i;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

	if (stcb == NULL) {
		return;
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
		if (abort != NULL) {
			abort_len = ntohs(abort->ch.chunk_length);
			/*
			 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be
			 * contiguous.
			 */
			if (abort_len > SCTP_CHUNK_BUFFER_SIZE) {
				abort_len = SCTP_CHUNK_BUFFER_SIZE;
			}
		} else {
			abort_len = 0;
		}
		/* Reserve room for the variable-length sac_info payload. */
		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
			notif_len += abort_len;
		}
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			/* Retry with smaller value. */
			notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
			if (m_notify == NULL) {
				goto set_error;
			}
		}
		SCTP_BUF_NEXT(m_notify) = NULL;
		sac = mtod(m_notify, struct sctp_assoc_change *);
		memset(sac, 0, notif_len);
		sac->sac_type = SCTP_ASSOC_CHANGE;
		sac->sac_flags = 0;
		sac->sac_length = sizeof(struct sctp_assoc_change);
		sac->sac_state = state;
		sac->sac_error = error;
		/* XXX verify these stream counts */
		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
		sac->sac_inbound_streams = stcb->asoc.streamincnt;
		sac->sac_assoc_id = sctp_get_associd(stcb);
		if (notif_len > sizeof(struct sctp_assoc_change)) {
			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
				/* One sac_info byte per supported feature. */
				i = 0;
				if (stcb->asoc.prsctp_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
				}
				if (stcb->asoc.auth_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
				}
				if (stcb->asoc.asconf_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
				}
				if (stcb->asoc.idata_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING;
				}
				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
				if (stcb->asoc.reconfig_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
				}
				sac->sac_length += i;
			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
				/* Echo the (clamped) ABORT chunk to the user. */
				memcpy(sac->sac_info, abort, abort_len);
				sac->sac_length += abort_len;
			}
		}
		SCTP_BUF_LEN(m_notify) = sac->sac_length;
		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
		    0, 0, stcb->asoc.context, 0, 0, 0,
		    m_notify);
		if (control != NULL) {
			control->length = SCTP_BUF_LEN(m_notify);
			control->spec_flags = M_NOTIFICATION;
			/* not that we need this */
			control->tail_mbuf = m_notify;
			sctp_add_to_readq(stcb->sctp_ep, stcb,
			    control,
			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
			    so_locked);
		} else {
			sctp_m_freem(m_notify);
		}
	}
	/*
	 * For 1-to-1 style sockets, we send up an error when an ABORT
	 * comes in.
	 */
set_error:
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		SOCK_LOCK(stcb->sctp_socket);
		if (from_peer) {
			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
				/* peer refused the association before setup */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
				stcb->sctp_socket->so_error = ECONNREFUSED;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
				stcb->sctp_socket->so_error = ECONNRESET;
			}
		} else {
			if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) ||
			    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
				/* local abort during setup looks like a timeout */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
				stcb->sctp_socket->so_error = ETIMEDOUT;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
				stcb->sctp_socket->so_error = ECONNABORTED;
			}
		}
	}
	/* Wake ANY sleepers */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(stcb->sctp_ep);
	if (!so_locked) {
		/* hold a ref across the unlock/relock to keep stcb alive */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
	}
#endif
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		socantrcvmore_locked(stcb->sctp_socket);
	}
	sorwakeup(stcb->sctp_socket);
	sowwakeup(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
2795
/*
 * Deliver an SCTP_PEER_ADDR_CHANGE notification (if enabled) describing
 * a state change of peer address 'sa' to the socket read queue.
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	memset(spc, 0, sizeof(struct sctp_paddr_change));
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	/* Copy the address in the user-visible form for its family. */
	switch (sa->sa_family) {
#ifdef INET
	case AF_INET:
#ifdef INET6
		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
			/* report IPv4 as a v4-mapped IPv6 address */
			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
			    (struct sockaddr_in6 *)&spc->spc_aaddr);
		} else {
			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
		}
#else
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
#endif
		break;
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));

			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
				if (sin6->sin6_scope_id == 0) {
					/* recover scope_id for user */
					(void)sa6_recoverscope(sin6);
				} else {
					/* clear embedded scope_id for user */
					in6_clearscope(&sin6->sin6_addr);
				}
			}
			break;
		}
#endif
	default:
		/* TSNH */
		break;
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
2887
2888
/*
 * Deliver an SCTP_SEND_FAILED / SCTP_SEND_FAILED_EVENT notification for
 * a chunk that could not be delivered.  The chunk's SCTP data-chunk
 * header and padding are trimmed so the user payload follows the
 * notification header; ownership of chk->data moves to the notification
 * mbuf chain.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	struct sctp_chunkhdr *chkhdr;
	int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	/* Pick the new-style or legacy notification format. */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		notifhdr_len = sizeof(struct sctp_send_failed_event);
	} else {
		notifhdr_len = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = notifhdr_len;
	if (stcb->asoc.idata_supported) {
		chkhdr_len = sizeof(struct sctp_idata_chunk);
	} else {
		chkhdr_len = sizeof(struct sctp_data_chunk);
	}
	/* Use some defaults in case we can't access the chunk header */
	if (chk->send_size >= chkhdr_len) {
		payload_len = chk->send_size - chkhdr_len;
	} else {
		payload_len = 0;
	}
	padding_len = 0;
	if (chk->data != NULL) {
		chkhdr = mtod(chk->data, struct sctp_chunkhdr *);
		if (chkhdr != NULL) {
			chk_len = ntohs(chkhdr->chunk_length);
			/* refine payload/padding from the actual header */
			if ((chk_len >= chkhdr_len) &&
			    (chk->send_size >= chk_len) &&
			    (chk->send_size - chk_len < 4)) {
				padding_len = chk->send_size - chk_len;
				payload_len = chk->send_size - chkhdr_len - padding_len;
			}
		}
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, notifhdr_len);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		if (sent) {
			ssfe->ssfe_flags = SCTP_DATA_SENT;
		} else {
			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		}
		ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len);
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = chk->rec.data.sid;
		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
		ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid;
		ssfe->ssfe_info.snd_context = chk->rec.data.context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
	} else {
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, notifhdr_len);
		ssf->ssf_type = SCTP_SEND_FAILED;
		if (sent) {
			ssf->ssf_flags = SCTP_DATA_SENT;
		} else {
			ssf->ssf_flags = SCTP_DATA_UNSENT;
		}
		ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len);
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssf->ssf_info.sinfo_stream = chk->rec.data.sid;
		ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid;
		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
		ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid;
		ssf->ssf_info.sinfo_context = chk->rec.data.context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
	}
	if (chk->data != NULL) {
		/* Trim off the sctp chunk header (it should be there) */
		if (chk->send_size == chkhdr_len + payload_len + padding_len) {
			m_adj(chk->data, chkhdr_len);
			m_adj(chk->data, -padding_len);
			sctp_mbuf_crush(chk->data);
			chk->send_size -= (chkhdr_len + padding_len);
		}
	}
	SCTP_BUF_NEXT(m_notify) = chk->data;
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
3022
3023
/*
 * Deliver an SCTP_SEND_FAILED / SCTP_SEND_FAILED_EVENT notification for
 * a stream-queue entry whose data never became a chunk (always reported
 * as SCTP_DATA_UNSENT).  Ownership of sp->data moves to the notification
 * mbuf chain.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int notifhdr_len;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	/* Pick the new-style or legacy notification format. */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		notifhdr_len = sizeof(struct sctp_send_failed_event);
	} else {
		notifhdr_len = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* no space left */
		return;
	}
	SCTP_BUF_LEN(m_notify) = notifhdr_len;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, notifhdr_len);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length);
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = sp->sid;
		if (sp->some_taken) {
			/* part of the message was already taken as fragments */
			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
		}
		ssfe->ssfe_info.snd_ppid = sp->ppid;
		ssfe->ssfe_info.snd_context = sp->context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
	} else {
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, notifhdr_len);
		ssf->ssf_type = SCTP_SEND_FAILED;
		ssf->ssf_flags = SCTP_DATA_UNSENT;
		ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length);
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssf->ssf_info.sinfo_stream = sp->sid;
		ssf->ssf_info.sinfo_ssn = 0;
		if (sp->some_taken) {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
		}
		ssf->ssf_info.sinfo_ppid = sp->ppid;
		ssf->ssf_info.sinfo_context = sp->context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
	}
	SCTP_BUF_NEXT(m_notify) = sp->data;

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}
3123
3124
3125
3126 static void
3127 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3128 {
3129 struct mbuf *m_notify;
3130 struct sctp_adaptation_event *sai;
3131 struct sctp_queued_to_read *control;
3132
3133 if ((stcb == NULL) ||
3134 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3135 /* event not enabled */
3136 return;
3137 }
3138 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA);
3139 if (m_notify == NULL)
3140 /* no space left */
3141 return;
3142 SCTP_BUF_LEN(m_notify) = 0;
3143 sai = mtod(m_notify, struct sctp_adaptation_event *);
3144 memset(sai, 0, sizeof(struct sctp_adaptation_event));
3145 sai->sai_type = SCTP_ADAPTATION_INDICATION;
3146 sai->sai_flags = 0;
3147 sai->sai_length = sizeof(struct sctp_adaptation_event);
3148 sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3149 sai->sai_assoc_id = sctp_get_associd(stcb);
3150
3151 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3152 SCTP_BUF_NEXT(m_notify) = NULL;
3153
3154 /* append to socket */
3155 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3156 0, 0, stcb->asoc.context, 0, 0, 0,
3157 m_notify);
3158 if (control == NULL) {
3159 /* no memory */
3160 sctp_m_freem(m_notify);
3161 return;
3162 }
3163 control->length = SCTP_BUF_LEN(m_notify);
3164 control->spec_flags = M_NOTIFICATION;
3165 /* not that we need this */
3166 control->tail_mbuf = m_notify;
3167 sctp_add_to_readq(stcb->sctp_ep, stcb,
3168 control,
3169 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3170 }
3171
/* This always must be called with the read-queue LOCKED in the INP */
static void
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
    uint32_t val, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_pdapi_event *pdapi;
	struct sctp_queued_to_read *control;
	struct sockbuf *sb;

	/*
	 * Queue an SCTP_PARTIAL_DELIVERY_EVENT (e.g. a PD-API abort) for the
	 * user.  Unlike the other notification helpers this one inserts the
	 * entry into the read queue by hand, so it must be placed directly
	 * behind the partially-delivered message it refers to.
	 */
	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
		/* event not enabled */
		return;
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* reader side of the socket is gone, nobody to notify */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
	pdapi->pdapi_flags = 0;
	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
	pdapi->pdapi_indication = error;
	/* val packs the stream id in the upper 16 bits, the SSN in the lower */
	pdapi->pdapi_stream = (val >> 16);
	pdapi->pdapi_seq = (val & 0x0000ffff);
	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sb = &stcb->sctp_socket->so_rcv;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
	}
	/* account for the notification in the socket buffer by hand */
	sctp_sballoc(stcb, sb, m_notify);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
	}
	control->end_added = 1;
	/*
	 * Insert directly after the in-progress PD-API message so the reader
	 * sees the event in order; falling back to the tail should not happen.
	 */
	if (stcb->asoc.control_pdapi)
		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
	else {
		/* we really should not see this case */
		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
	}
	if (stcb->sctp_ep && stcb->sctp_socket) {
		/* This should always be the case */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/*
		 * Apple lock order: take the socket lock before waking the
		 * reader; hold a refcount so the stcb cannot be freed while
		 * the TCB lock is dropped.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* socket vanished while we relocked; bail */
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
3264
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/*
		 * Apple lock order: socket lock must be taken with the TCB
		 * lock dropped; the refcount keeps the stcb alive meanwhile.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* socket was closed while we relocked; nothing to do */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	/* build the fixed-size SCTP_SHUTDOWN_EVENT notification */
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	memset(sse, 0, sizeof(struct sctp_shutdown_event));
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3333
3334 static void
3335 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3336 int so_locked
3337 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3338 SCTP_UNUSED
3339 #endif
3340 )
3341 {
3342 struct mbuf *m_notify;
3343 struct sctp_sender_dry_event *event;
3344 struct sctp_queued_to_read *control;
3345
3346 if ((stcb == NULL) ||
3347 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3348 /* event not enabled */
3349 return;
3350 }
3351 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3352 if (m_notify == NULL) {
3353 /* no space left */
3354 return;
3355 }
3356 SCTP_BUF_LEN(m_notify) = 0;
3357 event = mtod(m_notify, struct sctp_sender_dry_event *);
3358 memset(event, 0, sizeof(struct sctp_sender_dry_event));
3359 event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3360 event->sender_dry_flags = 0;
3361 event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3362 event->sender_dry_assoc_id = sctp_get_associd(stcb);
3363
3364 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3365 SCTP_BUF_NEXT(m_notify) = NULL;
3366
3367 /* append to socket */
3368 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3369 0, 0, stcb->asoc.context, 0, 0, 0,
3370 m_notify);
3371 if (control == NULL) {
3372 /* no memory */
3373 sctp_m_freem(m_notify);
3374 return;
3375 }
3376 control->length = SCTP_BUF_LEN(m_notify);
3377 control->spec_flags = M_NOTIFICATION;
3378 /* not that we need this */
3379 control->tail_mbuf = m_notify;
3380 sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3381 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3382 }
3383
3384
3385 void
3386 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3387 {
3388 struct mbuf *m_notify;
3389 struct sctp_queued_to_read *control;
3390 struct sctp_stream_change_event *stradd;
3391
3392 if ((stcb == NULL) ||
3393 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3394 /* event not enabled */
3395 return;
3396 }
3397 if ((stcb->asoc.peer_req_out) && flag) {
3398 /* Peer made the request, don't tell the local user */
3399 stcb->asoc.peer_req_out = 0;
3400 return;
3401 }
3402 stcb->asoc.peer_req_out = 0;
3403 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3404 if (m_notify == NULL)
3405 /* no space left */
3406 return;
3407 SCTP_BUF_LEN(m_notify) = 0;
3408 stradd = mtod(m_notify, struct sctp_stream_change_event *);
3409 memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3410 stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3411 stradd->strchange_flags = flag;
3412 stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3413 stradd->strchange_assoc_id = sctp_get_associd(stcb);
3414 stradd->strchange_instrms = numberin;
3415 stradd->strchange_outstrms = numberout;
3416 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3417 SCTP_BUF_NEXT(m_notify) = NULL;
3418 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3419 /* no space */
3420 sctp_m_freem(m_notify);
3421 return;
3422 }
3423 /* append to socket */
3424 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3425 0, 0, stcb->asoc.context, 0, 0, 0,
3426 m_notify);
3427 if (control == NULL) {
3428 /* no memory */
3429 sctp_m_freem(m_notify);
3430 return;
3431 }
3432 control->length = SCTP_BUF_LEN(m_notify);
3433 control->spec_flags = M_NOTIFICATION;
3434 /* not that we need this */
3435 control->tail_mbuf = m_notify;
3436 sctp_add_to_readq(stcb->sctp_ep, stcb,
3437 control,
3438 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3439 }
3440
3441 void
3442 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3443 {
3444 struct mbuf *m_notify;
3445 struct sctp_queued_to_read *control;
3446 struct sctp_assoc_reset_event *strasoc;
3447
3448 if ((stcb == NULL) ||
3449 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3450 /* event not enabled */
3451 return;
3452 }
3453 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3454 if (m_notify == NULL)
3455 /* no space left */
3456 return;
3457 SCTP_BUF_LEN(m_notify) = 0;
3458 strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3459 memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3460 strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3461 strasoc->assocreset_flags = flag;
3462 strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3463 strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3464 strasoc->assocreset_local_tsn = sending_tsn;
3465 strasoc->assocreset_remote_tsn = recv_tsn;
3466 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3467 SCTP_BUF_NEXT(m_notify) = NULL;
3468 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3469 /* no space */
3470 sctp_m_freem(m_notify);
3471 return;
3472 }
3473 /* append to socket */
3474 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3475 0, 0, stcb->asoc.context, 0, 0, 0,
3476 m_notify);
3477 if (control == NULL) {
3478 /* no memory */
3479 sctp_m_freem(m_notify);
3480 return;
3481 }
3482 control->length = SCTP_BUF_LEN(m_notify);
3483 control->spec_flags = M_NOTIFICATION;
3484 /* not that we need this */
3485 control->tail_mbuf = m_notify;
3486 sctp_add_to_readq(stcb->sctp_ep, stcb,
3487 control,
3488 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3489 }
3490
3491
3492
3493 static void
3494 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3495 int number_entries, uint16_t *list, int flag)
3496 {
3497 struct mbuf *m_notify;
3498 struct sctp_queued_to_read *control;
3499 struct sctp_stream_reset_event *strreset;
3500 int len;
3501
3502 if ((stcb == NULL) ||
3503 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3504 /* event not enabled */
3505 return;
3506 }
3507 m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3508 if (m_notify == NULL)
3509 /* no space left */
3510 return;
3511 SCTP_BUF_LEN(m_notify) = 0;
3512 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3513 if (len > M_TRAILINGSPACE(m_notify)) {
3514 /* never enough room */
3515 sctp_m_freem(m_notify);
3516 return;
3517 }
3518 strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3519 memset(strreset, 0, len);
3520 strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3521 strreset->strreset_flags = flag;
3522 strreset->strreset_length = len;
3523 strreset->strreset_assoc_id = sctp_get_associd(stcb);
3524 if (number_entries) {
3525 int i;
3526
3527 for (i = 0; i < number_entries; i++) {
3528 strreset->strreset_stream_list[i] = ntohs(list[i]);
3529 }
3530 }
3531 SCTP_BUF_LEN(m_notify) = len;
3532 SCTP_BUF_NEXT(m_notify) = NULL;
3533 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3534 /* no space */
3535 sctp_m_freem(m_notify);
3536 return;
3537 }
3538 /* append to socket */
3539 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3540 0, 0, stcb->asoc.context, 0, 0, 0,
3541 m_notify);
3542 if (control == NULL) {
3543 /* no memory */
3544 sctp_m_freem(m_notify);
3545 return;
3546 }
3547 control->length = SCTP_BUF_LEN(m_notify);
3548 control->spec_flags = M_NOTIFICATION;
3549 /* not that we need this */
3550 control->tail_mbuf = m_notify;
3551 sctp_add_to_readq(stcb->sctp_ep, stcb,
3552 control,
3553 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3554 }
3555
3556
static void
sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
{
	struct mbuf *m_notify;
	struct sctp_remote_error *sre;
	struct sctp_queued_to_read *control;
	unsigned int notif_len;
	uint16_t chunk_len;

	/*
	 * Queue an SCTP_REMOTE_ERROR notification, optionally appending a
	 * copy of the received ERROR chunk.  If memory is tight the chunk
	 * payload is dropped and only the fixed header is delivered.
	 */
	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
		return;
	}
	if (chunk != NULL) {
		chunk_len = ntohs(chunk->ch.chunk_length);
		/*
		 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be
		 * contiguous.
		 */
		if (chunk_len > SCTP_CHUNK_BUFFER_SIZE) {
			chunk_len = SCTP_CHUNK_BUFFER_SIZE;
		}
	} else {
		chunk_len = 0;
	}
	notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len);
	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* Retry with smaller value. */
		notif_len = (unsigned int)sizeof(struct sctp_remote_error);
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			return;
		}
	}
	SCTP_BUF_NEXT(m_notify) = NULL;
	sre = mtod(m_notify, struct sctp_remote_error *);
	memset(sre, 0, notif_len);
	sre->sre_type = SCTP_REMOTE_ERROR;
	sre->sre_flags = 0;
	sre->sre_length = sizeof(struct sctp_remote_error);
	sre->sre_error = error;
	sre->sre_assoc_id = sctp_get_associd(stcb);
	/*
	 * notif_len exceeds the header size only when chunk != NULL and the
	 * full-size allocation succeeded, so the memcpy source is valid.
	 */
	if (notif_len > sizeof(struct sctp_remote_error)) {
		memcpy(sre->sre_data, chunk, chunk_len);
		sre->sre_length += chunk_len;
	}
	SCTP_BUF_LEN(m_notify) = sre->sre_length;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control != NULL) {
		control->length = SCTP_BUF_LEN(m_notify);
		control->spec_flags = M_NOTIFICATION;
		/* not that we need this */
		control->tail_mbuf = m_notify;
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
	} else {
		sctp_m_freem(m_notify);
	}
}
3621
3622
/*
 * Central dispatcher for upper-layer (ULP) notifications: translates an
 * internal SCTP_NOTIFY_* code plus optional payload into the appropriate
 * socket-API notification helper.  Suppresses all notifications once the
 * socket is gone or can no longer receive, and suppresses interface
 * up/down/confirmed events while the association is still handshaking.
 */
void
sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if ((stcb == NULL) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* If the socket is gone we are out of here */
		return;
	}
	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
		/* reader side shut down, nobody will see the notification */
		return;
	}
	if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED)) {
		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
			/* Don't report these in front states */
			return;
		}
	}
	switch (notification) {
	case SCTP_NOTIFY_ASSOC_UP:
		/* COMM_UP is delivered at most once per association */
		if (stcb->asoc.assoc_up_sent == 0) {
			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
			stcb->asoc.assoc_up_sent = 1;
		}
		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
			sctp_notify_adaptation_layer(stcb);
		}
		if (stcb->asoc.auth_supported == 0) {
			/* tell the user the peer does not do AUTH */
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_DOWN:
		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
		break;
	case SCTP_NOTIFY_INTERFACE_DOWN:
		{
			struct sctp_nets *net;

			/* data is the affected sctp_nets */
			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_UP:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
		/* data is an unsent stream-queue pending entry */
		sctp_notify_send_failed2(stcb, error,
		    (struct sctp_stream_queue_pending *)data, so_locked);
		break;
	case SCTP_NOTIFY_SENT_DG_FAIL:
		/* data is a transmitted chunk that failed */
		sctp_notify_send_failed(stcb, 1, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_UNSENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 0, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	/* NOTE: the constant below carries a historical misspelling */
	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
		{
			uint32_t val;

			/* data points to the packed stream/SSN value */
			val = *((uint32_t *)data);

			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
			break;
		}
	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
		/* abort during handshake -> CANT_STR_ASSOC, else COMM_LOST */
		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_RESTART:
		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
		if (stcb->asoc.auth_supported == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	/* stream reset cases: error carries the entry count, data the list */
	case SCTP_NOTIFY_STR_RESET_SEND:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_OUTGOING_SSN);
		break;
	case SCTP_NOTIFY_STR_RESET_RECV:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING);
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_ASCONF_ADD_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_ASCONF_DELETE_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_PEER_SHUTDOWN:
		sctp_notify_shutdown_event(stcb);
		break;
	/* AUTH cases: data carries the key id cast through uintptr_t */
	case SCTP_NOTIFY_AUTH_NEW_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
		    (uint16_t)(uintptr_t)data,
		    so_locked);
		break;
	case SCTP_NOTIFY_AUTH_FREE_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
		    (uint16_t)(uintptr_t)data,
		    so_locked);
		break;
	case SCTP_NOTIFY_NO_PEER_AUTH:
		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
		    (uint16_t)(uintptr_t)data,
		    so_locked);
		break;
	case SCTP_NOTIFY_SENDER_DRY:
		sctp_notify_sender_dry_event(stcb, so_locked);
		break;
	case SCTP_NOTIFY_REMOTE_ERROR:
		sctp_notify_remote_error(stcb, error, data);
		break;
	default:
		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
		    __func__, notification, notification);
		break;
	}			/* end switch */
}
3802
/*
 * Drain every outbound queue of the association (sent queue, send queue
 * and all per-stream pending queues), notifying the user of each failed
 * message and releasing the associated mbufs and chunk/strmoq structures.
 * Called when the association is being torn down; takes the TCB send lock
 * unless the caller indicates (holds_lock) it already holds it.
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		/* nobody left to notify */
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
			/*
			 * Keep the per-stream chunk accounting in sync;
			 * the #ifdef arranges that without INVARIANTS an
			 * underflow is silently avoided, with INVARIANTS
			 * it panics (note the brace placement trick).
			 */
			if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
				asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
			} else {
				panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
#endif
			}
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			/* tell the user this already-sent datagram failed */
			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
			asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
		} else {
			panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
#endif
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			/* tell the user this never-sent datagram failed */
			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			atomic_subtract_int(&asoc->stream_queue_cnt, 1);
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			/* detach from the stream scheduler first */
			stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, holds_lock);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    error, (void *)sp, so_locked);
				/*
				 * The notify may have stolen sp->data (set
				 * it to NULL), so re-check before freeing.
				 */
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
					sp->tail_mbuf = NULL;
					sp->length = 0;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/* sa_ignore FREED_MEMORY */
		}
	}

	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}
3914
3915 void
3916 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
3917 struct sctp_abort_chunk *abort, int so_locked
3918 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3919 SCTP_UNUSED
3920 #endif
3921 )
3922 {
3923 if (stcb == NULL) {
3924 return;
3925 }
3926 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3927 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3928 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3929 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3930 }
3931 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3932 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3933 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3934 return;
3935 }
3936 /* Tell them we lost the asoc */
3937 sctp_report_all_outbound(stcb, error, 1, so_locked);
3938 if (from_peer) {
3939 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
3940 } else {
3941 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
3942 }
3943 }
3944
/*
 * Send an ABORT for a packet that cannot be processed and, if a TCB
 * exists, notify the user and free the association.  The vtag used in
 * the outgoing ABORT is the peer's vtag when a TCB is present, 0
 * (reflected tag) otherwise.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct mbuf *op_err,
    uint8_t mflowtype, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		vrf_id = stcb->asoc.vrf_id;
	}
	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
	    mflowtype, mflowid, inp->fibnum,
	    vrf_id, port);
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
		/* Ok, now lets free it */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Apple lock order: the socket lock is required before
		 * freeing the association; hold a refcount across the
		 * TCB unlock/relock.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		/* only established associations count in currestab */
		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
3991 #ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug helper: dump the inbound and outbound TSN tracking logs for an
 * association.  The body is compiled out unless NOSIY_PRINTS is defined.
 * NOTE(review): "NOSIY_PRINTS" looks like a misspelling of "NOISY_PRINTS";
 * as written, defining NOISY_PRINTS will NOT enable these dumps — confirm
 * which spelling other build glue uses before changing it.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	/* if the circular log wrapped, print the older half first */
	if (stcb->asoc.tsn_in_wrapped) {
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
4052 #endif
4053
/*
 * Abort an existing association: send an ABORT chunk to the peer (with
 * the optional cause in op_err), notify the user unless the socket is
 * already gone, and free the association.  With a NULL stcb it can only
 * finish tearing down an inp whose socket is gone and has no more
 * associations.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
				/* last association gone: release the inp */
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* notify the peer */
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	/* only established associations count in currestab */
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Apple lock order: take the socket lock before freeing the
	 * association; the refcount keeps the stcb alive while the TCB
	 * lock is dropped.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
4114
/*
 * Handle an "out of the blue" (OOTB) packet, i.e. one that matches no
 * existing association.  Walks the chunk list to decide the response:
 * stay silent for PACKET-DROPPED / ABORT / SHUTDOWN-COMPLETE, answer a
 * SHUTDOWN-ACK with SHUTDOWN-COMPLETE, otherwise send an ABORT subject
 * to the sctp_blackhole sysctl (1 suppresses the ABORT for INIT only,
 * 2 suppresses it for everything).
 */
void
sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct sctp_inpcb *inp,
    struct mbuf *cause,
    uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_chunkhdr *ch, chunk_buf;
	unsigned int chk_length;
	int contains_init_chunk;

	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
	/* Generate a TO address for future reference */
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
			/* dead socket with no assocs left: reclaim the PCB */
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
			    SCTP_CALLED_DIRECTLY_NOCMPSET);
		}
	}
	contains_init_chunk = 0;
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
	    sizeof(*ch), (uint8_t *)&chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* break to abort land */
			break;
		}
		switch (ch->chunk_type) {
		case SCTP_INIT:
			/* remembered so blackhole==1 can suppress the ABORT */
			contains_init_chunk = 1;
			break;
		case SCTP_PACKET_DROPPED:
			/* we don't respond to pkt-dropped */
			return;
		case SCTP_ABORT_ASSOCIATION:
			/* we don't respond with an ABORT to an ABORT */
			return;
		case SCTP_SHUTDOWN_COMPLETE:
			/*
			 * we ignore it since we are not waiting for it and
			 * peer is gone
			 */
			return;
		case SCTP_SHUTDOWN_ACK:
			sctp_send_shutdown_complete2(src, dst, sh,
			    mflowtype, mflowid, fibnum,
			    vrf_id, port);
			return;
		default:
			break;
		}
		/* advance to the next chunk (chunks are 32-bit padded) */
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *)&chunk_buf);
	}
	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
	    (contains_init_chunk == 0))) {
		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
		    mflowtype, mflowid, fibnum,
		    vrf_id, port);
	}
}
4180
4181 /*
4182 * check the inbound datagram to make sure there is not an abort inside it,
4183 * if there is return 1, else return 0.
4184 */
4185 int
4186 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtagfill)
4187 {
4188 struct sctp_chunkhdr *ch;
4189 struct sctp_init_chunk *init_chk, chunk_buf;
4190 int offset;
4191 unsigned int chk_length;
4192
4193 offset = iphlen + sizeof(struct sctphdr);
4194 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4195 (uint8_t *)&chunk_buf);
4196 while (ch != NULL) {
4197 chk_length = ntohs(ch->chunk_length);
4198 if (chk_length < sizeof(*ch)) {
4199 /* packet is probably corrupt */
4200 break;
4201 }
4202 /* we seem to be ok, is it an abort? */
4203 if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4204 /* yep, tell them */
4205 return (1);
4206 }
4207 if (ch->chunk_type == SCTP_INITIATION) {
4208 /* need to update the Vtag */
4209 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4210 offset, sizeof(*init_chk), (uint8_t *)&chunk_buf);
4211 if (init_chk != NULL) {
4212 *vtagfill = ntohl(init_chk->init.initiate_tag);
4213 }
4214 }
4215 /* Nope, move to the next chunk */
4216 offset += SCTP_SIZE32(chk_length);
4217 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4218 sizeof(*ch), (uint8_t *)&chunk_buf);
4219 }
4220 return (0);
4221 }
4222
4223 /*
4224 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4225 * set (i.e. it's 0) so, create this function to compare link local scopes
4226 */
4227 #ifdef INET6
4228 uint32_t
4229 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4230 {
4231 struct sockaddr_in6 a, b;
4232
4233 /* save copies */
4234 a = *addr1;
4235 b = *addr2;
4236
4237 if (a.sin6_scope_id == 0)
4238 if (sa6_recoverscope(&a)) {
4239 /* can't get scope, so can't match */
4240 return (0);
4241 }
4242 if (b.sin6_scope_id == 0)
4243 if (sa6_recoverscope(&b)) {
4244 /* can't get scope, so can't match */
4245 return (0);
4246 }
4247 if (a.sin6_scope_id != b.sin6_scope_id)
4248 return (0);
4249
4250 return (1);
4251 }
4252
4253 /*
4254 * returns a sockaddr_in6 with embedded scope recovered and removed
4255 */
4256 struct sockaddr_in6 *
4257 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4258 {
4259 /* check and strip embedded scope junk */
4260 if (addr->sin6_family == AF_INET6) {
4261 if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4262 if (addr->sin6_scope_id == 0) {
4263 *store = *addr;
4264 if (!sa6_recoverscope(store)) {
4265 /* use the recovered scope */
4266 addr = store;
4267 }
4268 } else {
4269 /* else, return the original "to" addr */
4270 in6_clearscope(&addr->sin6_addr);
4271 }
4272 }
4273 }
4274 return (addr);
4275 }
4276 #endif
4277
4278 /*
4279 * are the two addresses the same? currently a "scopeless" check returns: 1
4280 * if same, 0 if not
4281 */
4282 int
4283 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4284 {
4285
4286 /* must be valid */
4287 if (sa1 == NULL || sa2 == NULL)
4288 return (0);
4289
4290 /* must be the same family */
4291 if (sa1->sa_family != sa2->sa_family)
4292 return (0);
4293
4294 switch (sa1->sa_family) {
4295 #ifdef INET6
4296 case AF_INET6:
4297 {
4298 /* IPv6 addresses */
4299 struct sockaddr_in6 *sin6_1, *sin6_2;
4300
4301 sin6_1 = (struct sockaddr_in6 *)sa1;
4302 sin6_2 = (struct sockaddr_in6 *)sa2;
4303 return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4304 sin6_2));
4305 }
4306 #endif
4307 #ifdef INET
4308 case AF_INET:
4309 {
4310 /* IPv4 addresses */
4311 struct sockaddr_in *sin_1, *sin_2;
4312
4313 sin_1 = (struct sockaddr_in *)sa1;
4314 sin_2 = (struct sockaddr_in *)sa2;
4315 return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4316 }
4317 #endif
4318 default:
4319 /* we don't do these... */
4320 return (0);
4321 }
4322 }
4323
4324 void
4325 sctp_print_address(struct sockaddr *sa)
4326 {
4327 #ifdef INET6
4328 char ip6buf[INET6_ADDRSTRLEN];
4329 #endif
4330
4331 switch (sa->sa_family) {
4332 #ifdef INET6
4333 case AF_INET6:
4334 {
4335 struct sockaddr_in6 *sin6;
4336
4337 sin6 = (struct sockaddr_in6 *)sa;
4338 SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4339 ip6_sprintf(ip6buf, &sin6->sin6_addr),
4340 ntohs(sin6->sin6_port),
4341 sin6->sin6_scope_id);
4342 break;
4343 }
4344 #endif
4345 #ifdef INET
4346 case AF_INET:
4347 {
4348 struct sockaddr_in *sin;
4349 unsigned char *p;
4350
4351 sin = (struct sockaddr_in *)sa;
4352 p = (unsigned char *)&sin->sin_addr;
4353 SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4354 p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4355 break;
4356 }
4357 #endif
4358 default:
4359 SCTP_PRINTF("?\n");
4360 break;
4361 }
4362 }
4363
/*
 * Move every queued-to-read control block belonging to 'stcb' from the
 * old endpoint's read queue to the new endpoint's (used by peeloff /
 * accept).  The mbuf space is uncharged from the old socket's receive
 * buffer and charged to the new one.  waitflags is passed to sblock().
 */
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for out target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* uncharge the mbuf chain from the old rcv buffer */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* charge the mbuf chain to the new rcv buffer */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4439
/*
 * Wake up any reader sleeping on the endpoint's socket receive buffer.
 * On platforms that need the socket lock here (Apple / lock-testing
 * builds) it is acquired unless the caller indicates it already holds
 * it via so_locked; elsewhere so_locked is unused.
 */
void
sctp_wakeup_the_read_socket(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if ((inp != NULL) && (inp->sctp_socket != NULL)) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(inp);
		if (!so_locked) {
			if (stcb) {
				/* ref the assoc so it survives while unlocked */
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
			}
			SCTP_SOCKET_LOCK(so, 1);
			if (stcb) {
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* socket disappeared while we waited for it */
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
4478
/*
 * Append 'control' (a received message descriptor) to the endpoint's
 * read queue and charge its mbuf chain to socket buffer 'sb' so that
 * select/poll notice the data.  Zero-length mbufs are pruned from the
 * chain first.  'end' marks the message complete; inp_read_lock_held
 * and so_locked describe which locks the caller already owns.
 */
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* no reader anymore: free the data instead of queueing it */
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		sctp_free_a_readq(stcb, control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (!(control->spec_flags & M_NOTIFICATION)) {
		/* only real user data counts toward receive statistics */
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		/* charge this mbuf against the socket buffer */
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		sctp_free_remote_addr(control->whoFrom);
		sctp_free_a_readq(stcb, control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	control->on_read_q = 1;
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	if (inp && inp->sctp_socket) {
		sctp_wakeup_the_read_socket(inp, stcb, so_locked);
	}
}
4576
4577 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4578 *************ALTERNATE ROUTING CODE
4579 */
4580
4581 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4582 *************ALTERNATE ROUTING CODE
4583 */
4584
4585 struct mbuf *
4586 sctp_generate_cause(uint16_t code, char *info)
4587 {
4588 struct mbuf *m;
4589 struct sctp_gen_error_cause *cause;
4590 size_t info_len;
4591 uint16_t len;
4592
4593 if ((code == 0) || (info == NULL)) {
4594 return (NULL);
4595 }
4596 info_len = strlen(info);
4597 if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) {
4598 return (NULL);
4599 }
4600 len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len);
4601 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4602 if (m != NULL) {
4603 SCTP_BUF_LEN(m) = len;
4604 cause = mtod(m, struct sctp_gen_error_cause *);
4605 cause->code = htons(code);
4606 cause->length = htons(len);
4607 memcpy(cause->info, info, info_len);
4608 }
4609 return (m);
4610 }
4611
4612 struct mbuf *
4613 sctp_generate_no_user_data_cause(uint32_t tsn)
4614 {
4615 struct mbuf *m;
4616 struct sctp_error_no_user_data *no_user_data_cause;
4617 uint16_t len;
4618
4619 len = (uint16_t)sizeof(struct sctp_error_no_user_data);
4620 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4621 if (m != NULL) {
4622 SCTP_BUF_LEN(m) = len;
4623 no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
4624 no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
4625 no_user_data_cause->cause.length = htons(len);
4626 no_user_data_cause->tsn = htonl(tsn);
4627 }
4628 return (m);
4629 }
4630
4631 #ifdef SCTP_MBCNT_LOGGING
/*
 * Return the buffer space consumed by chunk tp1 to the association's
 * output accounting and, for 1-to-1 style sockets, to so_snd.  chk_cnt
 * is the number of chunks being released.  Only compiled when
 * SCTP_MBCNT_LOGGING is enabled.
 */
void
sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *tp1, int chk_cnt)
{
	if (tp1->data == NULL) {
		return;
	}
	asoc->chunks_on_out_queue -= chk_cnt;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
		    asoc->total_output_queue_size,
		    tp1->book_size,
		    0,
		    tp1->mbcnt);
	}
	/* clamp at zero rather than letting the counter wrap */
	if (asoc->total_output_queue_size >= tp1->book_size) {
		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
	} else {
		asoc->total_output_queue_size = 0;
	}

	/* 1-to-1 (TCP-model) sockets also account in so_snd */
	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
		} else {
			stcb->sctp_socket->so_snd.sb_cc = 0;

		}
	}
}
4663
4664 #endif
4665
4666 int
4667 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4668 uint8_t sent, int so_locked
4669 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4670 SCTP_UNUSED
4671 #endif
4672 )
4673 {
4674 struct sctp_stream_out *strq;
4675 struct sctp_tmit_chunk *chk = NULL, *tp2;
4676 struct sctp_stream_queue_pending *sp;
4677 uint32_t mid;
4678 uint16_t sid;
4679 uint8_t foundeom = 0;
4680 int ret_sz = 0;
4681 int notdone;
4682 int do_wakeup_routine = 0;
4683
4684 sid = tp1->rec.data.sid;
4685 mid = tp1->rec.data.mid;
4686 if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
4687 stcb->asoc.abandoned_sent[0]++;
4688 stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4689 stcb->asoc.strmout[sid].abandoned_sent[0]++;
4690 #if defined(SCTP_DETAILED_STR_STATS)
4691 stcb->asoc.strmout[stream].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4692 #endif
4693 } else {
4694 stcb->asoc.abandoned_unsent[0]++;
4695 stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4696 stcb->asoc.strmout[sid].abandoned_unsent[0]++;
4697 #if defined(SCTP_DETAILED_STR_STATS)
4698 stcb->asoc.strmout[stream].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4699 #endif
4700 }
4701 do {
4702 ret_sz += tp1->book_size;
4703 if (tp1->data != NULL) {
4704 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4705 sctp_flight_size_decrease(tp1);
4706 sctp_total_flight_decrease(stcb, tp1);
4707 }
4708 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4709 stcb->asoc.peers_rwnd += tp1->send_size;
4710 stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4711 if (sent) {
4712 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4713 } else {
4714 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4715 }
4716 if (tp1->data) {
4717 sctp_m_freem(tp1->data);
4718 tp1->data = NULL;
4719 }
4720 do_wakeup_routine = 1;
4721 if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4722 stcb->asoc.sent_queue_cnt_removeable--;
4723 }
4724 }
4725 tp1->sent = SCTP_FORWARD_TSN_SKIP;
4726 if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4727 SCTP_DATA_NOT_FRAG) {
4728 /* not frag'ed we ae done */
4729 notdone = 0;
4730 foundeom = 1;
4731 } else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4732 /* end of frag, we are done */
4733 notdone = 0;
4734 foundeom = 1;
4735 } else {
4736 /*
4737 * Its a begin or middle piece, we must mark all of
4738 * it
4739 */
4740 notdone = 1;
4741 tp1 = TAILQ_NEXT(tp1, sctp_next);
4742 }
4743 } while (tp1 && notdone);
4744 if (foundeom == 0) {
4745 /*
4746 * The multi-part message was scattered across the send and
4747 * sent queue.
4748 */
4749 TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4750 if ((tp1->rec.data.sid != sid) ||
4751 (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) {
4752 break;
4753 }
4754 /*
4755 * save to chk in case we have some on stream out
4756 * queue. If so and we have an un-transmitted one we
4757 * don't have to fudge the TSN.
4758 */
4759 chk = tp1;
4760 ret_sz += tp1->book_size;
4761 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4762 if (sent) {
4763 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4764 } else {
4765 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4766 }
4767 if (tp1->data) {
4768 sctp_m_freem(tp1->data);
4769 tp1->data = NULL;
4770 }
4771 /* No flight involved here book the size to 0 */
4772 tp1->book_size = 0;
4773 if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4774 foundeom = 1;
4775 }
4776 do_wakeup_routine = 1;
4777 tp1->sent = SCTP_FORWARD_TSN_SKIP;
4778 TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4779 /*
4780 * on to the sent queue so we can wait for it to be
4781 * passed by.
4782 */
4783 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4784 sctp_next);
4785 stcb->asoc.send_queue_cnt--;
4786 stcb->asoc.sent_queue_cnt++;
4787 }
4788 }
4789 if (foundeom == 0) {
4790 /*
4791 * Still no eom found. That means there is stuff left on the
4792 * stream out queue.. yuck.
4793 */
4794 SCTP_TCB_SEND_LOCK(stcb);
4795 strq = &stcb->asoc.strmout[sid];
4796 sp = TAILQ_FIRST(&strq->outqueue);
4797 if (sp != NULL) {
4798 sp->discard_rest = 1;
4799 /*
4800 * We may need to put a chunk on the queue that
4801 * holds the TSN that would have been sent with the
4802 * LAST bit.
4803 */
4804 if (chk == NULL) {
4805 /* Yep, we have to */
4806 sctp_alloc_a_chunk(stcb, chk);
4807 if (chk == NULL) {
4808 /*
4809 * we are hosed. All we can do is
4810 * nothing.. which will cause an
4811 * abort if the peer is paying
4812 * attention.
4813 */
4814 goto oh_well;
4815 }
4816 memset(chk, 0, sizeof(*chk));
4817 chk->rec.data.rcv_flags = 0;
4818 chk->sent = SCTP_FORWARD_TSN_SKIP;
4819 chk->asoc = &stcb->asoc;
4820 if (stcb->asoc.idata_supported == 0) {
4821 if (sp->sinfo_flags & SCTP_UNORDERED) {
4822 chk->rec.data.mid = 0;
4823 } else {
4824 chk->rec.data.mid = strq->next_mid_ordered;
4825 }
4826 } else {
4827 if (sp->sinfo_flags & SCTP_UNORDERED) {
4828 chk->rec.data.mid = strq->next_mid_unordered;
4829 } else {
4830 chk->rec.data.mid = strq->next_mid_ordered;
4831 }
4832 }
4833 chk->rec.data.sid = sp->sid;
4834 chk->rec.data.ppid = sp->ppid;
4835 chk->rec.data.context = sp->context;
4836 chk->flags = sp->act_flags;
4837 chk->whoTo = NULL;
4838 chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4839 strq->chunks_on_queues++;
4840 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4841 stcb->asoc.sent_queue_cnt++;
4842 stcb->asoc.pr_sctp_cnt++;
4843 }
4844 chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4845 if (sp->sinfo_flags & SCTP_UNORDERED) {
4846 chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED;
4847 }
4848 if (stcb->asoc.idata_supported == 0) {
4849 if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) {
4850 strq->next_mid_ordered++;
4851 }
4852 } else {
4853 if (sp->sinfo_flags & SCTP_UNORDERED) {
4854 strq->next_mid_unordered++;
4855 } else {
4856 strq->next_mid_ordered++;
4857 }
4858 }
4859 oh_well:
4860 if (sp->data) {
4861 /*
4862 * Pull any data to free up the SB and allow
4863 * sender to "add more" while we will throw
4864 * away :-)
4865 */
4866 sctp_free_spbufspace(stcb, &stcb->asoc, sp);
4867 ret_sz += sp->length;
4868 do_wakeup_routine = 1;
4869 sp->some_taken = 1;
4870 sctp_m_freem(sp->data);
4871 sp->data = NULL;
4872 sp->tail_mbuf = NULL;
4873 sp->length = 0;
4874 }
4875 }
4876 SCTP_TCB_SEND_UNLOCK(stcb);
4877 }
4878 if (do_wakeup_routine) {
4879 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4880 struct socket *so;
4881
4882 so = SCTP_INP_SO(stcb->sctp_ep);
4883 if (!so_locked) {
4884 atomic_add_int(&stcb->asoc.refcnt, 1);
4885 SCTP_TCB_UNLOCK(stcb);
4886 SCTP_SOCKET_LOCK(so, 1);
4887 SCTP_TCB_LOCK(stcb);
4888 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4889 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4890 /* assoc was freed while we were unlocked */
4891 SCTP_SOCKET_UNLOCK(so, 1);
4892 return (ret_sz);
4893 }
4894 }
4895 #endif
4896 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4897 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4898 if (!so_locked) {
4899 SCTP_SOCKET_UNLOCK(so, 1);
4900 }
4901 #endif
4902 }
4903 return (ret_sz);
4904 }
4905
4906 /*
4907 * checks to see if the given address, sa, is one that is currently known by
4908 * the kernel note: can't distinguish the same address on multiple interfaces
4909 * and doesn't handle multiple addresses with different zone/scope id's note:
4910 * ifa_ifwithaddr() compares the entire sockaddr struct
4911 */
4912 struct sctp_ifa *
4913 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4914 int holds_lock)
4915 {
4916 struct sctp_laddr *laddr;
4917
4918 if (holds_lock == 0) {
4919 SCTP_INP_RLOCK(inp);
4920 }
4921 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4922 if (laddr->ifa == NULL)
4923 continue;
4924 if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4925 continue;
4926 #ifdef INET
4927 if (addr->sa_family == AF_INET) {
4928 if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4929 laddr->ifa->address.sin.sin_addr.s_addr) {
4930 /* found him. */
4931 if (holds_lock == 0) {
4932 SCTP_INP_RUNLOCK(inp);
4933 }
4934 return (laddr->ifa);
4935 break;
4936 }
4937 }
4938 #endif
4939 #ifdef INET6
4940 if (addr->sa_family == AF_INET6) {
4941 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4942 &laddr->ifa->address.sin6)) {
4943 /* found him. */
4944 if (holds_lock == 0) {
4945 SCTP_INP_RUNLOCK(inp);
4946 }
4947 return (laddr->ifa);
4948 break;
4949 }
4950 }
4951 #endif
4952 }
4953 if (holds_lock == 0) {
4954 SCTP_INP_RUNLOCK(inp);
4955 }
4956 return (NULL);
4957 }
4958
4959 uint32_t
4960 sctp_get_ifa_hash_val(struct sockaddr *addr)
4961 {
4962 switch (addr->sa_family) {
4963 #ifdef INET
4964 case AF_INET:
4965 {
4966 struct sockaddr_in *sin;
4967
4968 sin = (struct sockaddr_in *)addr;
4969 return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
4970 }
4971 #endif
4972 #ifdef INET6
4973 case AF_INET6:
4974 {
4975 struct sockaddr_in6 *sin6;
4976 uint32_t hash_of_addr;
4977
4978 sin6 = (struct sockaddr_in6 *)addr;
4979 hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
4980 sin6->sin6_addr.s6_addr32[1] +
4981 sin6->sin6_addr.s6_addr32[2] +
4982 sin6->sin6_addr.s6_addr32[3]);
4983 hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
4984 return (hash_of_addr);
4985 }
4986 #endif
4987 default:
4988 break;
4989 }
4990 return (0);
4991 }
4992
4993 struct sctp_ifa *
4994 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
4995 {
4996 struct sctp_ifa *sctp_ifap;
4997 struct sctp_vrf *vrf;
4998 struct sctp_ifalist *hash_head;
4999 uint32_t hash_of_addr;
5000
5001 if (holds_lock == 0)
5002 SCTP_IPI_ADDR_RLOCK();
5003
5004 vrf = sctp_find_vrf(vrf_id);
5005 if (vrf == NULL) {
5006 if (holds_lock == 0)
5007 SCTP_IPI_ADDR_RUNLOCK();
5008 return (NULL);
5009 }
5010 hash_of_addr = sctp_get_ifa_hash_val(addr);
5011
5012 hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5013 if (hash_head == NULL) {
5014 SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5015 hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark,
5016 (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark));
5017 sctp_print_address(addr);
5018 SCTP_PRINTF("No such bucket for address\n");
5019 if (holds_lock == 0)
5020 SCTP_IPI_ADDR_RUNLOCK();
5021
5022 return (NULL);
5023 }
5024 LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5025 if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5026 continue;
5027 #ifdef INET
5028 if (addr->sa_family == AF_INET) {
5029 if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5030 sctp_ifap->address.sin.sin_addr.s_addr) {
5031 /* found him. */
5032 if (holds_lock == 0)
5033 SCTP_IPI_ADDR_RUNLOCK();
5034 return (sctp_ifap);
5035 break;
5036 }
5037 }
5038 #endif
5039 #ifdef INET6
5040 if (addr->sa_family == AF_INET6) {
5041 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5042 &sctp_ifap->address.sin6)) {
5043 /* found him. */
5044 if (holds_lock == 0)
5045 SCTP_IPI_ADDR_RUNLOCK();
5046 return (sctp_ifap);
5047 break;
5048 }
5049 }
5050 #endif
5051 }
5052 if (holds_lock == 0)
5053 SCTP_IPI_ADDR_RUNLOCK();
5054 return (NULL);
5055 }
5056
/*
 * Called after the user pulled data off the socket: decide whether the
 * receive window has opened enough (by at least rwnd_req bytes) to
 * justify sending a window-update SACK right away.  *freed_so_far
 * accumulates bytes freed since the last check and is reset here.
 * hold_rlock indicates the caller owns the INP read lock, which must
 * be dropped around the TCB-locked work and retaken before returning.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* hold a reference so the assoc can't be freed under us */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		if (hold_rlock) {
			/* drop the read lock before taking the TCB lock */
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	if (so && r_unlocked && hold_rlock) {
		/* restore the read-lock state the caller expects */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5139
/*
 * Receive-side workhorse for SCTP sockets.
 *
 * Pulls one message (or as much of one as the caller asked for) off the
 * endpoint's read_queue.  Data is delivered either by copying into the
 * caller's uio (mp == NULL) or by handing back the raw mbuf chain in *mp.
 * Optionally fills 'from' with the peer address and 'sinfo' with
 * sctp_sndrcvinfo/extrcvinfo metadata (filling_sinfo != 0).
 *
 * Locking protocol (tracked by local flags):
 *   sockbuf_lock - sblock() on so->so_rcv, held across the whole call so
 *                  only one reader runs at a time.
 *   hold_sblock  - SOCKBUF_LOCK, taken while inspecting sb_cc / sleeping.
 *   hold_rlock   - SCTP_INP_READ_LOCK, taken while touching read_queue.
 * freecnt_applied records that we bumped stcb->asoc.refcnt so the assoc
 * cannot be freed under us; it is dropped on the way out.
 *
 * Returns 0 on success (out-flags such as MSG_EOR / MSG_NOTIFICATION /
 * MSG_TRUNC are reported through *msg_flags) or an errno value.
 */
int
sctp_sorecvmsg(struct socket *so,
    struct uio *uio,
    struct mbuf **mp,
    struct sockaddr *from,
    int fromlen,
    int *msg_flags,
    struct sctp_sndrcvinfo *sinfo,
    int filling_sinfo)
{
	/*
	 * MSG flags we will look at: MSG_DONTWAIT - non-blocking IO.
	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
	 * On the way out we may send out any combination of:
	 * MSG_NOTIFICATION MSG_EOR
	 *
	 */
	struct sctp_inpcb *inp = NULL;
	int my_len = 0;
	int cp_len = 0, error = 0;
	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
	struct mbuf *m = NULL;
	struct sctp_tcb *stcb = NULL;
	int wakeup_read_socket = 0;
	int freecnt_applied = 0;
	int out_flags = 0, in_flags = 0;
	int block_allowed = 1;
	uint32_t freed_so_far = 0;
	uint32_t copied_so_far = 0;
	int in_eeor_mode = 0;
	int no_rcv_needed = 0;
	uint32_t rwnd_req = 0;
	int hold_sblock = 0;
	int hold_rlock = 0;
	ssize_t slen = 0;
	uint32_t held_length = 0;
	int sockbuf_lock = 0;

	/* Argument validation: a uio is mandatory, even when mp is used. */
	if (uio == NULL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		return (EINVAL);
	}
	if (msg_flags) {
		in_flags = *msg_flags;
		if (in_flags & MSG_PEEK)
			SCTP_STAT_INCR(sctps_read_peeks);
	} else {
		in_flags = 0;
	}
	/* Remember the original request size for the exit-time logging. */
	slen = uio->uio_resid;

	/* Pull in and set up our int flags */
	if (in_flags & MSG_OOB) {
		/* Out of band's NOT supported */
		return (EOPNOTSUPP);
	}
	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
		/* PEEK only works with the uio copy path. */
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		return (EINVAL);
	}
	if ((in_flags & (MSG_DONTWAIT
	    | MSG_NBIO
	    )) ||
	    SCTP_SO_IS_NBIO(so)) {
		block_allowed = 0;
	}
	/* setup the endpoint */
	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
		return (EFAULT);
	}
	/*
	 * rwnd_req: how much must be freed before it is worth telling the
	 * peer about the newly opened receive window (see sctp_user_rcvd).
	 */
	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
	/* Must be at least a MTU's worth */
	if (rwnd_req < SCTP_MIN_RWND)
		rwnd_req = SCTP_MIN_RWND;
	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
		sctp_misc_ints(SCTP_SORECV_ENTER,
		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
		sctp_misc_ints(SCTP_SORECV_ENTERPL,
		    rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
	}
	/* Serialize against other readers for the whole receive. */
	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
	if (error) {
		goto release_unlocked;
	}
	sockbuf_lock = 1;
restart:


restart_nosblocks:
	/*
	 * (Re-)take the socket-buffer lock and re-validate the socket
	 * state; we loop back here every time we must wait for data or
	 * re-scan the read_queue.
	 */
	if (hold_sblock == 0) {
		SOCKBUF_LOCK(&so->so_rcv);
		hold_sblock = 1;
	}
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
		if (so->so_error) {
			error = so->so_error;
			if ((in_flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto out;
		} else {
			if (so->so_rcv.sb_cc == 0) {
				/* indicate EOF */
				error = 0;
				goto out;
			}
		}
	}
	if (so->so_rcv.sb_cc <= held_length) {
		/* Nothing (new) to read yet. */
		if (so->so_error) {
			error = so->so_error;
			if ((in_flags & MSG_PEEK) == 0) {
				so->so_error = 0;
			}
			goto out;
		}
		if ((so->so_rcv.sb_cc == 0) &&
		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
				/*
				 * For active open side clear flags for
				 * re-use passive open is blocked by
				 * connect.
				 */
				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
					/*
					 * You were aborted, passive side
					 * always hits here
					 */
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
					error = ECONNRESET;
				}
				so->so_state &= ~(SS_ISCONNECTING |
				    SS_ISDISCONNECTING |
				    SS_ISCONFIRMING |
				    SS_ISCONNECTED);
				if (error == 0) {
					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
						error = ENOTCONN;
					}
				}
				goto out;
			}
		}
		if (block_allowed) {
			/* Sleep until the protocol appends data. */
			error = sbwait(&so->so_rcv);
			if (error) {
				goto out;
			}
			held_length = 0;
			goto restart_nosblocks;
		} else {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
			error = EWOULDBLOCK;
			goto out;
		}
	}
	if (hold_sblock == 1) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	/* we possibly have data we can read */
	/* sa_ignore FREED_MEMORY */
	control = TAILQ_FIRST(&inp->read_queue);
	if (control == NULL) {
		/*
		 * This could be happening since the appender did the
		 * increment but has not yet done the tailq insert onto the
		 * read_queue
		 */
		if (hold_rlock == 0) {
			SCTP_INP_READ_LOCK(inp);
		}
		control = TAILQ_FIRST(&inp->read_queue);
		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
#ifdef INVARIANTS
			panic("Huh, its non zero and nothing on control?");
#endif
			so->so_rcv.sb_cc = 0;
		}
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
		goto restart;
	}
	if ((control->length == 0) &&
	    (control->do_not_ref_stcb)) {
		/*
		 * Clean up code for freeing assoc that left behind a
		 * pdapi.. maybe a peer in EEOR that just closed after
		 * sending and never indicated a EOR.
		 */
		if (hold_rlock == 0) {
			hold_rlock = 1;
			SCTP_INP_READ_LOCK(inp);
		}
		control->held_length = 0;
		if (control->data) {
			/* Hmm there is data here .. fix */
			struct mbuf *m_tmp;
			int cnt = 0;

			/* Recompute length from the chain and mark it complete. */
			m_tmp = control->data;
			while (m_tmp) {
				cnt += SCTP_BUF_LEN(m_tmp);
				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
					control->tail_mbuf = m_tmp;
					control->end_added = 1;
				}
				m_tmp = SCTP_BUF_NEXT(m_tmp);
			}
			control->length = cnt;
		} else {
			/* remove it */
			TAILQ_REMOVE(&inp->read_queue, control, next);
			/* Add back any hiddend data */
			sctp_free_remote_addr(control->whoFrom);
			sctp_free_a_readq(stcb, control);
		}
		if (hold_rlock) {
			hold_rlock = 0;
			SCTP_INP_READ_UNLOCK(inp);
		}
		goto restart;
	}
	if ((control->length == 0) &&
	    (control->end_added == 1)) {
		/*
		 * Empty but complete message: discard it and rescan. Do we
		 * also need to check for (control->pdapi_aborted == 1)?
		 */
		if (hold_rlock == 0) {
			hold_rlock = 1;
			SCTP_INP_READ_LOCK(inp);
		}
		TAILQ_REMOVE(&inp->read_queue, control, next);
		if (control->data) {
#ifdef INVARIANTS
			panic("control->data not null but control->length == 0");
#else
			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
			sctp_m_freem(control->data);
			control->data = NULL;
#endif
		}
		if (control->aux_data) {
			sctp_m_free(control->aux_data);
			control->aux_data = NULL;
		}
#ifdef INVARIANTS
		if (control->on_strm_q) {
			panic("About to free ctl:%p so:%p and its in %d",
			    control, so, control->on_strm_q);
		}
#endif
		sctp_free_remote_addr(control->whoFrom);
		sctp_free_a_readq(stcb, control);
		if (hold_rlock) {
			hold_rlock = 0;
			SCTP_INP_READ_UNLOCK(inp);
		}
		goto restart;
	}
	if (control->length == 0) {
		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
		    (filling_sinfo)) {
			/* find a more suitable one then this */
			ctl = TAILQ_NEXT(control, next);
			while (ctl) {
				if ((ctl->stcb != control->stcb) && (ctl->length) &&
				    (ctl->some_taken ||
				    (ctl->spec_flags & M_NOTIFICATION) ||
				    ((ctl->do_not_ref_stcb == 0) &&
				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
				    ) {
					/*-
					 * If we have a different TCB next, and there is data
					 * present. If we have already taken some (pdapi), OR we can
					 * ref the tcb and no delivery as started on this stream, we
					 * take it. Note we allow a notification on a different
					 * assoc to be delivered..
					 */
					control = ctl;
					goto found_one;
				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
				    (ctl->length) &&
				    ((ctl->some_taken) ||
				    ((ctl->do_not_ref_stcb == 0) &&
				    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
					/*-
					 * If we have the same tcb, and there is data present, and we
					 * have the strm interleave feature present. Then if we have
					 * taken some (pdapi) or we can refer to tht tcb AND we have
					 * not started a delivery for this stream, we can take it.
					 * Note we do NOT allow a notificaiton on the same assoc to
					 * be delivered.
					 */
					control = ctl;
					goto found_one;
				}
				ctl = TAILQ_NEXT(ctl, next);
			}
		}
		/*
		 * if we reach here, no suitable replacement is available
		 * <or> fragment interleave is NOT on. So stuff the sb_cc
		 * into the our held count, and its time to sleep again.
		 */
		held_length = so->so_rcv.sb_cc;
		control->held_length = so->so_rcv.sb_cc;
		goto restart;
	}
	/* Clear the held length since there is something to read */
	control->held_length = 0;
found_one:
	/*
	 * If we reach here, control has a some data for us to read off.
	 * Note that stcb COULD be NULL.
	 */
	if (hold_rlock == 0) {
		hold_rlock = 1;
		SCTP_INP_READ_LOCK(inp);
	}
	control->some_taken++;
	stcb = control->stcb;
	if (stcb) {
		if ((control->do_not_ref_stcb == 0) &&
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
			/* Assoc is dying: do not take a new reference. */
			if (freecnt_applied == 0)
				stcb = NULL;
		} else if (control->do_not_ref_stcb == 0) {
			/* you can't free it on me please */
			/*
			 * The lock on the socket buffer protects us so the
			 * free code will stop. But since we used the
			 * socketbuf lock and the sender uses the tcb_lock
			 * to increment, we need to use the atomic add to
			 * the refcnt
			 */
			if (freecnt_applied) {
#ifdef INVARIANTS
				panic("refcnt already incremented");
#else
				SCTP_PRINTF("refcnt already incremented?\n");
#endif
			} else {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				freecnt_applied = 1;
			}
			/*
			 * Setup to remember how much we have not yet told
			 * the peer our rwnd has opened up. Note we grab the
			 * value from the tcb from last time. Note too that
			 * sack sending clears this when a sack is sent,
			 * which is fine. Once we hit the rwnd_req, we then
			 * will go to the sctp_user_rcvd() that will not
			 * lock until it KNOWs it MUST send a WUP-SACK.
			 */
			freed_so_far = stcb->freed_by_sorcv_sincelast;
			stcb->freed_by_sorcv_sincelast = 0;
		}
	}
	if (stcb &&
	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
	    control->do_not_ref_stcb == 0) {
		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
	}
	/* First lets get off the sinfo and sockaddr info */
	if ((sinfo != NULL) && (filling_sinfo != 0)) {
		sinfo->sinfo_stream = control->sinfo_stream;
		sinfo->sinfo_ssn = (uint16_t)control->mid;
		sinfo->sinfo_flags = control->sinfo_flags;
		sinfo->sinfo_ppid = control->sinfo_ppid;
		sinfo->sinfo_context = control->sinfo_context;
		sinfo->sinfo_timetolive = control->sinfo_timetolive;
		sinfo->sinfo_tsn = control->sinfo_tsn;
		sinfo->sinfo_cumtsn = control->sinfo_cumtsn;
		sinfo->sinfo_assoc_id = control->sinfo_assoc_id;
		nxt = TAILQ_NEXT(control, next);
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
			/* Caller wants a peek at the NEXT queued message too. */
			struct sctp_extrcvinfo *s_extra;

			s_extra = (struct sctp_extrcvinfo *)sinfo;
			if ((nxt) &&
			    (nxt->length)) {
				s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
				if (nxt->sinfo_flags & SCTP_UNORDERED) {
					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
				}
				if (nxt->spec_flags & M_NOTIFICATION) {
					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
				}
				s_extra->serinfo_next_aid = nxt->sinfo_assoc_id;
				s_extra->serinfo_next_length = nxt->length;
				s_extra->serinfo_next_ppid = nxt->sinfo_ppid;
				s_extra->serinfo_next_stream = nxt->sinfo_stream;
				if (nxt->tail_mbuf != NULL) {
					if (nxt->end_added) {
						s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
					}
				}
			} else {
				/*
				 * we explicitly 0 this, since the memcpy
				 * got some other things beyond the older
				 * sinfo_ that is on the control's structure
				 * :-D
				 */
				nxt = NULL;
				s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
				s_extra->serinfo_next_aid = 0;
				s_extra->serinfo_next_length = 0;
				s_extra->serinfo_next_ppid = 0;
				s_extra->serinfo_next_stream = 0;
			}
		}
		/*
		 * update off the real current cum-ack, if we have an stcb.
		 */
		if ((control->do_not_ref_stcb == 0) && stcb)
			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
		/*
		 * mask off the high bits, we keep the actual chunk bits in
		 * there.
		 */
		sinfo->sinfo_flags &= 0x00ff;
		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
			sinfo->sinfo_flags |= SCTP_UNORDERED;
		}
	}
#ifdef SCTP_ASOCLOG_OF_TSNS
	{
		/* Lock-free ring-buffer log of what was read (debug build). */
		int index, newindex;
		struct sctp_pcbtsn_rlog *entry;

		do {
			index = inp->readlog_index;
			newindex = index + 1;
			if (newindex >= SCTP_READ_LOG_SIZE) {
				newindex = 0;
			}
		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
		entry = &inp->readlog[index];
		entry->vtag = control->sinfo_assoc_id;
		entry->strm = control->sinfo_stream;
		entry->seq = (uint16_t)control->mid;
		entry->sz = control->length;
		entry->flgs = control->sinfo_flags;
	}
#endif
	if ((fromlen > 0) && (from != NULL)) {
		/* Copy out the peer address, truncated to fromlen. */
		union sctp_sockstore store;
		size_t len;

		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
#ifdef INET6
		case AF_INET6:
			len = sizeof(struct sockaddr_in6);
			store.sin6 = control->whoFrom->ro._l_addr.sin6;
			store.sin6.sin6_port = control->port_from;
			break;
#endif
#ifdef INET
		case AF_INET:
#ifdef INET6
			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
				len = sizeof(struct sockaddr_in6);
				in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
				    &store.sin6);
				store.sin6.sin6_port = control->port_from;
			} else {
				len = sizeof(struct sockaddr_in);
				store.sin = control->whoFrom->ro._l_addr.sin;
				store.sin.sin_port = control->port_from;
			}
#else
			len = sizeof(struct sockaddr_in);
			store.sin = control->whoFrom->ro._l_addr.sin;
			store.sin.sin_port = control->port_from;
#endif
			break;
#endif
		default:
			len = 0;
			break;
		}
		memcpy(from, &store, min((size_t)fromlen, len));
#ifdef INET6
		{
			struct sockaddr_in6 lsa6, *from6;

			from6 = (struct sockaddr_in6 *)from;
			sctp_recover_scope_mac(from6, (&lsa6));
		}
#endif
	}
	if (hold_rlock) {
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
	}
	if (hold_sblock) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	/* now copy out what data we can */
	if (mp == NULL) {
		/* copy out each mbuf in the chain up to length */
get_more_data:
		m = control->data;
		while (m) {
			/* Move out all we can */
			cp_len = (int)uio->uio_resid;
			my_len = (int)SCTP_BUF_LEN(m);
			if (cp_len > my_len) {
				/* not enough in this buf */
				cp_len = my_len;
			}
			/* Drop the read lock: uiomove can sleep/fault. */
			if (hold_rlock) {
				SCTP_INP_READ_UNLOCK(inp);
				hold_rlock = 0;
			}
			if (cp_len > 0)
				error = uiomove(mtod(m, char *), cp_len, uio);
			/* re-read */
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				goto release;
			}
			if ((control->do_not_ref_stcb == 0) && stcb &&
			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
				no_rcv_needed = 1;
			}
			if (error) {
				/* error we are out of here */
				goto release;
			}
			SCTP_INP_READ_LOCK(inp);
			hold_rlock = 1;
			if (cp_len == SCTP_BUF_LEN(m)) {
				/* The whole mbuf was consumed. */
				if ((SCTP_BUF_NEXT(m) == NULL) &&
				    (control->end_added)) {
					out_flags |= MSG_EOR;
					if ((control->do_not_ref_stcb == 0) &&
					    (control->stcb != NULL) &&
					    ((control->spec_flags & M_NOTIFICATION) == 0))
						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
				}
				if (control->spec_flags & M_NOTIFICATION) {
					out_flags |= MSG_NOTIFICATION;
				}
				/* we ate up the mbuf */
				if (in_flags & MSG_PEEK) {
					/* just looking */
					m = SCTP_BUF_NEXT(m);
					copied_so_far += cp_len;
				} else {
					/* dispose of the mbuf */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv,
						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
					}
					sctp_sbfree(control, stcb, &so->so_rcv, m);
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv,
						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
					}
					copied_so_far += cp_len;
					freed_so_far += cp_len;
					freed_so_far += MSIZE;
					atomic_subtract_int(&control->length, cp_len);
					control->data = sctp_m_free(m);
					m = control->data;
					/*
					 * been through it all, must hold sb
					 * lock ok to null tail
					 */
					if (control->data == NULL) {
#ifdef INVARIANTS
						if ((control->end_added == 0) ||
						    (TAILQ_NEXT(control, next) == NULL)) {
							/*
							 * If the end is not
							 * added, OR the
							 * next is NOT null
							 * we MUST have the
							 * lock.
							 */
							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
								panic("Hmm we don't own the lock?");
							}
						}
#endif
						control->tail_mbuf = NULL;
#ifdef INVARIANTS
						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
							panic("end_added, nothing left and no MSG_EOR");
						}
#endif
					}
				}
			} else {
				/* Do we need to trim the mbuf? */
				if (control->spec_flags & M_NOTIFICATION) {
					out_flags |= MSG_NOTIFICATION;
				}
				if ((in_flags & MSG_PEEK) == 0) {
					/* Partial read: advance the mbuf data pointer. */
					SCTP_BUF_RESV_UF(m, cp_len);
					SCTP_BUF_LEN(m) -= cp_len;
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
					}
					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
					if ((control->do_not_ref_stcb == 0) &&
					    stcb) {
						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
					}
					copied_so_far += cp_len;
					freed_so_far += cp_len;
					freed_so_far += MSIZE;
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
						    SCTP_LOG_SBRESULT, 0);
					}
					atomic_subtract_int(&control->length, cp_len);
				} else {
					copied_so_far += cp_len;
				}
			}
			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
				break;
			}
			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
			    (control->do_not_ref_stcb == 0) &&
			    (freed_so_far >= rwnd_req)) {
				/* Enough freed: consider a window-update SACK. */
				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
			}
		} /* end while(m) */
		/*
		 * At this point we have looked at it all and we either have
		 * a MSG_EOR/or read all the user wants... <OR>
		 * control->length == 0.
		 */
		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
			/* we are done with this control */
			if (control->length == 0) {
				if (control->data) {
#ifdef INVARIANTS
					panic("control->data not null at read eor?");
#else
					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
					sctp_m_freem(control->data);
					control->data = NULL;
#endif
				}
done_with_control:
				/* Unlink the control and free its resources. */
				if (hold_rlock == 0) {
					SCTP_INP_READ_LOCK(inp);
					hold_rlock = 1;
				}
				TAILQ_REMOVE(&inp->read_queue, control, next);
				/* Add back any hiddend data */
				if (control->held_length) {
					held_length = 0;
					control->held_length = 0;
					wakeup_read_socket = 1;
				}
				if (control->aux_data) {
					sctp_m_free(control->aux_data);
					control->aux_data = NULL;
				}
				no_rcv_needed = control->do_not_ref_stcb;
				sctp_free_remote_addr(control->whoFrom);
				control->data = NULL;
#ifdef INVARIANTS
				if (control->on_strm_q) {
					panic("About to free ctl:%p so:%p and its in %d",
					    control, so, control->on_strm_q);
				}
#endif
				sctp_free_a_readq(stcb, control);
				control = NULL;
				if ((freed_so_far >= rwnd_req) &&
				    (no_rcv_needed == 0))
					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);

			} else {
				/*
				 * The user did not read all of this
				 * message, turn off the returned MSG_EOR
				 * since we are leaving more behind on the
				 * control to read.
				 */
#ifdef INVARIANTS
				if (control->end_added &&
				    (control->data == NULL) &&
				    (control->tail_mbuf == NULL)) {
					panic("Gak, control->length is corrupt?");
				}
#endif
				no_rcv_needed = control->do_not_ref_stcb;
				out_flags &= ~MSG_EOR;
			}
		}
		if (out_flags & MSG_EOR) {
			goto release;
		}
		if ((uio->uio_resid == 0) ||
		    ((in_eeor_mode) &&
		    (copied_so_far >= (uint32_t)max(so->so_rcv.sb_lowat, 1)))) {
			goto release;
		}
		/*
		 * If I hit here the receiver wants more and this message is
		 * NOT done (pd-api). So two questions. Can we block? if not
		 * we are done. Did the user NOT set MSG_WAITALL?
		 */
		if (block_allowed == 0) {
			goto release;
		}
		/*
		 * We need to wait for more data a few things: - We don't
		 * sbunlock() so we don't get someone else reading. - We
		 * must be sure to account for the case where what is added
		 * is NOT to our control when we wakeup.
		 */

		/*
		 * Do we need to tell the transport a rwnd update might be
		 * needed before we go to sleep?
		 */
		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
		    ((freed_so_far >= rwnd_req) &&
		    (control->do_not_ref_stcb == 0) &&
		    (no_rcv_needed == 0))) {
			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
		}
wait_some_more:
		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
			goto release;
		}
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
			goto release;

		if (hold_rlock == 1) {
			SCTP_INP_READ_UNLOCK(inp);
			hold_rlock = 0;
		}
		if (hold_sblock == 0) {
			SOCKBUF_LOCK(&so->so_rcv);
			hold_sblock = 1;
		}
		if ((copied_so_far) && (control->length == 0) &&
		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
			goto release;
		}
		if (so->so_rcv.sb_cc <= control->held_length) {
			error = sbwait(&so->so_rcv);
			if (error) {
				goto release;
			}
			control->held_length = 0;
		}
		if (hold_sblock) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			hold_sblock = 0;
		}
		if (control->length == 0) {
			/* still nothing here */
			if (control->end_added == 1) {
				/* he aborted, or is done i.e.did a shutdown */
				out_flags |= MSG_EOR;
				if (control->pdapi_aborted) {
					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;

					out_flags |= MSG_TRUNC;
				} else {
					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
				}
				goto done_with_control;
			}
			if (so->so_rcv.sb_cc > held_length) {
				control->held_length = so->so_rcv.sb_cc;
				held_length = 0;
			}
			goto wait_some_more;
		} else if (control->data == NULL) {
			/*
			 * we must re-sync since data is probably being
			 * added
			 */
			SCTP_INP_READ_LOCK(inp);
			if ((control->length > 0) && (control->data == NULL)) {
				/*
				 * big trouble.. we have the lock and its
				 * corrupt?
				 */
#ifdef INVARIANTS
				panic("Impossible data==NULL length !=0");
#endif
				out_flags |= MSG_EOR;
				out_flags |= MSG_TRUNC;
				control->length = 0;
				SCTP_INP_READ_UNLOCK(inp);
				goto done_with_control;
			}
			SCTP_INP_READ_UNLOCK(inp);
			/* We will fall around to get more data */
		}
		goto get_more_data;
	} else {
		/*-
		 * Give caller back the mbuf chain,
		 * store in uio_resid the length
		 */
		wakeup_read_socket = 0;
		if ((control->end_added == 0) ||
		    (TAILQ_NEXT(control, next) == NULL)) {
			/* Need to get rlock */
			if (hold_rlock == 0) {
				SCTP_INP_READ_LOCK(inp);
				hold_rlock = 1;
			}
		}
		if (control->end_added) {
			out_flags |= MSG_EOR;
			if ((control->do_not_ref_stcb == 0) &&
			    (control->stcb != NULL) &&
			    ((control->spec_flags & M_NOTIFICATION) == 0))
				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
		}
		if (control->spec_flags & M_NOTIFICATION) {
			out_flags |= MSG_NOTIFICATION;
		}
		uio->uio_resid = control->length;
		*mp = control->data;
		m = control->data;
		while (m) {
			/* Account each mbuf out of the socket buffer. */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&so->so_rcv,
				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
			}
			sctp_sbfree(control, stcb, &so->so_rcv, m);
			freed_so_far += SCTP_BUF_LEN(m);
			freed_so_far += MSIZE;
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&so->so_rcv,
				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
		control->data = control->tail_mbuf = NULL;
		control->length = 0;
		if (out_flags & MSG_EOR) {
			/* Done with this control */
			goto done_with_control;
		}
	}
release:
	/* Common exit: drop any lock still held, then release sblock. */
	if (hold_rlock == 1) {
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
	}
	if (hold_sblock == 1) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	sbunlock(&so->so_rcv);
	sockbuf_lock = 0;

release_unlocked:
	if (hold_sblock) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
		if ((freed_so_far >= rwnd_req) &&
		    (control && (control->do_not_ref_stcb == 0)) &&
		    (no_rcv_needed == 0))
			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
	}
out:
	if (msg_flags) {
		*msg_flags = out_flags;
	}
	if (((out_flags & MSG_EOR) == 0) &&
	    ((in_flags & MSG_PEEK) == 0) &&
	    (sinfo) &&
	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
		/* Partial delivery: there is no meaningful "next message". */
		struct sctp_extrcvinfo *s_extra;

		s_extra = (struct sctp_extrcvinfo *)sinfo;
		s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
	}
	if (hold_rlock == 1) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	if (hold_sblock) {
		SOCKBUF_UNLOCK(&so->so_rcv);
	}
	if (sockbuf_lock) {
		sbunlock(&so->so_rcv);
	}
	if (freecnt_applied) {
		/*
		 * The lock on the socket buffer protects us so the free
		 * code will stop. But since we used the socketbuf lock and
		 * the sender uses the tcb_lock to increment, we need to use
		 * the atomic add to the refcnt.
		 */
		if (stcb == NULL) {
#ifdef INVARIANTS
			panic("stcb for refcnt has gone NULL?");
			goto stage_left;
#else
			goto stage_left;
#endif
		}
		/* Save the value back for next time */
		stcb->freed_by_sorcv_sincelast = freed_so_far;
		atomic_add_int(&stcb->asoc.refcnt, -1);
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
		if (stcb) {
			sctp_misc_ints(SCTP_SORECV_DONE,
			    freed_so_far,
			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
			    stcb->asoc.my_rwnd,
			    so->so_rcv.sb_cc);
		} else {
			sctp_misc_ints(SCTP_SORECV_DONE,
			    freed_so_far,
			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
			    0,
			    so->so_rcv.sb_cc);
		}
	}
stage_left:
	if (wakeup_read_socket) {
		sctp_sorwakeup(inp, so);
	}
	return (error);
}
6096
6097
6098 #ifdef SCTP_MBUF_LOGGING
6099 struct mbuf *
6100 sctp_m_free(struct mbuf *m)
6101 {
6102 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6103 sctp_log_mb(m, SCTP_MBUF_IFREE);
6104 }
6105 return (m_free(m));
6106 }
6107
6108 void
6109 sctp_m_freem(struct mbuf *mb)
6110 {
6111 while (mb != NULL)
6112 mb = sctp_m_free(mb);
6113 }
6114
6115 #endif
6116
6117 int
6118 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6119 {
6120 /*
6121 * Given a local address. For all associations that holds the
6122 * address, request a peer-set-primary.
6123 */
6124 struct sctp_ifa *ifa;
6125 struct sctp_laddr *wi;
6126
6127 ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6128 if (ifa == NULL) {
6129 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6130 return (EADDRNOTAVAIL);
6131 }
6132 /*
6133 * Now that we have the ifa we must awaken the iterator with this
6134 * message.
6135 */
6136 wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6137 if (wi == NULL) {
6138 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6139 return (ENOMEM);
6140 }
6141 /* Now incr the count and int wi structure */
6142 SCTP_INCR_LADDR_COUNT();
6143 memset(wi, 0, sizeof(*wi));
6144 (void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6145 wi->ifa = ifa;
6146 wi->action = SCTP_SET_PRIM_ADDR;
6147 atomic_add_int(&ifa->refcount, 1);
6148
6149 /* Now add it to the work queue */
6150 SCTP_WQ_ADDR_LOCK();
6151 /*
6152 * Should this really be a tailq? As it is we will process the
6153 * newest first :-0
6154 */
6155 LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6156 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6157 (struct sctp_inpcb *)NULL,
6158 (struct sctp_tcb *)NULL,
6159 (struct sctp_nets *)NULL);
6160 SCTP_WQ_ADDR_UNLOCK();
6161 return (0);
6162 }
6163
6164
6165 int
6166 sctp_soreceive(struct socket *so,
6167 struct sockaddr **psa,
6168 struct uio *uio,
6169 struct mbuf **mp0,
6170 struct mbuf **controlp,
6171 int *flagsp)
6172 {
6173 int error, fromlen;
6174 uint8_t sockbuf[256];
6175 struct sockaddr *from;
6176 struct sctp_extrcvinfo sinfo;
6177 int filling_sinfo = 1;
6178 int flags;
6179 struct sctp_inpcb *inp;
6180
6181 inp = (struct sctp_inpcb *)so->so_pcb;
6182 /* pickup the assoc we are reading from */
6183 if (inp == NULL) {
6184 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6185 return (EINVAL);
6186 }
6187 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6188 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6189 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6190 (controlp == NULL)) {
6191 /* user does not want the sndrcv ctl */
6192 filling_sinfo = 0;
6193 }
6194 if (psa) {
6195 from = (struct sockaddr *)sockbuf;
6196 fromlen = sizeof(sockbuf);
6197 from->sa_len = 0;
6198 } else {
6199 from = NULL;
6200 fromlen = 0;
6201 }
6202
6203 if (filling_sinfo) {
6204 memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
6205 }
6206 if (flagsp != NULL) {
6207 flags = *flagsp;
6208 } else {
6209 flags = 0;
6210 }
6211 error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, &flags,
6212 (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6213 if (flagsp != NULL) {
6214 *flagsp = flags;
6215 }
6216 if (controlp != NULL) {
6217 /* copy back the sinfo in a CMSG format */
6218 if (filling_sinfo && ((flags & MSG_NOTIFICATION) == 0)) {
6219 *controlp = sctp_build_ctl_nchunk(inp,
6220 (struct sctp_sndrcvinfo *)&sinfo);
6221 } else {
6222 *controlp = NULL;
6223 }
6224 }
6225 if (psa) {
6226 /* copy back the address info */
6227 if (from && from->sa_len) {
6228 *psa = sodupsockaddr(from, M_NOWAIT);
6229 } else {
6230 *psa = NULL;
6231 }
6232 }
6233 return (error);
6234 }
6235
6236
6237
6238
6239
/*
 * Add each address in the packed array 'addr' (totaddr entries of
 * sockaddr_in / sockaddr_in6 laid out back to back) as a remote peer of
 * the association 'stcb'.  Returns the number of addresses successfully
 * added.  On a bad address or an allocation failure the association is
 * freed, *error is set to EINVAL/ENOBUFS and the count so far is
 * returned; *error is 0 on full success.  Entries with an unknown
 * address family are silently skipped (incr keeps its previous value,
 * so the walk re-reads at the same offset advance as the prior entry).
 */
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
    int added = 0;
    int i;
    struct sctp_inpcb *inp;
    struct sockaddr *sa;
    size_t incr = 0;
#ifdef INET
    struct sockaddr_in *sin;
#endif
#ifdef INET6
    struct sockaddr_in6 *sin6;
#endif

    sa = addr;
    inp = stcb->sctp_ep;
    *error = 0;
    for (i = 0; i < totaddr; i++) {
        switch (sa->sa_family) {
#ifdef INET
        case AF_INET:
            incr = sizeof(struct sockaddr_in);
            sin = (struct sockaddr_in *)sa;
            /* Reject wildcard, broadcast and multicast destinations. */
            if ((sin->sin_addr.s_addr == INADDR_ANY) ||
                (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
                IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
                SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
                /* Bad peer address: tear the association down. */
                (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
                    SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
                *error = EINVAL;
                goto out_now;
            }
            if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
                SCTP_DONOT_SETSCOPE,
                SCTP_ADDR_IS_CONFIRMED)) {
                /* assoc gone no un-lock */
                SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
                (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
                    SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
                *error = ENOBUFS;
                goto out_now;
            }
            added++;
            break;
#endif
#ifdef INET6
        case AF_INET6:
            incr = sizeof(struct sockaddr_in6);
            sin6 = (struct sockaddr_in6 *)sa;
            /* Reject unspecified and multicast v6 destinations. */
            if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
                IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
                SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
                (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
                    SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
                *error = EINVAL;
                goto out_now;
            }
            if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
                SCTP_DONOT_SETSCOPE,
                SCTP_ADDR_IS_CONFIRMED)) {
                /* assoc gone no un-lock */
                SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
                (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
                    SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
                *error = ENOBUFS;
                goto out_now;
            }
            added++;
            break;
#endif
        default:
            /* Unknown family: skip; incr retains its last value. */
            break;
        }
        /* Advance to the next packed sockaddr. */
        sa = (struct sockaddr *)((caddr_t)sa + incr);
    }
out_now:
    return (added);
}
6320
/*
 * Validate and account the packed address list 'addr' (*totaddr entries
 * of sockaddr_in / sockaddr_in6 laid out back to back), counting IPv4 and
 * IPv6 entries into *num_v4 / *num_v6.  For each valid address an
 * association lookup is attempted; if one exists, that TCB is returned
 * (the inp reference taken before the lookup is intentionally kept in
 * that case).  Returns NULL when no association matches.  On a malformed
 * or v4-mapped address, *error is set to EINVAL, *bad_addr to 1 and NULL
 * is returned.  The walk ends early (trimming *totaddr to the number of
 * entries consumed) on an unknown address family or when the 'limit'
 * byte bound would be exceeded.
 */
struct sctp_tcb *
sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
    unsigned int *totaddr,
    unsigned int *num_v4, unsigned int *num_v6, int *error,
    unsigned int limit, int *bad_addr)
{
    struct sockaddr *sa;
    struct sctp_tcb *stcb = NULL;
    unsigned int incr, at, i;

    at = 0;
    sa = addr;
    *error = *num_v6 = *num_v4 = 0;
    /* account and validate addresses */
    for (i = 0; i < *totaddr; i++) {
        switch (sa->sa_family) {
#ifdef INET
        case AF_INET:
            incr = (unsigned int)sizeof(struct sockaddr_in);
            /* sa_len must exactly match the v4 sockaddr size. */
            if (sa->sa_len != incr) {
                SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
                *error = EINVAL;
                *bad_addr = 1;
                return (NULL);
            }
            (*num_v4) += 1;
            break;
#endif
#ifdef INET6
        case AF_INET6:
            {
                struct sockaddr_in6 *sin6;

                sin6 = (struct sockaddr_in6 *)sa;
                if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
                    /* Must be non-mapped for connectx */
                    SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
                    *error = EINVAL;
                    *bad_addr = 1;
                    return (NULL);
                }
                incr = (unsigned int)sizeof(struct sockaddr_in6);
                if (sa->sa_len != incr) {
                    SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
                    *error = EINVAL;
                    *bad_addr = 1;
                    return (NULL);
                }
                (*num_v6) += 1;
                break;
            }
#endif
        default:
            /* Unknown family terminates the scan at entry i. */
            *totaddr = i;
            incr = 0;
            /* we are done */
            break;
        }
        if (i == *totaddr) {
            break;
        }
        /*
         * Take an extra inp ref across the lookup; it is dropped
         * again unless an association was found (the caller then
         * owns the found, locked stcb).
         */
        SCTP_INP_INCR_REF(inp);
        stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
        if (stcb != NULL) {
            /* Already have or am bring up an association */
            return (stcb);
        } else {
            SCTP_INP_DECR_REF(inp);
        }
        /*
         * NOTE(review): 'at' is never advanced, so this bound only
         * ever compares a single sockaddr length against 'limit' —
         * confirm whether 'at += incr' was intended.
         */
        if ((at + incr) > limit) {
            *totaddr = i;
            break;
        }
        sa = (struct sockaddr *)((caddr_t)sa + incr);
    }
    return ((struct sctp_tcb *)NULL);
}
6398
/*
 * sctp_bindx(ADD) for one address.
 * assumes all arguments are valid/checked by caller.
 *
 * Validates 'sa' against the endpoint's family/flags, then either performs
 * the initial bind (if the inp is still unbound) or adds the address to an
 * already-bound, subset-bound endpoint via sctp_addr_mgmt_ep_sa().  A
 * v4-mapped v6 address is converted to a plain sockaddr_in first.  Errors
 * are reported through *error (0 on success); assoc-scoped adds
 * (assoc_id != 0) are currently not implemented and are a no-op.
 */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
    struct sockaddr *addr_touse;
#if defined(INET) && defined(INET6)
    struct sockaddr_in sin;
#endif

    /* see if we're bound all already! */
    if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
        SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
        *error = EINVAL;
        return;
    }
    addr_touse = sa;
#ifdef INET6
    if (sa->sa_family == AF_INET6) {
#ifdef INET
        struct sockaddr_in6 *sin6;

#endif
        if (sa->sa_len != sizeof(struct sockaddr_in6)) {
            SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
            *error = EINVAL;
            return;
        }
        if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
            /* can only bind v6 on PF_INET6 sockets */
            SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
            *error = EINVAL;
            return;
        }
#ifdef INET
        sin6 = (struct sockaddr_in6 *)addr_touse;
        if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
            if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
                SCTP_IPV6_V6ONLY(inp)) {
                /* can't bind v4-mapped on PF_INET sockets */
                SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
                *error = EINVAL;
                return;
            }
            /* Use the embedded v4 address from here on. */
            in6_sin6_2_sin(&sin, sin6);
            addr_touse = (struct sockaddr *)&sin;
        }
#endif
    }
#endif
#ifdef INET
    if (sa->sa_family == AF_INET) {
        if (sa->sa_len != sizeof(struct sockaddr_in)) {
            SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
            *error = EINVAL;
            return;
        }
        if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
            SCTP_IPV6_V6ONLY(inp)) {
            /* can't bind v4 on PF_INET sockets */
            SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
            *error = EINVAL;
            return;
        }
    }
#endif
    if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
        if (p == NULL) {
            /* Can't get proc for Net/Open BSD */
            SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
            *error = EINVAL;
            return;
        }
        /* First address on an unbound inp: do the actual bind. */
        *error = sctp_inpcb_bind(so, addr_touse, NULL, p);
        return;
    }
    /*
     * No locks required here since bind and mgmt_ep_sa all do their own
     * locking. If we do something for the FIX: below we may need to
     * lock in that case.
     */
    if (assoc_id == 0) {
        /* add the address */
        struct sctp_inpcb *lep;
        /*
         * NOTE: addr_touse may still be a sockaddr_in6 here; the
         * sin_port field overlays sin6_port at the same offset, so
         * the port checks below work for both families.
         */
        struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

        /* validate the incoming port */
        if ((lsin->sin_port != 0) &&
            (lsin->sin_port != inp->sctp_lport)) {
            SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
            *error = EINVAL;
            return;
        } else {
            /* user specified 0 port, set it to existing port */
            lsin->sin_port = inp->sctp_lport;
        }

        /* Is some other endpoint already bound to this address:port? */
        lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
        if (lep != NULL) {
            /*
             * We must decrement the refcount since we have the
             * ep already and are binding. No remove going on
             * here.
             */
            SCTP_INP_DECR_REF(lep);
        }
        if (lep == inp) {
            /* already bound to it.. ok */
            return;
        } else if (lep == NULL) {
            /* Free: clear the port and add the address. */
            ((struct sockaddr_in *)addr_touse)->sin_port = 0;
            *error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
                SCTP_ADD_IP_ADDRESS,
                vrf_id, NULL);
        } else {
            *error = EADDRINUSE;
        }
        if (*error)
            return;
    } else {
        /*
         * FIX: decide whether we allow assoc based bindx
         */
    }
}
6528
/*
 * sctp_bindx(DELETE) for one address.
 * assumes all arguments are valid/checked by caller.
 *
 * Mirror of sctp_bindx_add_address(): validates 'sa' against the
 * endpoint's family/flags (converting a v4-mapped v6 address to a plain
 * sockaddr_in), then removes the address from the subset-bound endpoint
 * via sctp_addr_mgmt_ep_sa().  Errors are reported through *error;
 * assoc-scoped deletes (assoc_id != 0) are currently not implemented.
 */
void
sctp_bindx_delete_address(struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
    struct sockaddr *addr_touse;
#if defined(INET) && defined(INET6)
    struct sockaddr_in sin;
#endif

    /* see if we're bound all already! */
    if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
        SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
        *error = EINVAL;
        return;
    }
    addr_touse = sa;
#ifdef INET6
    if (sa->sa_family == AF_INET6) {
#ifdef INET
        struct sockaddr_in6 *sin6;
#endif

        if (sa->sa_len != sizeof(struct sockaddr_in6)) {
            SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
            *error = EINVAL;
            return;
        }
        if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
            /* can only bind v6 on PF_INET6 sockets */
            SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
            *error = EINVAL;
            return;
        }
#ifdef INET
        sin6 = (struct sockaddr_in6 *)addr_touse;
        if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
            if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
                SCTP_IPV6_V6ONLY(inp)) {
                /* can't bind mapped-v4 on PF_INET sockets */
                SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
                *error = EINVAL;
                return;
            }
            /* Use the embedded v4 address from here on. */
            in6_sin6_2_sin(&sin, sin6);
            addr_touse = (struct sockaddr *)&sin;
        }
#endif
    }
#endif
#ifdef INET
    if (sa->sa_family == AF_INET) {
        if (sa->sa_len != sizeof(struct sockaddr_in)) {
            SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
            *error = EINVAL;
            return;
        }
        if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
            SCTP_IPV6_V6ONLY(inp)) {
            /* can't bind v4 on PF_INET sockets */
            SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
            *error = EINVAL;
            return;
        }
    }
#endif
    /*
     * No lock required mgmt_ep_sa does its own locking. If the FIX:
     * below is ever changed we may need to lock before calling
     * association level binding.
     */
    if (assoc_id == 0) {
        /* delete the address */
        *error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
            SCTP_DEL_IP_ADDRESS,
            vrf_id, NULL);
    } else {
        /*
         * FIX: decide whether we allow assoc based bindx
         */
    }
}
6615
/*
 * returns the valid local address count for an assoc, taking into account
 * all scoping rules
 *
 * Bound-all endpoints walk every ifa on every ifn of the association's
 * VRF, applying loopback/private/link-local/site-local scope filters and
 * jail (prison) visibility checks; subset-bound endpoints simply count
 * the endpoint's bound-address list.  Addresses restricted for this
 * association are excluded in both cases.  Runs under the global
 * SCTP address read lock.
 */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
    int loopback_scope;
#if defined(INET)
    int ipv4_local_scope, ipv4_addr_legal;
#endif
#if defined (INET6)
    int local_scope, site_scope, ipv6_addr_legal;
#endif
    struct sctp_vrf *vrf;
    struct sctp_ifn *sctp_ifn;
    struct sctp_ifa *sctp_ifa;
    int count = 0;

    /* Turn on all the appropriate scopes */
    loopback_scope = stcb->asoc.scope.loopback_scope;
#if defined(INET)
    ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
    ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
#endif
#if defined(INET6)
    local_scope = stcb->asoc.scope.local_scope;
    site_scope = stcb->asoc.scope.site_scope;
    ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
#endif
    SCTP_IPI_ADDR_RLOCK();
    vrf = sctp_find_vrf(stcb->asoc.vrf_id);
    if (vrf == NULL) {
        /* no vrf, no addresses */
        SCTP_IPI_ADDR_RUNLOCK();
        return (0);
    }
    if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
        /*
         * bound all case: go through all ifns on the vrf
         */
        LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
            /* Skip loopback interfaces unless loopback is in scope. */
            if ((loopback_scope == 0) &&
                SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
                continue;
            }
            LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
                if (sctp_is_addr_restricted(stcb, sctp_ifa))
                    continue;
                switch (sctp_ifa->address.sa.sa_family) {
#ifdef INET
                case AF_INET:
                    if (ipv4_addr_legal) {
                        struct sockaddr_in *sin;

                        sin = &sctp_ifa->address.sin;
                        if (sin->sin_addr.s_addr == 0) {
                            /*
                             * skip unspecified
                             * addrs
                             */
                            continue;
                        }
                        /* Skip addresses not visible in our jail. */
                        if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
                            &sin->sin_addr) != 0) {
                            continue;
                        }
                        /* RFC1918-style addrs need local scope. */
                        if ((ipv4_local_scope == 0) &&
                            (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
                            continue;
                        }
                        /* count this one */
                        count++;
                    } else {
                        continue;
                    }
                    break;
#endif
#ifdef INET6
                case AF_INET6:
                    if (ipv6_addr_legal) {
                        struct sockaddr_in6 *sin6;

                        sin6 = &sctp_ifa->address.sin6;
                        if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
                            continue;
                        }
                        /* Skip addresses not visible in our jail. */
                        if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
                            &sin6->sin6_addr) != 0) {
                            continue;
                        }
                        if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
                            if (local_scope == 0)
                                continue;
                            if (sin6->sin6_scope_id == 0) {
                                if (sa6_recoverscope(sin6) != 0)
                                    /*
                                     * bad link
                                     * local
                                     * address
                                     */
                                    continue;
                            }
                        }
                        if ((site_scope == 0) &&
                            (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
                            continue;
                        }
                        /* count this one */
                        count++;
                    }
                    break;
#endif
                default:
                    /* TSNH */
                    break;
                }
            }
        }
    } else {
        /*
         * subset bound case: count the endpoint's bound list,
         * minus addresses restricted for this association.
         */
        struct sctp_laddr *laddr;

        LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
            sctp_nxt_addr) {
            if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
                continue;
            }
            /* count this one */
            count++;
        }
    }
    SCTP_IPI_ADDR_RUNLOCK();
    return (count);
}
6757
6758 #if defined(SCTP_LOCAL_TRACE_BUF)
6759
/*
 * Append one entry to the global lock-free SCTP trace ring buffer.
 * The slot index is claimed with a CAS loop: 'index' counts 1..
 * SCTP_MAX_LOGGING_SIZE and wraps back to 1, so the entry actually
 * written is at 'saveindex' (reset to 0 on wrap).  Concurrent callers
 * each claim distinct slots; a torn entry is possible only if a writer
 * is preempted mid-fill, which is acceptable for a debug trace.
 */
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
    uint32_t saveindex, newindex;

    /* Claim a slot: retry until our index swap wins the race. */
    do {
        saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
        if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
            newindex = 1;
        } else {
            newindex = saveindex + 1;
        }
    } while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
    if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
        /* Wrapped: write slot 0. */
        saveindex = 0;
    }
    SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
    SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
    SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
    SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
    SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
    SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
    SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
    SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}
6785
6786 #endif
/*
 * UDP tunneling receive hook (registered via udp_set_kernel_tunneling):
 * unwrap an SCTP packet that arrived inside a UDP datagram and hand it
 * to the normal SCTP input path.  'm' holds the full packet starting at
 * the IP header; 'off' is the offset of the UDP header.  Consumes 'm'
 * in all cases (either by dispatching it or freeing it).
 */
static void
sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
    const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
{
    struct ip *iph;
#ifdef INET6
    struct ip6_hdr *ip6;
#endif
    struct mbuf *sp, *last;
    struct udphdr *uhdr;
    uint16_t port;

    if ((m->m_flags & M_PKTHDR) == 0) {
        /* Can't handle one that is not a pkt hdr */
        goto out;
    }
    /* Pull the src port */
    iph = mtod(m, struct ip *);
    uhdr = (struct udphdr *)((caddr_t)iph + off);
    port = uhdr->uh_sport;
    /*
     * Split out the mbuf chain. Leave the IP header in m, place the
     * rest in the sp.
     */
    sp = m_split(m, off, M_NOWAIT);
    if (sp == NULL) {
        /* Gak, drop packet, we can't do a split */
        goto out;
    }
    if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
        /* Gak, packet can't have an SCTP header in it - too small */
        m_freem(sp);
        goto out;
    }
    /* Now pull up the UDP header and SCTP header together */
    sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
    if (sp == NULL) {
        /* Gak pullup failed */
        goto out;
    }
    /* Trim out the UDP header */
    m_adj(sp, sizeof(struct udphdr));

    /* Now reconstruct the mbuf chain: IP header chain + SCTP payload. */
    for (last = m; last->m_next; last = last->m_next);
    last->m_next = sp;
    m->m_pkthdr.len += sp->m_pkthdr.len;
    /*
     * The CSUM_DATA_VALID flags indicates that the HW checked the UDP
     * checksum and it was valid. Since CSUM_DATA_VALID ==
     * CSUM_SCTP_VALID this would imply that the HW also verified the
     * SCTP checksum. Therefore, clear the bit.
     */
    SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
        "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n",
        m->m_pkthdr.len,
        if_name(m->m_pkthdr.rcvif),
        (int)m->m_pkthdr.csum_flags, CSUM_BITS);
    m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
    iph = mtod(m, struct ip *);
    switch (iph->ip_v) {
#ifdef INET
    case IPVERSION:
        /* Shrink ip_len to account for the removed UDP header. */
        iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
        sctp_input_with_port(m, off, port);
        break;
#endif
#ifdef INET6
    case IPV6_VERSION >> 4:
        ip6 = mtod(m, struct ip6_hdr *);
        ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
        sctp6_input_with_port(&m, &off, port);
        break;
#endif
    default:
        goto out;
        break;
    }
    return;
out:
    m_freem(m);
}
6869
6870 #ifdef INET
/*
 * ICMP error hook for UDP-tunneled SCTP (IPv4).  'vip' points at the
 * offending inner IP header embedded in the ICMP message; the outer IP
 * and ICMP headers are located by subtracting fixed offsets from it.
 * Validates the quoted packet is long enough, finds the matching
 * association (note: src/dst are swapped for the lookup, since the
 * quoted packet was one we sent), verifies the UDP ports and the
 * verification tag (or, for a zero tag, the initiate tag of a quoted
 * INIT chunk), then forwards the error to sctp_notify().  Any TCB lock
 * or inp reference taken by the lookup is released on every path.
 */
static void
sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED)
{
    struct ip *outer_ip, *inner_ip;
    struct sctphdr *sh;
    struct icmp *icmp;
    struct udphdr *udp;
    struct sctp_inpcb *inp;
    struct sctp_tcb *stcb;
    struct sctp_nets *net;
    struct sctp_init_chunk *ch;
    struct sockaddr_in src, dst;
    uint8_t type, code;

    inner_ip = (struct ip *)vip;
    /* Back up from the inner IP header to the ICMP and outer IP headers. */
    icmp = (struct icmp *)((caddr_t)inner_ip -
        (sizeof(struct icmp) - sizeof(struct ip)));
    outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip));
    /* Quoted packet must include UDP header + 8 bytes of SCTP header. */
    if (ntohs(outer_ip->ip_len) <
        sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) {
        return;
    }
    udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2));
    sh = (struct sctphdr *)(udp + 1);
    memset(&src, 0, sizeof(struct sockaddr_in));
    src.sin_family = AF_INET;
    src.sin_len = sizeof(struct sockaddr_in);
    src.sin_port = sh->src_port;
    src.sin_addr = inner_ip->ip_src;
    memset(&dst, 0, sizeof(struct sockaddr_in));
    dst.sin_family = AF_INET;
    dst.sin_len = sizeof(struct sockaddr_in);
    dst.sin_port = sh->dest_port;
    dst.sin_addr = inner_ip->ip_dst;
    /*
     * 'dst' holds the dest of the packet that failed to be sent. 'src'
     * holds our local endpoint address. Thus we reverse the dst and the
     * src in the lookup.
     */
    inp = NULL;
    net = NULL;
    stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
        (struct sockaddr *)&src,
        &inp, &net, 1,
        SCTP_DEFAULT_VRFID);
    if ((stcb != NULL) &&
        (net != NULL) &&
        (inp != NULL)) {
        /* Check the UDP port numbers */
        if ((udp->uh_dport != net->port) ||
            (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
            SCTP_TCB_UNLOCK(stcb);
            return;
        }
        /* Check the verification tag */
        if (ntohl(sh->v_tag) != 0) {
            /*
             * This must be the verification tag used for
             * sending out packets. We don't consider packets
             * reflecting the verification tag.
             */
            if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) {
                SCTP_TCB_UNLOCK(stcb);
                return;
            }
        } else {
            if (ntohs(outer_ip->ip_len) >=
                sizeof(struct ip) +
                8 + (inner_ip->ip_hl << 2) + 8 + 20) {
                /*
                 * In this case we can check if we got an
                 * INIT chunk and if the initiate tag
                 * matches.
                 */
                ch = (struct sctp_init_chunk *)(sh + 1);
                if ((ch->ch.chunk_type != SCTP_INITIATION) ||
                    (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) {
                    SCTP_TCB_UNLOCK(stcb);
                    return;
                }
            } else {
                SCTP_TCB_UNLOCK(stcb);
                return;
            }
        }
        type = icmp->icmp_type;
        code = icmp->icmp_code;
        /* A port unreachable on the tunnel means no SCTP/UDP support. */
        if ((type == ICMP_UNREACH) &&
            (code == ICMP_UNREACH_PORT)) {
            code = ICMP_UNREACH_PROTOCOL;
        }
        sctp_notify(inp, stcb, net, type, code,
            ntohs(inner_ip->ip_len),
            (uint32_t)ntohs(icmp->icmp_nextmtu));
    } else {
        if ((stcb == NULL) && (inp != NULL)) {
            /* reduce ref-count */
            SCTP_INP_WLOCK(inp);
            SCTP_INP_DECR_REF(inp);
            SCTP_INP_WUNLOCK(inp);
        }
        if (stcb) {
            SCTP_TCB_UNLOCK(stcb);
        }
    }
    return;
}
6978 #endif
6979
6980 #ifdef INET6
/*
 * ICMPv6 error hook for UDP-tunneled SCTP (IPv6 counterpart of
 * sctp_recv_icmp_tunneled_packet).  The quoted packet lives in an mbuf
 * chain (ip6ctlparam), so headers are extracted with m_copydata rather
 * than pointer arithmetic.  Validates lengths, restores address scopes,
 * finds the association with src/dst reversed, checks UDP ports and the
 * verification tag (or a quoted INIT's initiate tag), maps a
 * port-unreachable to a parameter-problem, and forwards the error to
 * sctp6_notify().  Any TCB lock or inp reference taken by the lookup is
 * released on every path.
 */
static void
sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED)
{
    struct ip6ctlparam *ip6cp;
    struct sctp_inpcb *inp;
    struct sctp_tcb *stcb;
    struct sctp_nets *net;
    struct sctphdr sh;
    struct udphdr udp;
    struct sockaddr_in6 src, dst;
    uint8_t type, code;

    ip6cp = (struct ip6ctlparam *)d;
    /*
     * XXX: We assume that when IPV6 is non NULL, M and OFF are valid.
     */
    if (ip6cp->ip6c_m == NULL) {
        return;
    }
    /*
     * Check if we can safely examine the ports and the verification tag
     * of the SCTP common header.
     */
    if (ip6cp->ip6c_m->m_pkthdr.len <
        ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) {
        return;
    }
    /* Copy out the UDP header. */
    memset(&udp, 0, sizeof(struct udphdr));
    m_copydata(ip6cp->ip6c_m,
        ip6cp->ip6c_off,
        sizeof(struct udphdr),
        (caddr_t)&udp);
    /* Copy out the port numbers and the verification tag. */
    memset(&sh, 0, sizeof(struct sctphdr));
    m_copydata(ip6cp->ip6c_m,
        ip6cp->ip6c_off + sizeof(struct udphdr),
        sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t),
        (caddr_t)&sh);
    memset(&src, 0, sizeof(struct sockaddr_in6));
    src.sin6_family = AF_INET6;
    src.sin6_len = sizeof(struct sockaddr_in6);
    src.sin6_port = sh.src_port;
    src.sin6_addr = ip6cp->ip6c_ip6->ip6_src;
    /* Re-embed the scope zone; bail if the address has no valid scope. */
    if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
        return;
    }
    memset(&dst, 0, sizeof(struct sockaddr_in6));
    dst.sin6_family = AF_INET6;
    dst.sin6_len = sizeof(struct sockaddr_in6);
    dst.sin6_port = sh.dest_port;
    dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst;
    if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
        return;
    }
    inp = NULL;
    net = NULL;
    /* Reversed lookup: the quoted packet was one we sent. */
    stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
        (struct sockaddr *)&src,
        &inp, &net, 1, SCTP_DEFAULT_VRFID);
    if ((stcb != NULL) &&
        (net != NULL) &&
        (inp != NULL)) {
        /* Check the UDP port numbers */
        if ((udp.uh_dport != net->port) ||
            (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
            SCTP_TCB_UNLOCK(stcb);
            return;
        }
        /* Check the verification tag */
        if (ntohl(sh.v_tag) != 0) {
            /*
             * This must be the verification tag used for
             * sending out packets. We don't consider packets
             * reflecting the verification tag.
             */
            if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) {
                SCTP_TCB_UNLOCK(stcb);
                return;
            }
        } else {
            if (ip6cp->ip6c_m->m_pkthdr.len >=
                ip6cp->ip6c_off + sizeof(struct udphdr) +
                sizeof(struct sctphdr) +
                sizeof(struct sctp_chunkhdr) +
                offsetof(struct sctp_init, a_rwnd)) {
                /*
                 * In this case we can check if we got an
                 * INIT chunk and if the initiate tag
                 * matches.
                 */
                uint32_t initiate_tag;
                uint8_t chunk_type;

                m_copydata(ip6cp->ip6c_m,
                    ip6cp->ip6c_off +
                    sizeof(struct udphdr) +
                    sizeof(struct sctphdr),
                    sizeof(uint8_t),
                    (caddr_t)&chunk_type);
                m_copydata(ip6cp->ip6c_m,
                    ip6cp->ip6c_off +
                    sizeof(struct udphdr) +
                    sizeof(struct sctphdr) +
                    sizeof(struct sctp_chunkhdr),
                    sizeof(uint32_t),
                    (caddr_t)&initiate_tag);
                if ((chunk_type != SCTP_INITIATION) ||
                    (ntohl(initiate_tag) != stcb->asoc.my_vtag)) {
                    SCTP_TCB_UNLOCK(stcb);
                    return;
                }
            } else {
                SCTP_TCB_UNLOCK(stcb);
                return;
            }
        }
        type = ip6cp->ip6c_icmp6->icmp6_type;
        code = ip6cp->ip6c_icmp6->icmp6_code;
        /* A port unreachable on the tunnel means no SCTP/UDP support. */
        if ((type == ICMP6_DST_UNREACH) &&
            (code == ICMP6_DST_UNREACH_NOPORT)) {
            type = ICMP6_PARAM_PROB;
            code = ICMP6_PARAMPROB_NEXTHEADER;
        }
        sctp6_notify(inp, stcb, net, type, code,
            ntohl(ip6cp->ip6c_icmp6->icmp6_mtu));
    } else {
        if ((stcb == NULL) && (inp != NULL)) {
            /* reduce inp's ref-count */
            SCTP_INP_WLOCK(inp);
            SCTP_INP_DECR_REF(inp);
            SCTP_INP_WUNLOCK(inp);
        }
        if (stcb) {
            SCTP_TCB_UNLOCK(stcb);
        }
    }
}
7119 #endif
7120
/*
 * Tear down SCTP-over-UDP support: close and clear the global v4/v6
 * tunneling sockets if they exist.  Safe to call when tunneling was
 * never started (or only partially started).
 */
void
sctp_over_udp_stop(void)
{
    /*
     * This function assumes sysctl caller holds sctp_sysctl_info_lock()
     * for writting!
     */
#ifdef INET
    if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
        soclose(SCTP_BASE_INFO(udp4_tun_socket));
        SCTP_BASE_INFO(udp4_tun_socket) = NULL;
    }
#endif
#ifdef INET6
    if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
        soclose(SCTP_BASE_INFO(udp6_tun_socket));
        SCTP_BASE_INFO(udp6_tun_socket) = NULL;
    }
#endif
}
7141
/*
 * Start SCTP-over-UDP support: create kernel UDP sockets (v4 and/or v6),
 * register the tunneling receive and ICMP error hooks on them, and bind
 * them to the configured sctp_udp_tunneling_port.  Returns 0 on success
 * or an errno; on any failure everything set up so far is torn down via
 * sctp_over_udp_stop().  Fails with EINVAL if no port is configured and
 * EALREADY if the sockets already exist.
 */
int
sctp_over_udp_start(void)
{
    uint16_t port;
    int ret;
#ifdef INET
    struct sockaddr_in sin;
#endif
#ifdef INET6
    struct sockaddr_in6 sin6;
#endif
    /*
     * This function assumes sysctl caller holds sctp_sysctl_info_lock()
     * for writting!
     */
    port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
    if (ntohs(port) == 0) {
        /* Must have a port set */
        return (EINVAL);
    }
#ifdef INET
    if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
        /* Already running -- must stop first */
        return (EALREADY);
    }
#endif
#ifdef INET6
    if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
        /* Already running -- must stop first */
        return (EALREADY);
    }
#endif
#ifdef INET
    if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
        SOCK_DGRAM, IPPROTO_UDP,
        curthread->td_ucred, curthread))) {
        sctp_over_udp_stop();
        return (ret);
    }
    /* Call the special UDP hook. */
    if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
        sctp_recv_udp_tunneled_packet,
        sctp_recv_icmp_tunneled_packet,
        NULL))) {
        sctp_over_udp_stop();
        return (ret);
    }
    /* Ok, we have a socket, bind it to the port. */
    memset(&sin, 0, sizeof(struct sockaddr_in));
    sin.sin_len = sizeof(struct sockaddr_in);
    sin.sin_family = AF_INET;
    sin.sin_port = htons(port);
    if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
        (struct sockaddr *)&sin, curthread))) {
        sctp_over_udp_stop();
        return (ret);
    }
#endif
#ifdef INET6
    if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
        SOCK_DGRAM, IPPROTO_UDP,
        curthread->td_ucred, curthread))) {
        sctp_over_udp_stop();
        return (ret);
    }
    /* Call the special UDP hook. */
    if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
        sctp_recv_udp_tunneled_packet,
        sctp_recv_icmp6_tunneled_packet,
        NULL))) {
        sctp_over_udp_stop();
        return (ret);
    }
    /* Ok, we have a socket, bind it to the port. */
    memset(&sin6, 0, sizeof(struct sockaddr_in6));
    sin6.sin6_len = sizeof(struct sockaddr_in6);
    sin6.sin6_family = AF_INET6;
    sin6.sin6_port = htons(port);
    if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
        (struct sockaddr *)&sin6, curthread))) {
        sctp_over_udp_stop();
        return (ret);
    }
#endif
    return (0);
}
7228
7229 #if defined(INET6) || defined(INET)
7230
/*
 * sctp_min_mtu() returns the minimum of all non-zero arguments
 * (a zero MTU means "unknown" and is ignored).  If all three
 * arguments are zero, zero is returned.
 */
uint32_t
sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
{
	uint32_t smallest;

	/* Track the smallest non-zero value in a single pass. */
	smallest = mtu1;
	if ((mtu2 > 0) && ((smallest == 0) || (mtu2 < smallest))) {
		smallest = mtu2;
	}
	if ((mtu3 > 0) && ((smallest == 0) || (mtu3 < smallest))) {
		smallest = mtu3;
	}
	return (smallest);
}
7264
7265 void
7266 sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
7267 {
7268 struct in_conninfo inc;
7269
7270 memset(&inc, 0, sizeof(struct in_conninfo));
7271 inc.inc_fibnum = fibnum;
7272 switch (addr->sa.sa_family) {
7273 #ifdef INET
7274 case AF_INET:
7275 inc.inc_faddr = addr->sin.sin_addr;
7276 break;
7277 #endif
7278 #ifdef INET6
7279 case AF_INET6:
7280 inc.inc_flags |= INC_ISIPV6;
7281 inc.inc6_faddr = addr->sin6.sin6_addr;
7282 break;
7283 #endif
7284 default:
7285 return;
7286 }
7287 tcp_hc_updatemtu(&inc, (u_long)mtu);
7288 }
7289
7290 uint32_t
7291 sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
7292 {
7293 struct in_conninfo inc;
7294
7295 memset(&inc, 0, sizeof(struct in_conninfo));
7296 inc.inc_fibnum = fibnum;
7297 switch (addr->sa.sa_family) {
7298 #ifdef INET
7299 case AF_INET:
7300 inc.inc_faddr = addr->sin.sin_addr;
7301 break;
7302 #endif
7303 #ifdef INET6
7304 case AF_INET6:
7305 inc.inc_flags |= INC_ISIPV6;
7306 inc.inc6_faddr = addr->sin6.sin6_addr;
7307 break;
7308 #endif
7309 default:
7310 return (0);
7311 }
7312 return ((uint32_t)tcp_hc_getmtu(&inc));
7313 }
7314 #endif