1 /*-
2 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 *
9 * a) Redistributions of source code must retain the above copyright notice,
10 * this list of conditions and the following disclaimer.
11 *
12 * b) Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the distribution.
15 *
16 * c) Neither the name of Cisco Systems, Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD: releng/8.4/sys/netinet/sctputil.c 268435 2014-07-08 21:55:39Z delphij $");
35
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #include <netinet6/sctp6_var.h>
43 #endif
44 #include <netinet/sctp_header.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_timer.h>
48 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
49 #include <netinet/sctp_auth.h>
50 #include <netinet/sctp_asconf.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #include <netinet/udp.h>
53 #include <netinet/udp_var.h>
54 #include <sys/proc.h>
55
56
57 #ifndef KTR_SCTP
58 #define KTR_SCTP KTR_SUBSYS
59 #endif
60
61 extern struct sctp_cc_functions sctp_cc_functions[];
62 extern struct sctp_ss_functions sctp_ss_functions[];
63
64 void
65 sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
66 {
67 struct sctp_cwnd_log sctp_clog;
68
69 sctp_clog.x.sb.stcb = stcb;
70 sctp_clog.x.sb.so_sbcc = sb->sb_cc;
71 if (stcb)
72 sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
73 else
74 sctp_clog.x.sb.stcb_sbcc = 0;
75 sctp_clog.x.sb.incr = incr;
76 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
77 SCTP_LOG_EVENT_SB,
78 from,
79 sctp_clog.x.misc.log1,
80 sctp_clog.x.misc.log2,
81 sctp_clog.x.misc.log3,
82 sctp_clog.x.misc.log4);
83 }
84
85 void
86 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
87 {
88 struct sctp_cwnd_log sctp_clog;
89
90 sctp_clog.x.close.inp = (void *)inp;
91 sctp_clog.x.close.sctp_flags = inp->sctp_flags;
92 if (stcb) {
93 sctp_clog.x.close.stcb = (void *)stcb;
94 sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
95 } else {
96 sctp_clog.x.close.stcb = 0;
97 sctp_clog.x.close.state = 0;
98 }
99 sctp_clog.x.close.loc = loc;
100 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
101 SCTP_LOG_EVENT_CLOSE,
102 0,
103 sctp_clog.x.misc.log1,
104 sctp_clog.x.misc.log2,
105 sctp_clog.x.misc.log3,
106 sctp_clog.x.misc.log4);
107 }
108
109 void
110 rto_logging(struct sctp_nets *net, int from)
111 {
112 struct sctp_cwnd_log sctp_clog;
113
114 memset(&sctp_clog, 0, sizeof(sctp_clog));
115 sctp_clog.x.rto.net = (void *)net;
116 sctp_clog.x.rto.rtt = net->rtt / 1000;
117 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
118 SCTP_LOG_EVENT_RTT,
119 from,
120 sctp_clog.x.misc.log1,
121 sctp_clog.x.misc.log2,
122 sctp_clog.x.misc.log3,
123 sctp_clog.x.misc.log4);
124 }
125
126 void
127 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
128 {
129 struct sctp_cwnd_log sctp_clog;
130
131 sctp_clog.x.strlog.stcb = stcb;
132 sctp_clog.x.strlog.n_tsn = tsn;
133 sctp_clog.x.strlog.n_sseq = sseq;
134 sctp_clog.x.strlog.e_tsn = 0;
135 sctp_clog.x.strlog.e_sseq = 0;
136 sctp_clog.x.strlog.strm = stream;
137 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
138 SCTP_LOG_EVENT_STRM,
139 from,
140 sctp_clog.x.misc.log1,
141 sctp_clog.x.misc.log2,
142 sctp_clog.x.misc.log3,
143 sctp_clog.x.misc.log4);
144 }
145
146 void
147 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
148 {
149 struct sctp_cwnd_log sctp_clog;
150
151 sctp_clog.x.nagle.stcb = (void *)stcb;
152 sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
153 sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
154 sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
155 sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
156 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
157 SCTP_LOG_EVENT_NAGLE,
158 action,
159 sctp_clog.x.misc.log1,
160 sctp_clog.x.misc.log2,
161 sctp_clog.x.misc.log3,
162 sctp_clog.x.misc.log4);
163 }
164
165 void
166 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
167 {
168 struct sctp_cwnd_log sctp_clog;
169
170 sctp_clog.x.sack.cumack = cumack;
171 sctp_clog.x.sack.oldcumack = old_cumack;
172 sctp_clog.x.sack.tsn = tsn;
173 sctp_clog.x.sack.numGaps = gaps;
174 sctp_clog.x.sack.numDups = dups;
175 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
176 SCTP_LOG_EVENT_SACK,
177 from,
178 sctp_clog.x.misc.log1,
179 sctp_clog.x.misc.log2,
180 sctp_clog.x.misc.log3,
181 sctp_clog.x.misc.log4);
182 }
183
184 void
185 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
186 {
187 struct sctp_cwnd_log sctp_clog;
188
189 memset(&sctp_clog, 0, sizeof(sctp_clog));
190 sctp_clog.x.map.base = map;
191 sctp_clog.x.map.cum = cum;
192 sctp_clog.x.map.high = high;
193 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
194 SCTP_LOG_EVENT_MAP,
195 from,
196 sctp_clog.x.misc.log1,
197 sctp_clog.x.misc.log2,
198 sctp_clog.x.misc.log3,
199 sctp_clog.x.misc.log4);
200 }
201
202 void
203 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
204 {
205 struct sctp_cwnd_log sctp_clog;
206
207 memset(&sctp_clog, 0, sizeof(sctp_clog));
208 sctp_clog.x.fr.largest_tsn = biggest_tsn;
209 sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
210 sctp_clog.x.fr.tsn = tsn;
211 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
212 SCTP_LOG_EVENT_FR,
213 from,
214 sctp_clog.x.misc.log1,
215 sctp_clog.x.misc.log2,
216 sctp_clog.x.misc.log3,
217 sctp_clog.x.misc.log4);
218 }
219
220 void
221 sctp_log_mb(struct mbuf *m, int from)
222 {
223 struct sctp_cwnd_log sctp_clog;
224
225 sctp_clog.x.mb.mp = m;
226 sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
227 sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
228 sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
229 if (SCTP_BUF_IS_EXTENDED(m)) {
230 sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
231 sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
232 } else {
233 sctp_clog.x.mb.ext = 0;
234 sctp_clog.x.mb.refcnt = 0;
235 }
236 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
237 SCTP_LOG_EVENT_MBUF,
238 from,
239 sctp_clog.x.misc.log1,
240 sctp_clog.x.misc.log2,
241 sctp_clog.x.misc.log3,
242 sctp_clog.x.misc.log4);
243 }
244
245 void
246 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
247 {
248 struct sctp_cwnd_log sctp_clog;
249
250 if (control == NULL) {
251 SCTP_PRINTF("Gak log of NULL?\n");
252 return;
253 }
254 sctp_clog.x.strlog.stcb = control->stcb;
255 sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
256 sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
257 sctp_clog.x.strlog.strm = control->sinfo_stream;
258 if (poschk != NULL) {
259 sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
260 sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
261 } else {
262 sctp_clog.x.strlog.e_tsn = 0;
263 sctp_clog.x.strlog.e_sseq = 0;
264 }
265 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
266 SCTP_LOG_EVENT_STRM,
267 from,
268 sctp_clog.x.misc.log1,
269 sctp_clog.x.misc.log2,
270 sctp_clog.x.misc.log3,
271 sctp_clog.x.misc.log4);
272 }
273
274 void
275 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
276 {
277 struct sctp_cwnd_log sctp_clog;
278
279 sctp_clog.x.cwnd.net = net;
280 if (stcb->asoc.send_queue_cnt > 255)
281 sctp_clog.x.cwnd.cnt_in_send = 255;
282 else
283 sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
284 if (stcb->asoc.stream_queue_cnt > 255)
285 sctp_clog.x.cwnd.cnt_in_str = 255;
286 else
287 sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
288
289 if (net) {
290 sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
291 sctp_clog.x.cwnd.inflight = net->flight_size;
292 sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
293 sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
294 sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
295 }
296 if (SCTP_CWNDLOG_PRESEND == from) {
297 sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
298 }
299 sctp_clog.x.cwnd.cwnd_augment = augment;
300 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
301 SCTP_LOG_EVENT_CWND,
302 from,
303 sctp_clog.x.misc.log1,
304 sctp_clog.x.misc.log2,
305 sctp_clog.x.misc.log3,
306 sctp_clog.x.misc.log4);
307 }
308
309 void
310 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
311 {
312 struct sctp_cwnd_log sctp_clog;
313
314 memset(&sctp_clog, 0, sizeof(sctp_clog));
315 if (inp) {
316 sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
317
318 } else {
319 sctp_clog.x.lock.sock = (void *)NULL;
320 }
321 sctp_clog.x.lock.inp = (void *)inp;
322 if (stcb) {
323 sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
324 } else {
325 sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
326 }
327 if (inp) {
328 sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
329 sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
330 } else {
331 sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
332 sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
333 }
334 sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
335 if (inp && (inp->sctp_socket)) {
336 sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
337 sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
338 sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
339 } else {
340 sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
341 sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
342 sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
343 }
344 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
345 SCTP_LOG_LOCK_EVENT,
346 from,
347 sctp_clog.x.misc.log1,
348 sctp_clog.x.misc.log2,
349 sctp_clog.x.misc.log3,
350 sctp_clog.x.misc.log4);
351 }
352
353 void
354 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
355 {
356 struct sctp_cwnd_log sctp_clog;
357
358 memset(&sctp_clog, 0, sizeof(sctp_clog));
359 sctp_clog.x.cwnd.net = net;
360 sctp_clog.x.cwnd.cwnd_new_value = error;
361 sctp_clog.x.cwnd.inflight = net->flight_size;
362 sctp_clog.x.cwnd.cwnd_augment = burst;
363 if (stcb->asoc.send_queue_cnt > 255)
364 sctp_clog.x.cwnd.cnt_in_send = 255;
365 else
366 sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
367 if (stcb->asoc.stream_queue_cnt > 255)
368 sctp_clog.x.cwnd.cnt_in_str = 255;
369 else
370 sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
371 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
372 SCTP_LOG_EVENT_MAXBURST,
373 from,
374 sctp_clog.x.misc.log1,
375 sctp_clog.x.misc.log2,
376 sctp_clog.x.misc.log3,
377 sctp_clog.x.misc.log4);
378 }
379
380 void
381 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
382 {
383 struct sctp_cwnd_log sctp_clog;
384
385 sctp_clog.x.rwnd.rwnd = peers_rwnd;
386 sctp_clog.x.rwnd.send_size = snd_size;
387 sctp_clog.x.rwnd.overhead = overhead;
388 sctp_clog.x.rwnd.new_rwnd = 0;
389 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
390 SCTP_LOG_EVENT_RWND,
391 from,
392 sctp_clog.x.misc.log1,
393 sctp_clog.x.misc.log2,
394 sctp_clog.x.misc.log3,
395 sctp_clog.x.misc.log4);
396 }
397
398 void
399 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
400 {
401 struct sctp_cwnd_log sctp_clog;
402
403 sctp_clog.x.rwnd.rwnd = peers_rwnd;
404 sctp_clog.x.rwnd.send_size = flight_size;
405 sctp_clog.x.rwnd.overhead = overhead;
406 sctp_clog.x.rwnd.new_rwnd = a_rwndval;
407 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
408 SCTP_LOG_EVENT_RWND,
409 from,
410 sctp_clog.x.misc.log1,
411 sctp_clog.x.misc.log2,
412 sctp_clog.x.misc.log3,
413 sctp_clog.x.misc.log4);
414 }
415
416 void
417 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
418 {
419 struct sctp_cwnd_log sctp_clog;
420
421 sctp_clog.x.mbcnt.total_queue_size = total_oq;
422 sctp_clog.x.mbcnt.size_change = book;
423 sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
424 sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
425 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
426 SCTP_LOG_EVENT_MBCNT,
427 from,
428 sctp_clog.x.misc.log1,
429 sctp_clog.x.misc.log2,
430 sctp_clog.x.misc.log3,
431 sctp_clog.x.misc.log4);
432 }
433
/*
 * Log four caller-supplied 32-bit values as a generic "misc" trace event;
 * "from" identifies the call site. No sctp_cwnd_log record is needed here
 * since the values are passed straight through.
 */
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
}
442
443 void
444 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
445 {
446 struct sctp_cwnd_log sctp_clog;
447
448 sctp_clog.x.wake.stcb = (void *)stcb;
449 sctp_clog.x.wake.wake_cnt = wake_cnt;
450 sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
451 sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
452 sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
453
454 if (stcb->asoc.stream_queue_cnt < 0xff)
455 sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
456 else
457 sctp_clog.x.wake.stream_qcnt = 0xff;
458
459 if (stcb->asoc.chunks_on_out_queue < 0xff)
460 sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
461 else
462 sctp_clog.x.wake.chunks_on_oque = 0xff;
463
464 sctp_clog.x.wake.sctpflags = 0;
465 /* set in the defered mode stuff */
466 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
467 sctp_clog.x.wake.sctpflags |= 1;
468 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
469 sctp_clog.x.wake.sctpflags |= 2;
470 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
471 sctp_clog.x.wake.sctpflags |= 4;
472 /* what about the sb */
473 if (stcb->sctp_socket) {
474 struct socket *so = stcb->sctp_socket;
475
476 sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
477 } else {
478 sctp_clog.x.wake.sbflags = 0xff;
479 }
480 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
481 SCTP_LOG_EVENT_WAKE,
482 from,
483 sctp_clog.x.misc.log1,
484 sctp_clog.x.misc.log2,
485 sctp_clog.x.misc.log3,
486 sctp_clog.x.misc.log4);
487 }
488
489 void
490 sctp_log_block(uint8_t from, struct sctp_association *asoc, int sendlen)
491 {
492 struct sctp_cwnd_log sctp_clog;
493
494 sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
495 sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
496 sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
497 sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
498 sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
499 sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
500 sctp_clog.x.blk.sndlen = sendlen;
501 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
502 SCTP_LOG_EVENT_BLOCK,
503 from,
504 sctp_clog.x.misc.log1,
505 sctp_clog.x.misc.log2,
506 sctp_clog.x.misc.log3,
507 sctp_clog.x.misc.log4);
508 }
509
/*
 * Stub: nothing is copied into optval; always reports success. The
 * existing note suggests the KTR buffer is read externally (ktrdump)
 * instead of through this hook.
 */
int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
516
517 #ifdef SCTP_AUDITING_ENABLED
518 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
519 static int sctp_audit_indx = 0;
520
521 static
522 void
523 sctp_print_audit_report(void)
524 {
525 int i;
526 int cnt;
527
528 cnt = 0;
529 for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
530 if ((sctp_audit_data[i][0] == 0xe0) &&
531 (sctp_audit_data[i][1] == 0x01)) {
532 cnt = 0;
533 SCTP_PRINTF("\n");
534 } else if (sctp_audit_data[i][0] == 0xf0) {
535 cnt = 0;
536 SCTP_PRINTF("\n");
537 } else if ((sctp_audit_data[i][0] == 0xc0) &&
538 (sctp_audit_data[i][1] == 0x01)) {
539 SCTP_PRINTF("\n");
540 cnt = 0;
541 }
542 SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
543 (uint32_t) sctp_audit_data[i][1]);
544 cnt++;
545 if ((cnt % 14) == 0)
546 SCTP_PRINTF("\n");
547 }
548 for (i = 0; i < sctp_audit_indx; i++) {
549 if ((sctp_audit_data[i][0] == 0xe0) &&
550 (sctp_audit_data[i][1] == 0x01)) {
551 cnt = 0;
552 SCTP_PRINTF("\n");
553 } else if (sctp_audit_data[i][0] == 0xf0) {
554 cnt = 0;
555 SCTP_PRINTF("\n");
556 } else if ((sctp_audit_data[i][0] == 0xc0) &&
557 (sctp_audit_data[i][1] == 0x01)) {
558 SCTP_PRINTF("\n");
559 cnt = 0;
560 }
561 SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
562 (uint32_t) sctp_audit_data[i][1]);
563 cnt++;
564 if ((cnt % 14) == 0)
565 SCTP_PRINTF("\n");
566 }
567 SCTP_PRINTF("\n");
568 }
569
/*
 * Cross-check (and, if necessary, correct) the association's retransmit
 * and flight-size accounting against the actual sent queue:
 *  - sent_queue_retran_cnt vs. chunks marked SCTP_DATAGRAM_RESEND,
 *  - total_flight / total_flight_count vs. chunk book sizes,
 *  - per-net flight_size vs. per-net sums over the sent queue.
 * Each check drops a marker pair into the audit ring (0xAA = entry,
 * 0xAF = mismatch); on any mismatch the counters are overwritten with
 * the recomputed values and the audit ring is printed.
 */
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	/* Record entry into the audit with the caller's tag. */
	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		/* 0xAF/0x01: no endpoint to audit. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		/* 0xAF/0x02: no association to audit. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	/* 0xA1: log the current retransmit count (low byte). */
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	/* Recompute retransmit count and in-flight totals from the queue. */
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		/* 0xAF/0xA1: retransmit-count mismatch; correct it. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA2: total-flight mismatch; correct it. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		/* 0xAF/0xA5: flight chunk-count mismatch; correct it. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	/* Now verify the sum of per-net flight sizes against total_flight. */
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA3: per-net flight sums disagree. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			/* Rebuild each net's flight from its queued chunks. */
			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    (void *)lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		/* Something was inconsistent - dump the audit ring. */
		sctp_print_audit_report();
	}
}
699
700 void
701 sctp_audit_log(uint8_t ev, uint8_t fd)
702 {
703
704 sctp_audit_data[sctp_audit_indx][0] = ev;
705 sctp_audit_data[sctp_audit_indx][1] = fd;
706 sctp_audit_indx++;
707 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
708 sctp_audit_indx = 0;
709 }
710 }
711
712 #endif
713
714 /*
715 * sctp_stop_timers_for_shutdown() should be called
716 * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
717 * state to make sure that all timers are stopped.
718 */
719 void
720 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
721 {
722 struct sctp_association *asoc;
723 struct sctp_nets *net;
724
725 asoc = &stcb->asoc;
726
727 (void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
728 (void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
729 (void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
730 (void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
731 (void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
732 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
733 (void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
734 (void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
735 }
736 }
737
738 /*
739 * a list of sizes based on typical mtu's, used only if next hop size not
740 * returned.
741 */
static uint32_t sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};

/*
 * Return the largest table MTU that is strictly smaller than val.
 * If no table entry is smaller (val <= 68), val itself is returned.
 */
uint32_t
sctp_get_prev_mtu(uint32_t val)
{
	size_t idx;

	/* Walk the table from the largest entry downward. */
	idx = sizeof(sctp_mtu_sizes) / sizeof(sctp_mtu_sizes[0]);
	while (idx > 0) {
		idx--;
		if (sctp_mtu_sizes[idx] < val) {
			return (sctp_mtu_sizes[idx]);
		}
	}
	return (val);
}

/*
 * Return the smallest table MTU that is strictly larger than val.
 * If no table entry is larger (val >= 65535), val itself is returned.
 */
uint32_t
sctp_get_next_mtu(uint32_t val)
{
	size_t idx, count;

	count = sizeof(sctp_mtu_sizes) / sizeof(sctp_mtu_sizes[0]);
	for (idx = 0; idx < count; idx++) {
		if (sctp_mtu_sizes[idx] > val) {
			return (sctp_mtu_sizes[idx]);
		}
	}
	return (val);
}
800
/*
 * Refill the endpoint's random_store by HMAC-ing the endpoint's random
 * numbers with a monotonically increasing counter, then reset the read
 * position (store_at) to the start. Deliberately lock-free: see below.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	m->random_counter++;
}
819
820 uint32_t
821 sctp_select_initial_TSN(struct sctp_pcb *inp)
822 {
823 /*
824 * A true implementation should use random selection process to get
825 * the initial stream sequence number, using RFC1750 as a good
826 * guideline
827 */
828 uint32_t x, *xp;
829 uint8_t *p;
830 int store_at, new_store;
831
832 if (inp->initial_sequence_debug != 0) {
833 uint32_t ret;
834
835 ret = inp->initial_sequence_debug;
836 inp->initial_sequence_debug++;
837 return (ret);
838 }
839 retry:
840 store_at = inp->store_at;
841 new_store = store_at + sizeof(uint32_t);
842 if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
843 new_store = 0;
844 }
845 if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
846 goto retry;
847 }
848 if (new_store == 0) {
849 /* Refill the random store */
850 sctp_fill_random_store(inp);
851 }
852 p = &inp->random_store[store_at];
853 xp = (uint32_t *) p;
854 x = *xp;
855 return (x);
856 }
857
858 uint32_t
859 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
860 {
861 uint32_t x;
862 struct timeval now;
863
864 if (check) {
865 (void)SCTP_GETTIME_TIMEVAL(&now);
866 }
867 for (;;) {
868 x = sctp_select_initial_TSN(&inp->sctp_ep);
869 if (x == 0) {
870 /* we never use 0 */
871 continue;
872 }
873 if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
874 break;
875 }
876 }
877 return (x);
878 }
879
880 int
881 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
882 uint32_t override_tag, uint32_t vrf_id)
883 {
884 struct sctp_association *asoc;
885
886 /*
887 * Anything set to zero is taken care of by the allocation routine's
888 * bzero
889 */
890
891 /*
892 * Up front select what scoping to apply on addresses I tell my peer
893 * Not sure what to do with these right now, we will need to come up
894 * with a way to set them. We may need to pass them through from the
895 * caller in the sctp_aloc_assoc() function.
896 */
897 int i;
898
899 asoc = &stcb->asoc;
900 /* init all variables to a known value. */
901 SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
902 asoc->max_burst = inp->sctp_ep.max_burst;
903 asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
904 asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
905 asoc->cookie_life = inp->sctp_ep.def_cookie_life;
906 asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
907 asoc->ecn_allowed = inp->sctp_ecn_enable;
908 asoc->sctp_nr_sack_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_nr_sack_on_off);
909 asoc->sctp_cmt_pf = (uint8_t) 0;
910 asoc->sctp_frag_point = inp->sctp_frag_point;
911 asoc->sctp_features = inp->sctp_features;
912 asoc->default_dscp = inp->sctp_ep.default_dscp;
913 #ifdef INET6
914 if (inp->sctp_ep.default_flowlabel) {
915 asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
916 } else {
917 if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
918 asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
919 asoc->default_flowlabel &= 0x000fffff;
920 asoc->default_flowlabel |= 0x80000000;
921 } else {
922 asoc->default_flowlabel = 0;
923 }
924 }
925 #endif
926 asoc->sb_send_resv = 0;
927 if (override_tag) {
928 asoc->my_vtag = override_tag;
929 } else {
930 asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
931 }
932 /* Get the nonce tags */
933 asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
934 asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
935 asoc->vrf_id = vrf_id;
936
937 #ifdef SCTP_ASOCLOG_OF_TSNS
938 asoc->tsn_in_at = 0;
939 asoc->tsn_out_at = 0;
940 asoc->tsn_in_wrapped = 0;
941 asoc->tsn_out_wrapped = 0;
942 asoc->cumack_log_at = 0;
943 asoc->cumack_log_atsnt = 0;
944 #endif
945 #ifdef SCTP_FS_SPEC_LOG
946 asoc->fs_index = 0;
947 #endif
948 asoc->refcnt = 0;
949 asoc->assoc_up_sent = 0;
950 asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
951 sctp_select_initial_TSN(&inp->sctp_ep);
952 asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
953 /* we are optimisitic here */
954 asoc->peer_supports_pktdrop = 1;
955 asoc->peer_supports_nat = 0;
956 asoc->sent_queue_retran_cnt = 0;
957
958 /* for CMT */
959 asoc->last_net_cmt_send_started = NULL;
960
961 /* This will need to be adjusted */
962 asoc->last_acked_seq = asoc->init_seq_number - 1;
963 asoc->advanced_peer_ack_point = asoc->last_acked_seq;
964 asoc->asconf_seq_in = asoc->last_acked_seq;
965
966 /* here we are different, we hold the next one we expect */
967 asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
968
969 asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
970 asoc->initial_rto = inp->sctp_ep.initial_rto;
971
972 asoc->max_init_times = inp->sctp_ep.max_init_times;
973 asoc->max_send_times = inp->sctp_ep.max_send_times;
974 asoc->def_net_failure = inp->sctp_ep.def_net_failure;
975 asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
976 asoc->free_chunk_cnt = 0;
977
978 asoc->iam_blocking = 0;
979 asoc->context = inp->sctp_context;
980 asoc->local_strreset_support = inp->local_strreset_support;
981 asoc->def_send = inp->def_send;
982 asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
983 asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
984 asoc->pr_sctp_cnt = 0;
985 asoc->total_output_queue_size = 0;
986
987 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
988 asoc->scope.ipv6_addr_legal = 1;
989 if (SCTP_IPV6_V6ONLY(inp) == 0) {
990 asoc->scope.ipv4_addr_legal = 1;
991 } else {
992 asoc->scope.ipv4_addr_legal = 0;
993 }
994 } else {
995 asoc->scope.ipv6_addr_legal = 0;
996 asoc->scope.ipv4_addr_legal = 1;
997 }
998
999 asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1000 asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1001
1002 asoc->smallest_mtu = inp->sctp_frag_point;
1003 asoc->minrto = inp->sctp_ep.sctp_minrto;
1004 asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1005
1006 asoc->locked_on_sending = NULL;
1007 asoc->stream_locked_on = 0;
1008 asoc->ecn_echo_cnt_onq = 0;
1009 asoc->stream_locked = 0;
1010
1011 asoc->send_sack = 1;
1012
1013 LIST_INIT(&asoc->sctp_restricted_addrs);
1014
1015 TAILQ_INIT(&asoc->nets);
1016 TAILQ_INIT(&asoc->pending_reply_queue);
1017 TAILQ_INIT(&asoc->asconf_ack_sent);
1018 /* Setup to fill the hb random cache at first HB */
1019 asoc->hb_random_idx = 4;
1020
1021 asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1022
1023 stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1024 stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1025
1026 stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1027 stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1028
1029 /*
1030 * Now the stream parameters, here we allocate space for all streams
1031 * that we request by default.
1032 */
1033 asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1034 inp->sctp_ep.pre_open_stream_count;
1035 SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1036 asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1037 SCTP_M_STRMO);
1038 if (asoc->strmout == NULL) {
1039 /* big trouble no memory */
1040 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1041 return (ENOMEM);
1042 }
1043 for (i = 0; i < asoc->streamoutcnt; i++) {
1044 /*
1045 * inbound side must be set to 0xffff, also NOTE when we get
1046 * the INIT-ACK back (for INIT sender) we MUST reduce the
1047 * count (streamoutcnt) but first check if we sent to any of
1048 * the upper streams that were dropped (if some were). Those
1049 * that were dropped must be notified to the upper layer as
1050 * failed to send.
1051 */
1052 asoc->strmout[i].next_sequence_send = 0x0;
1053 TAILQ_INIT(&asoc->strmout[i].outqueue);
1054 asoc->strmout[i].chunks_on_queues = 0;
1055 asoc->strmout[i].stream_no = i;
1056 asoc->strmout[i].last_msg_incomplete = 0;
1057 asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i], NULL);
1058 }
1059 asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1060
1061 /* Now the mapping array */
1062 asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1063 SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1064 SCTP_M_MAP);
1065 if (asoc->mapping_array == NULL) {
1066 SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1067 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1068 return (ENOMEM);
1069 }
1070 memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1071 SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1072 SCTP_M_MAP);
1073 if (asoc->nr_mapping_array == NULL) {
1074 SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1075 SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1076 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1077 return (ENOMEM);
1078 }
1079 memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1080
1081 /* Now the init of the other outqueues */
1082 TAILQ_INIT(&asoc->free_chunks);
1083 TAILQ_INIT(&asoc->control_send_queue);
1084 TAILQ_INIT(&asoc->asconf_send_queue);
1085 TAILQ_INIT(&asoc->send_queue);
1086 TAILQ_INIT(&asoc->sent_queue);
1087 TAILQ_INIT(&asoc->reasmqueue);
1088 TAILQ_INIT(&asoc->resetHead);
1089 asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1090 TAILQ_INIT(&asoc->asconf_queue);
1091 /* authentication fields */
1092 asoc->authinfo.random = NULL;
1093 asoc->authinfo.active_keyid = 0;
1094 asoc->authinfo.assoc_key = NULL;
1095 asoc->authinfo.assoc_keyid = 0;
1096 asoc->authinfo.recv_key = NULL;
1097 asoc->authinfo.recv_keyid = 0;
1098 LIST_INIT(&asoc->shared_keys);
1099 asoc->marked_retrans = 0;
1100 asoc->port = inp->sctp_ep.port;
1101 asoc->timoinit = 0;
1102 asoc->timodata = 0;
1103 asoc->timosack = 0;
1104 asoc->timoshutdown = 0;
1105 asoc->timoheartbeat = 0;
1106 asoc->timocookie = 0;
1107 asoc->timoshutdownack = 0;
1108 (void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1109 asoc->discontinuity_time = asoc->start_time;
1110 /*
1111 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1112 * freed later when the association is freed.
1113 */
1114 return (0);
1115 }
1116
1117 void
1118 sctp_print_mapping_array(struct sctp_association *asoc)
1119 {
1120 unsigned int i, limit;
1121
1122 SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1123 asoc->mapping_array_size,
1124 asoc->mapping_array_base_tsn,
1125 asoc->cumulative_tsn,
1126 asoc->highest_tsn_inside_map,
1127 asoc->highest_tsn_inside_nr_map);
1128 for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1129 if (asoc->mapping_array[limit - 1] != 0) {
1130 break;
1131 }
1132 }
1133 SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1134 for (i = 0; i < limit; i++) {
1135 SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1136 }
1137 if (limit % 16)
1138 SCTP_PRINTF("\n");
1139 for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1140 if (asoc->nr_mapping_array[limit - 1]) {
1141 break;
1142 }
1143 }
1144 SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1145 for (i = 0; i < limit; i++) {
1146 SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1147 }
1148 if (limit % 16)
1149 SCTP_PRINTF("\n");
1150 }
1151
1152 int
1153 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1154 {
1155 /* mapping array needs to grow */
1156 uint8_t *new_array1, *new_array2;
1157 uint32_t new_size;
1158
1159 new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1160 SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1161 SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1162 if ((new_array1 == NULL) || (new_array2 == NULL)) {
1163 /* can't get more, forget it */
1164 SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1165 if (new_array1) {
1166 SCTP_FREE(new_array1, SCTP_M_MAP);
1167 }
1168 if (new_array2) {
1169 SCTP_FREE(new_array2, SCTP_M_MAP);
1170 }
1171 return (-1);
1172 }
1173 memset(new_array1, 0, new_size);
1174 memset(new_array2, 0, new_size);
1175 memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1176 memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1177 SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1178 SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1179 asoc->mapping_array = new_array1;
1180 asoc->nr_mapping_array = new_array2;
1181 asoc->mapping_array_size = new_size;
1182 return (0);
1183 }
1184
1185
/*
 * Core of the PCB/association iterator.  Walks endpoints (and the
 * associations on each endpoint) that match the iterator's pcb_flags,
 * pcb_features and asoc_state filters, invoking the registered callbacks:
 * function_inp once per endpoint, function_assoc once per matching
 * association, function_inp_end when an endpoint is finished, and
 * function_atend when the whole iteration is done.  The iterator structure
 * itself is freed here on completion.
 *
 * Lock order on entry/during the walk: INP_INFO read lock, then the global
 * ITERATOR lock, then per-endpoint INP read locks and per-association TCB
 * locks.  Every SCTP_ITERATOR_MAX_AT_ONCE associations the locks are
 * dropped and re-taken so other threads can make progress; sctp_it_ctl
 * flags set during that window can stop the current endpoint or the whole
 * iteration.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	if (it->inp) {
		SCTP_INP_RLOCK(it->inp);
		/* drop the reference the caller placed on the starting inp */
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		/* the iterator owns itself; free it on completion */
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	if (first_in) {
		/* it->inp is already read-locked above on the first pass */
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		/* hand-over-hand: lock the next inp before unlocking this one */
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		/* endpoint callback asked to skip, or no associations here */
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/* pin the stcb so it survives the unlocked window */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			/* re-take the per-object locks and drop our pins */
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1333
1334 void
1335 sctp_iterator_worker(void)
1336 {
1337 struct sctp_iterator *it, *nit;
1338
1339 /* This function is called with the WQ lock in place */
1340
1341 sctp_it_ctl.iterator_running = 1;
1342 TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
1343 sctp_it_ctl.cur_it = it;
1344 /* now lets work on this one */
1345 TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1346 SCTP_IPI_ITERATOR_WQ_UNLOCK();
1347 CURVNET_SET(it->vn);
1348 sctp_iterator_work(it);
1349 sctp_it_ctl.cur_it = NULL;
1350 CURVNET_RESTORE();
1351 SCTP_IPI_ITERATOR_WQ_LOCK();
1352 /* sa_ignore FREED_MEMORY */
1353 }
1354 sctp_it_ctl.iterator_running = 0;
1355 return;
1356 }
1357
1358
1359 static void
1360 sctp_handle_addr_wq(void)
1361 {
1362 /* deal with the ADDR wq from the rtsock calls */
1363 struct sctp_laddr *wi, *nwi;
1364 struct sctp_asconf_iterator *asc;
1365
1366 SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1367 sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1368 if (asc == NULL) {
1369 /* Try later, no memory */
1370 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1371 (struct sctp_inpcb *)NULL,
1372 (struct sctp_tcb *)NULL,
1373 (struct sctp_nets *)NULL);
1374 return;
1375 }
1376 LIST_INIT(&asc->list_of_work);
1377 asc->cnt = 0;
1378
1379 SCTP_WQ_ADDR_LOCK();
1380 LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1381 LIST_REMOVE(wi, sctp_nxt_addr);
1382 LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1383 asc->cnt++;
1384 }
1385 SCTP_WQ_ADDR_UNLOCK();
1386
1387 if (asc->cnt == 0) {
1388 SCTP_FREE(asc, SCTP_M_ASC_IT);
1389 } else {
1390 (void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1391 sctp_asconf_iterator_stcb,
1392 NULL, /* No ep end for boundall */
1393 SCTP_PCB_FLAGS_BOUNDALL,
1394 SCTP_PCB_ANY_FEATURES,
1395 SCTP_ASOC_ANY_STATE,
1396 (void *)asc, 0,
1397 sctp_asconf_iterator_end, NULL, 0);
1398 }
1399 }
1400
/*
 * Callout entry point for every SCTP timer type.  The opaque argument is
 * the struct sctp_timer that was armed by sctp_timer_start(); it carries
 * back-pointers to the endpoint (ep), association (tcb) and destination
 * (net) it belongs to.  The handler first validates the timer and takes
 * temporary references on the inp/stcb so they cannot be freed underneath
 * us, then dispatches on tmr->type.  Some cases (ASOCKILL, INPKILL) free
 * the very object that contains tmr, so those paths jump to out_no_decr
 * after clearing their pointers.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	int did_output, type;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * (void *)tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	/*
	 * stopped_from is a progress marker: each 0xa00x value below records
	 * how far validation got before the handler bailed out.
	 */
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		/* only the address work-queue timer may run without an inp */
		CURVNET_RESTORE();
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	/*
	 * Cache the type now: the ASOCKILL/INPKILL cases below free the
	 * structure containing tmr, so tmr->type must not be read at the
	 * final debug log.
	 */
	type = tmr->type;
	if (inp) {
		/* hold the endpoint while the handler runs */
		SCTP_INP_INCR_REF(inp);
		if ((inp->sctp_socket == NULL) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			/* socket is gone and this type can't run without it */
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* pin the association; state == 0 means nothing to do here */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/* timer was stopped while we were validating; drop refs */
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		/* take the TCB lock and trade the refcount pin for it */
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	/* record in stopped what t-o occured */
	tmr->stopped_from = tmr->type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (tmr->type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/* T1-init retransmission timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* delayed-SACK timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timosack);
		stcb->asoc.timosack++;
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoheartbeat);
		stcb->asoc.timoheartbeat++;
		if (sctp_heartbeat_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
			/* re-arm the HB timer for this destination */
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		{
			/* rotate the endpoint's cookie secret keys */
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/* shutdown took too long: abort the association */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		sctp_abort_an_association(inp, stcb, NULL, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/* tear down the association; this path frees the stcb */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		/* tear down the endpoint; this path frees the inp (and tmr) */
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_FROM_INPKILL_TIMER);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    tmr->type);
		break;
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) tmr->type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	/* common exit: release the TCB lock taken above */
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	/* drop the endpoint reference taken during validation */
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	/* 'type' was cached earlier; tmr may already be freed here */
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
	    type);
	CURVNET_RESTORE();
}
1840
/*
 * Arm the SCTP timer of the given type.  Each case selects the struct
 * sctp_timer to use (endpoint-, association- or destination-embedded) and
 * computes the timeout in ticks, typically from the destination's RTO
 * (falling back to the association's initial RTO when RTO is still 0).
 * A timer that is already pending is left running unchanged.  Callers with
 * an stcb must hold the TCB lock.
 */
void
sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	uint32_t to_ticks;
	struct sctp_timer *tmr;

	/* only the ADDR_WQ timer may be started without an endpoint */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		/* Only 1 tick away :-) */
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		to_ticks = SCTP_ADDRESS_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* Here we use the RTO timer */
		{
			int rto_val;

			if ((stcb == NULL) || (net == NULL)) {
				return;
			}
			tmr = &net->rxt_timer;
			if (net->RTO == 0) {
				rto_val = stcb->asoc.initial_rto;
			} else {
				rto_val = net->RTO;
			}
			to_ticks = MSEC_TO_TICKS(rto_val);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/*
		 * Here we use the INIT timer default usually about 1
		 * minute.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_RECV:
		/*
		 * Here we use the Delayed-Ack timer value from the inp
		 * ususually about 200ms.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		/* Here we use the RTO of the destination. */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		/*
		 * the net is used here so that we can add in the RTO. Even
		 * though we use a different timer. We also add the HB timer
		 * PLUS a random jitter.
		 */
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
			return;
		} else {
			uint32_t rndval;
			uint32_t jitter;

			if ((net->dest_state & SCTP_ADDR_NOHB) &&
			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
				/* HB disabled on a confirmed address */
				return;
			}
			if (net->RTO == 0) {
				to_ticks = stcb->asoc.initial_rto;
			} else {
				to_ticks = net->RTO;
			}
			/* jitter the RTO by up to +/- 50% */
			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
			jitter = rndval % to_ticks;
			if (jitter >= (to_ticks >> 1)) {
				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
			} else {
				to_ticks = to_ticks - jitter;
			}
			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
			    !(net->dest_state & SCTP_ADDR_PF)) {
				to_ticks += net->heart_beat_delay;
			}
			/*
			 * Now we must convert the to_ticks that are now in
			 * ms to ticks.
			 */
			to_ticks = MSEC_TO_TICKS(to_ticks);
			tmr = &net->hb_timer;
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		/*
		 * Here we can use the RTO timer from the network since one
		 * RTT was compelete. If a retran happened then we will be
		 * using the RTO initial value.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/*
		 * nothing needed but the endpoint here ususually about 60
		 * minutes.
		 */
		if (inp == NULL) {
			return;
		}
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if (stcb == NULL) {
			return;
		}
		/* re-uses the stream-reset timer slot in the asoc */
		tmr = &stcb->asoc.strreset_timer;
		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		if (inp == NULL) {
			return;
		}
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		/*
		 * Here we use the value found in the EP for PMTU ususually
		 * about 10 minutes.
		 */
		if ((stcb == NULL) || (inp == NULL)) {
			return;
		}
		if (net == NULL) {
			return;
		}
		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
			/* PMTU discovery disabled for this destination */
			return;
		}
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		/* Here we use the RTO of the destination */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/*
		 * Here we use the endpoints shutdown guard timer usually
		 * about 3 minutes.
		 */
		if ((inp == NULL) || (stcb == NULL)) {
			return;
		}
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		/* note: requires net == NULL, unlike most cases above */
		if ((stcb == NULL) || (net != NULL)) {
			return;
		}
		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		if (stcb->asoc.sctp_autoclose_ticks == 0) {
			/*
			 * Really an error since stcb is NOT set to
			 * autoclose
			 */
			return;
		}
		to_ticks = stcb->asoc.sctp_autoclose_ticks;
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		return;
		break;
	}
	/* NB: to_ticks is unsigned, so "<= 0" only catches to_ticks == 0 */
	if ((to_ticks <= 0) || (tmr == NULL)) {
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
		    __FUNCTION__, t_type, to_ticks, (void *)tmr);
		return;
	}
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * we do NOT allow you to have it already running. if it is
		 * we leave the current one up unchanged
		 */
		return;
	}
	/* At this point we can proceed */
	if (t_type == SCTP_TIMER_TYPE_SEND) {
		/* track outstanding T3 timers per association */
		stcb->asoc.num_send_timers_up++;
	}
	/* record the context the timeout handler will need */
	tmr->stopped_from = 0;
	tmr->type = t_type;
	tmr->ep = (void *)inp;
	tmr->tcb = (void *)stcb;
	tmr->net = (void *)net;
	tmr->self = (void *)tmr;
	tmr->vnet = (void *)curvnet;
	tmr->ticks = sctp_get_tick_count();
	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
	return;
}
2134
2135 void
2136 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2137 struct sctp_nets *net, uint32_t from)
2138 {
2139 struct sctp_timer *tmr;
2140
2141 if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
2142 (inp == NULL))
2143 return;
2144
2145 tmr = NULL;
2146 if (stcb) {
2147 SCTP_TCB_LOCK_ASSERT(stcb);
2148 }
2149 switch (t_type) {
2150 case SCTP_TIMER_TYPE_ZERO_COPY:
2151 tmr = &inp->sctp_ep.zero_copy_timer;
2152 break;
2153 case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
2154 tmr = &inp->sctp_ep.zero_copy_sendq_timer;
2155 break;
2156 case SCTP_TIMER_TYPE_ADDR_WQ:
2157 tmr = &SCTP_BASE_INFO(addr_wq_timer);
2158 break;
2159 case SCTP_TIMER_TYPE_SEND:
2160 if ((stcb == NULL) || (net == NULL)) {
2161 return;
2162 }
2163 tmr = &net->rxt_timer;
2164 break;
2165 case SCTP_TIMER_TYPE_INIT:
2166 if ((stcb == NULL) || (net == NULL)) {
2167 return;
2168 }
2169 tmr = &net->rxt_timer;
2170 break;
2171 case SCTP_TIMER_TYPE_RECV:
2172 if (stcb == NULL) {
2173 return;
2174 }
2175 tmr = &stcb->asoc.dack_timer;
2176 break;
2177 case SCTP_TIMER_TYPE_SHUTDOWN:
2178 if ((stcb == NULL) || (net == NULL)) {
2179 return;
2180 }
2181 tmr = &net->rxt_timer;
2182 break;
2183 case SCTP_TIMER_TYPE_HEARTBEAT:
2184 if ((stcb == NULL) || (net == NULL)) {
2185 return;
2186 }
2187 tmr = &net->hb_timer;
2188 break;
2189 case SCTP_TIMER_TYPE_COOKIE:
2190 if ((stcb == NULL) || (net == NULL)) {
2191 return;
2192 }
2193 tmr = &net->rxt_timer;
2194 break;
2195 case SCTP_TIMER_TYPE_NEWCOOKIE:
2196 /* nothing needed but the endpoint here */
2197 tmr = &inp->sctp_ep.signature_change;
2198 /*
2199 * We re-use the newcookie timer for the INP kill timer. We
2200 * must assure that we do not kill it by accident.
2201 */
2202 break;
2203 case SCTP_TIMER_TYPE_ASOCKILL:
2204 /*
2205 * Stop the asoc kill timer.
2206 */
2207 if (stcb == NULL) {
2208 return;
2209 }
2210 tmr = &stcb->asoc.strreset_timer;
2211 break;
2212
2213 case SCTP_TIMER_TYPE_INPKILL:
2214 /*
2215 * The inp is setup to die. We re-use the signature_chage
2216 * timer since that has stopped and we are in the GONE
2217 * state.
2218 */
2219 tmr = &inp->sctp_ep.signature_change;
2220 break;
2221 case SCTP_TIMER_TYPE_PATHMTURAISE:
2222 if ((stcb == NULL) || (net == NULL)) {
2223 return;
2224 }
2225 tmr = &net->pmtu_timer;
2226 break;
2227 case SCTP_TIMER_TYPE_SHUTDOWNACK:
2228 if ((stcb == NULL) || (net == NULL)) {
2229 return;
2230 }
2231 tmr = &net->rxt_timer;
2232 break;
2233 case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2234 if (stcb == NULL) {
2235 return;
2236 }
2237 tmr = &stcb->asoc.shut_guard_timer;
2238 break;
2239 case SCTP_TIMER_TYPE_STRRESET:
2240 if (stcb == NULL) {
2241 return;
2242 }
2243 tmr = &stcb->asoc.strreset_timer;
2244 break;
2245 case SCTP_TIMER_TYPE_ASCONF:
2246 if (stcb == NULL) {
2247 return;
2248 }
2249 tmr = &stcb->asoc.asconf_timer;
2250 break;
2251 case SCTP_TIMER_TYPE_PRIM_DELETED:
2252 if (stcb == NULL) {
2253 return;
2254 }
2255 tmr = &stcb->asoc.delete_prim_timer;
2256 break;
2257 case SCTP_TIMER_TYPE_AUTOCLOSE:
2258 if (stcb == NULL) {
2259 return;
2260 }
2261 tmr = &stcb->asoc.autoclose_timer;
2262 break;
2263 default:
2264 SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2265 __FUNCTION__, t_type);
2266 break;
2267 }
2268 if (tmr == NULL) {
2269 return;
2270 }
2271 if ((tmr->type != t_type) && tmr->type) {
2272 /*
2273 * Ok we have a timer that is under joint use. Cookie timer
2274 * per chance with the SEND timer. We therefore are NOT
2275 * running the timer that the caller wants stopped. So just
2276 * return.
2277 */
2278 return;
2279 }
2280 if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2281 stcb->asoc.num_send_timers_up--;
2282 if (stcb->asoc.num_send_timers_up < 0) {
2283 stcb->asoc.num_send_timers_up = 0;
2284 }
2285 }
2286 tmr->self = NULL;
2287 tmr->stopped_from = from;
2288 (void)SCTP_OS_TIMER_STOP(&tmr->timer);
2289 return;
2290 }
2291
2292 uint32_t
2293 sctp_calculate_len(struct mbuf *m)
2294 {
2295 uint32_t tlen = 0;
2296 struct mbuf *at;
2297
2298 at = m;
2299 while (at) {
2300 tlen += SCTP_BUF_LEN(at);
2301 at = SCTP_BUF_NEXT(at);
2302 }
2303 return (tlen);
2304 }
2305
2306 void
2307 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2308 struct sctp_association *asoc, uint32_t mtu)
2309 {
2310 /*
2311 * Reset the P-MTU size on this association, this involves changing
2312 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2313 * allow the DF flag to be cleared.
2314 */
2315 struct sctp_tmit_chunk *chk;
2316 unsigned int eff_mtu, ovh;
2317
2318 asoc->smallest_mtu = mtu;
2319 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2320 ovh = SCTP_MIN_OVERHEAD;
2321 } else {
2322 ovh = SCTP_MIN_V4_OVERHEAD;
2323 }
2324 eff_mtu = mtu - ovh;
2325 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2326 if (chk->send_size > eff_mtu) {
2327 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2328 }
2329 }
2330 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2331 if (chk->send_size > eff_mtu) {
2332 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2333 }
2334 }
2335 }
2336
2337
/*
 * Given an association and the starting time of the current RTT period,
 * return the RTO in number of msecs.  'net' should point to the current
 * network.
 */
2342
uint32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *told,
    int safe, int rtt_from_sack)
{
	/*-
	 * given an association and the starting time of the current RTT
	 * period (in *told) return RTO in number of msecs.
	 * Side effects: updates net->rtt (in usecs), the smoothed RTT
	 * state (net->lastsa/lastsv) and the asoc's sat_network flags.
	 */
	int32_t rtt;		/* RTT in ms */
	uint32_t new_rto;
	int first_measure = 0;
	struct timeval now, then, *old;

	/*
	 * Copy it out for sparc64: the caller's timeval may be unaligned,
	 * so when asked to, take a local properly-aligned copy first.
	 */
	if (safe == sctp_align_unsafe_makecopy) {
		old = &then;
		memcpy(&then, told, sizeof(struct timeval));
	} else if (safe == sctp_align_safe_nocopy) {
		old = told;
	} else {
		/* error: caller passed an unknown alignment directive */
		SCTP_PRINTF("Huh, bad rto calc call\n");
		return (0);
	}
	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	if (stcb->asoc.use_precise_time) {
		(void)SCTP_GETPTIME_TIMEVAL(&now);
	} else {
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	timevalsub(&now, old);
	/* store the current RTT in us */
	net->rtt = (uint64_t) 1000000 *(uint64_t) now.tv_sec +
	    (uint64_t) now.tv_usec;

	/* compute rtt in ms */
	rtt = net->rtt / 1000;
	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
		/*
		 * Tell the CC module that a new update has just occurred
		 * from a sack
		 */
		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
	}
	/*
	 * Do we need to determine the lan? We do this only on sacks i.e.
	 * RTT being determined from data not non-data (HB/INIT->INITACK).
	 */
	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
			net->lan_type = SCTP_LAN_INTERNET;
		} else {
			net->lan_type = SCTP_LAN_LOCAL;
		}
	}
	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	/*-
	 * Compute the scaled average lastsa and the
	 * scaled variance lastsv as described in van Jacobson
	 * Paper "Congestion Avoidance and Control", Annex A.
	 *
	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
	 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar
	 */
	if (net->RTO_measured) {
		/* Subsequent measurement: EWMA update of srtt and rttvar */
		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
		net->lastsa += rtt;
		if (rtt < 0) {
			rtt = -rtt;
		}
		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
		net->lastsv += rtt;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
	} else {
		/* First RTO measurement: seed srtt = rtt, rttvar = rtt/2 */
		net->RTO_measured = 1;
		first_measure = 1;
		net->lastsa = rtt << SCTP_RTT_SHIFT;
		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
	if (net->lastsv == 0) {
		/* never let the variance collapse below clock granularity */
		net->lastsv = SCTP_CLOCK_GRANULARITY;
	}
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		/* large RTO: looks like a satellite network */
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		/* left satellite range: clear flag and lock out re-entry */
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTO */
	return (new_rto);
}
2458
2459 /*
2460 * return a pointer to a contiguous piece of data from the given mbuf chain
2461 * starting at 'off' for 'len' bytes. If the desired piece spans more than
2462 * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2463 * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2464 */
2465 caddr_t
2466 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2467 {
2468 uint32_t count;
2469 uint8_t *ptr;
2470
2471 ptr = in_ptr;
2472 if ((off < 0) || (len <= 0))
2473 return (NULL);
2474
2475 /* find the desired start location */
2476 while ((m != NULL) && (off > 0)) {
2477 if (off < SCTP_BUF_LEN(m))
2478 break;
2479 off -= SCTP_BUF_LEN(m);
2480 m = SCTP_BUF_NEXT(m);
2481 }
2482 if (m == NULL)
2483 return (NULL);
2484
2485 /* is the current mbuf large enough (eg. contiguous)? */
2486 if ((SCTP_BUF_LEN(m) - off) >= len) {
2487 return (mtod(m, caddr_t)+off);
2488 } else {
2489 /* else, it spans more than one mbuf, so save a temp copy... */
2490 while ((m != NULL) && (len > 0)) {
2491 count = min(SCTP_BUF_LEN(m) - off, len);
2492 bcopy(mtod(m, caddr_t)+off, ptr, count);
2493 len -= count;
2494 ptr += count;
2495 off = 0;
2496 m = SCTP_BUF_NEXT(m);
2497 }
2498 if ((m == NULL) && (len > 0))
2499 return (NULL);
2500 else
2501 return ((caddr_t)in_ptr);
2502 }
2503 }
2504
2505
2506
2507 struct sctp_paramhdr *
2508 sctp_get_next_param(struct mbuf *m,
2509 int offset,
2510 struct sctp_paramhdr *pull,
2511 int pull_limit)
2512 {
2513 /* This just provides a typed signature to Peter's Pull routine */
2514 return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2515 (uint8_t *) pull));
2516 }
2517
2518
2519 int
2520 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2521 {
2522 /*
2523 * add padlen bytes of 0 filled padding to the end of the mbuf. If
2524 * padlen is > 3 this routine will fail.
2525 */
2526 uint8_t *dp;
2527 int i;
2528
2529 if (padlen > 3) {
2530 SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2531 return (ENOBUFS);
2532 }
2533 if (padlen <= M_TRAILINGSPACE(m)) {
2534 /*
2535 * The easy way. We hope the majority of the time we hit
2536 * here :)
2537 */
2538 dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
2539 SCTP_BUF_LEN(m) += padlen;
2540 } else {
2541 /* Hard way we must grow the mbuf */
2542 struct mbuf *tmp;
2543
2544 tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA);
2545 if (tmp == NULL) {
2546 /* Out of space GAK! we are in big trouble. */
2547 SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2548 return (ENOBUFS);
2549 }
2550 /* setup and insert in middle */
2551 SCTP_BUF_LEN(tmp) = padlen;
2552 SCTP_BUF_NEXT(tmp) = NULL;
2553 SCTP_BUF_NEXT(m) = tmp;
2554 dp = mtod(tmp, uint8_t *);
2555 }
2556 /* zero out the pad */
2557 for (i = 0; i < padlen; i++) {
2558 *dp = 0;
2559 dp++;
2560 }
2561 return (0);
2562 }
2563
2564 int
2565 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2566 {
2567 /* find the last mbuf in chain and pad it */
2568 struct mbuf *m_at;
2569
2570 if (last_mbuf) {
2571 return (sctp_add_pad_tombuf(last_mbuf, padval));
2572 } else {
2573 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2574 if (SCTP_BUF_NEXT(m_at) == NULL) {
2575 return (sctp_add_pad_tombuf(m_at, padval));
2576 }
2577 }
2578 }
2579 SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
2580 return (EFAULT);
2581 }
2582
2583 static void
2584 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
2585 uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
2586 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2587 SCTP_UNUSED
2588 #endif
2589 )
2590 {
2591 struct mbuf *m_notify;
2592 struct sctp_assoc_change *sac;
2593 struct sctp_queued_to_read *control;
2594 size_t notif_len, abort_len;
2595 unsigned int i;
2596
2597 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2598 struct socket *so;
2599
2600 #endif
2601
2602 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
2603 notif_len = sizeof(struct sctp_assoc_change);
2604 if (abort != NULL) {
2605 abort_len = htons(abort->ch.chunk_length);
2606 } else {
2607 abort_len = 0;
2608 }
2609 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2610 notif_len += SCTP_ASSOC_SUPPORTS_MAX;
2611 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2612 notif_len += abort_len;
2613 }
2614 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_DONTWAIT, 1, MT_DATA);
2615 if (m_notify == NULL) {
2616 /* Retry with smaller value. */
2617 notif_len = sizeof(struct sctp_assoc_change);
2618 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_DONTWAIT, 1, MT_DATA);
2619 if (m_notify == NULL) {
2620 goto set_error;
2621 }
2622 }
2623 SCTP_BUF_NEXT(m_notify) = NULL;
2624 sac = mtod(m_notify, struct sctp_assoc_change *);
2625 memset(sac, 0, notif_len);
2626 sac->sac_type = SCTP_ASSOC_CHANGE;
2627 sac->sac_flags = 0;
2628 sac->sac_length = sizeof(struct sctp_assoc_change);
2629 sac->sac_state = state;
2630 sac->sac_error = error;
2631 /* XXX verify these stream counts */
2632 sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
2633 sac->sac_inbound_streams = stcb->asoc.streamincnt;
2634 sac->sac_assoc_id = sctp_get_associd(stcb);
2635 if (notif_len > sizeof(struct sctp_assoc_change)) {
2636 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2637 i = 0;
2638 if (stcb->asoc.peer_supports_prsctp) {
2639 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
2640 }
2641 if (stcb->asoc.peer_supports_auth) {
2642 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
2643 }
2644 if (stcb->asoc.peer_supports_asconf) {
2645 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
2646 }
2647 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
2648 if (stcb->asoc.peer_supports_strreset) {
2649 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
2650 }
2651 sac->sac_length += i;
2652 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2653 memcpy(sac->sac_info, abort, abort_len);
2654 sac->sac_length += abort_len;
2655 }
2656 }
2657 SCTP_BUF_LEN(m_notify) = sac->sac_length;
2658 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2659 0, 0, stcb->asoc.context, 0, 0, 0,
2660 m_notify);
2661 if (control != NULL) {
2662 control->length = SCTP_BUF_LEN(m_notify);
2663 /* not that we need this */
2664 control->tail_mbuf = m_notify;
2665 control->spec_flags = M_NOTIFICATION;
2666 sctp_add_to_readq(stcb->sctp_ep, stcb,
2667 control,
2668 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
2669 so_locked);
2670 } else {
2671 sctp_m_freem(m_notify);
2672 }
2673 }
2674 /*
2675 * For 1-to-1 style sockets, we send up and error when an ABORT
2676 * comes in.
2677 */
2678 set_error:
2679 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2680 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2681 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2682 SOCK_LOCK(stcb->sctp_socket);
2683 if (from_peer) {
2684 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
2685 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
2686 stcb->sctp_socket->so_error = ECONNREFUSED;
2687 } else {
2688 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
2689 stcb->sctp_socket->so_error = ECONNRESET;
2690 }
2691 } else {
2692 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
2693 stcb->sctp_socket->so_error = ECONNABORTED;
2694 }
2695 }
2696 /* Wake ANY sleepers */
2697 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2698 so = SCTP_INP_SO(stcb->sctp_ep);
2699 if (!so_locked) {
2700 atomic_add_int(&stcb->asoc.refcnt, 1);
2701 SCTP_TCB_UNLOCK(stcb);
2702 SCTP_SOCKET_LOCK(so, 1);
2703 SCTP_TCB_LOCK(stcb);
2704 atomic_subtract_int(&stcb->asoc.refcnt, 1);
2705 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2706 SCTP_SOCKET_UNLOCK(so, 1);
2707 return;
2708 }
2709 }
2710 #endif
2711 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2712 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2713 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2714 socantrcvmore_locked(stcb->sctp_socket);
2715 }
2716 sorwakeup(stcb->sctp_socket);
2717 sowwakeup(stcb->sctp_socket);
2718 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2719 if (!so_locked) {
2720 SCTP_SOCKET_UNLOCK(so, 1);
2721 }
2722 #endif
2723 }
2724
2725 static void
2726 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2727 struct sockaddr *sa, uint32_t error)
2728 {
2729 struct mbuf *m_notify;
2730 struct sctp_paddr_change *spc;
2731 struct sctp_queued_to_read *control;
2732
2733 if ((stcb == NULL) ||
2734 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
2735 /* event not enabled */
2736 return;
2737 }
2738 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA);
2739 if (m_notify == NULL)
2740 return;
2741 SCTP_BUF_LEN(m_notify) = 0;
2742 spc = mtod(m_notify, struct sctp_paddr_change *);
2743 spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2744 spc->spc_flags = 0;
2745 spc->spc_length = sizeof(struct sctp_paddr_change);
2746 switch (sa->sa_family) {
2747 #ifdef INET
2748 case AF_INET:
2749 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2750 break;
2751 #endif
2752 #ifdef INET6
2753 case AF_INET6:
2754 {
2755 struct sockaddr_in6 *sin6;
2756
2757 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2758
2759 sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
2760 if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
2761 if (sin6->sin6_scope_id == 0) {
2762 /* recover scope_id for user */
2763 (void)sa6_recoverscope(sin6);
2764 } else {
2765 /* clear embedded scope_id for user */
2766 in6_clearscope(&sin6->sin6_addr);
2767 }
2768 }
2769 break;
2770 }
2771 #endif
2772 default:
2773 /* TSNH */
2774 break;
2775 }
2776 spc->spc_state = state;
2777 spc->spc_error = error;
2778 spc->spc_assoc_id = sctp_get_associd(stcb);
2779
2780 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
2781 SCTP_BUF_NEXT(m_notify) = NULL;
2782
2783 /* append to socket */
2784 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2785 0, 0, stcb->asoc.context, 0, 0, 0,
2786 m_notify);
2787 if (control == NULL) {
2788 /* no memory */
2789 sctp_m_freem(m_notify);
2790 return;
2791 }
2792 control->length = SCTP_BUF_LEN(m_notify);
2793 control->spec_flags = M_NOTIFICATION;
2794 /* not that we need this */
2795 control->tail_mbuf = m_notify;
2796 sctp_add_to_readq(stcb->sctp_ep, stcb,
2797 control,
2798 &stcb->sctp_socket->so_rcv, 1,
2799 SCTP_READ_LOCK_NOT_HELD,
2800 SCTP_SO_NOT_LOCKED);
2801 }
2802
2803
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Queue a send-failed notification for chunk 'chk': the new-style
	 * SCTP_SEND_FAILED_EVENT when the user enabled it, otherwise the
	 * old-style SCTP_SEND_FAILED.  'sent' selects the SCTP_DATA_SENT
	 * vs SCTP_DATA_UNSENT flag.  The chunk's data mbuf chain is
	 * stolen (chk->data is cleared) and appended to the notification.
	 */
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	/* pick the notification format the application asked for */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		length = sizeof(struct sctp_send_failed_event);
	} else {
		length = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(length, 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, length);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		if (sent) {
			ssfe->ssfe_flags = SCTP_DATA_SENT;
		} else {
			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		}
		/*
		 * Reported length covers the user payload only.
		 * NOTE(review): assumes chk->send_size includes the data
		 * chunk header -- confirm for all callers.
		 */
		length += chk->send_size;
		length -= sizeof(struct sctp_data_chunk);
		ssfe->ssfe_length = length;
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = chk->rec.data.stream_number;
		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
		ssfe->ssfe_info.snd_ppid = chk->rec.data.payloadtype;
		ssfe->ssfe_info.snd_context = chk->rec.data.context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
	} else {
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, length);
		ssf->ssf_type = SCTP_SEND_FAILED;
		if (sent) {
			ssf->ssf_flags = SCTP_DATA_SENT;
		} else {
			ssf->ssf_flags = SCTP_DATA_UNSENT;
		}
		/* see NOTE(review) above on the send_size assumption */
		length += chk->send_size;
		length -= sizeof(struct sctp_data_chunk);
		ssf->ssf_length = length;
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
		ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
		ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
		ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
		ssf->ssf_info.sinfo_context = chk->rec.data.context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	}
	if (chk->data) {
		/*
		 * trim off the sctp chunk header (it should be there)
		 */
		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
			m_adj(chk->data, sizeof(struct sctp_data_chunk));
			sctp_mbuf_crush(chk->data);
			chk->send_size -= sizeof(struct sctp_data_chunk);
		}
	}
	SCTP_BUF_NEXT(m_notify) = chk->data;
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
2917
2918
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Like sctp_notify_send_failed(), but for data still sitting on a
	 * stream queue (never chunked): queue a send-failed notification
	 * for pending message 'sp', always flagged SCTP_DATA_UNSENT.  The
	 * pending data mbufs are stolen (sp->data is cleared) and appended
	 * to the notification.
	 */
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	/* pick the notification format the application asked for */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		length = sizeof(struct sctp_send_failed_event);
	} else {
		length = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(length, 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* no space left */
		return;
	}
	SCTP_BUF_LEN(m_notify) = 0;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, length);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		/* reported length is header plus the pending payload */
		length += sp->length;
		ssfe->ssfe_length = length;
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = sp->stream;
		if (sp->some_taken) {
			/* part of the message was already chunked out */
			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
		}
		ssfe->ssfe_info.snd_ppid = sp->ppid;
		ssfe->ssfe_info.snd_context = sp->context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
	} else {
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, length);
		ssf->ssf_type = SCTP_SEND_FAILED;
		ssf->ssf_flags = SCTP_DATA_UNSENT;
		length += sp->length;
		ssf->ssf_length = length;
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssf->ssf_info.sinfo_stream = sp->stream;
		ssf->ssf_info.sinfo_ssn = 0;
		if (sp->some_taken) {
			/* part of the message was already chunked out */
			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
		}
		ssf->ssf_info.sinfo_ppid = sp->ppid;
		ssf->ssf_info.sinfo_context = sp->context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	}
	SCTP_BUF_NEXT(m_notify) = sp->data;

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}
3019
3020
3021
3022 static void
3023 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3024 {
3025 struct mbuf *m_notify;
3026 struct sctp_adaptation_event *sai;
3027 struct sctp_queued_to_read *control;
3028
3029 if ((stcb == NULL) ||
3030 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3031 /* event not enabled */
3032 return;
3033 }
3034 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA);
3035 if (m_notify == NULL)
3036 /* no space left */
3037 return;
3038 SCTP_BUF_LEN(m_notify) = 0;
3039 sai = mtod(m_notify, struct sctp_adaptation_event *);
3040 memset(sai, 0, sizeof(struct sctp_adaptation_event));
3041 sai->sai_type = SCTP_ADAPTATION_INDICATION;
3042 sai->sai_flags = 0;
3043 sai->sai_length = sizeof(struct sctp_adaptation_event);
3044 sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3045 sai->sai_assoc_id = sctp_get_associd(stcb);
3046
3047 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3048 SCTP_BUF_NEXT(m_notify) = NULL;
3049
3050 /* append to socket */
3051 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3052 0, 0, stcb->asoc.context, 0, 0, 0,
3053 m_notify);
3054 if (control == NULL) {
3055 /* no memory */
3056 sctp_m_freem(m_notify);
3057 return;
3058 }
3059 control->length = SCTP_BUF_LEN(m_notify);
3060 control->spec_flags = M_NOTIFICATION;
3061 /* not that we need this */
3062 control->tail_mbuf = m_notify;
3063 sctp_add_to_readq(stcb->sctp_ep, stcb,
3064 control,
3065 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3066 }
3067
3068 /* This always must be called with the read-queue LOCKED in the INP */
static void
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
    uint32_t val, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Queue an SCTP_PARTIAL_DELIVERY_EVENT notification.  'val' packs
	 * the stream number (upper 16 bits) and sequence number (lower 16
	 * bits).  Caller must hold the INP read-queue lock (see comment
	 * above); the entry is inserted directly after the partially
	 * delivered message it refers to, when one is being tracked.
	 */
	struct mbuf *m_notify;
	struct sctp_pdapi_event *pdapi;
	struct sctp_queued_to_read *control;
	struct sockbuf *sb;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
		/* event not enabled */
		return;
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* nobody can read from this socket any more */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
	pdapi->pdapi_flags = 0;
	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
	pdapi->pdapi_indication = error;
	/* unpack stream and sequence from 'val' */
	pdapi->pdapi_stream = (val >> 16);
	pdapi->pdapi_seq = (val & 0x0000ffff);
	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->held_length = 0;
	/*
	 * length is reset to 0 and rebuilt below with atomic_add_int so
	 * the socket-buffer accounting done by sctp_sballoc() and the
	 * control entry stay consistent.
	 */
	control->length = 0;
	sb = &stcb->sctp_socket->so_rcv;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
	}
	sctp_sballoc(stcb, sb, m_notify);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
	}
	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
	control->end_added = 1;
	if (stcb->asoc.control_pdapi)
		/* insert right after the partially delivered message */
		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
	else {
		/* we really should not see this case */
		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
	}
	if (stcb->sctp_ep && stcb->sctp_socket) {
		/* This should always be the case */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/* lock ordering: drop TCB lock around socket lock */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
3163
/*
 * Deliver a SCTP_SHUTDOWN_EVENT notification.  For 1-to-1 style (TCP
 * model) and connected 1-to-many sockets the socket is additionally
 * marked as "can't send more" so a pending writer wakes up.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		/*
		 * Lock-order dance: release the TCB lock before taking the
		 * socket lock, keeping a refcount so the asoc stays alive.
		 */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* socket closed while we were unlocked */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	memset(sse, 0, sizeof(struct sctp_shutdown_event));
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3232
3233 static void
3234 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3235 int so_locked
3236 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3237 SCTP_UNUSED
3238 #endif
3239 )
3240 {
3241 struct mbuf *m_notify;
3242 struct sctp_sender_dry_event *event;
3243 struct sctp_queued_to_read *control;
3244
3245 if ((stcb == NULL) ||
3246 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3247 /* event not enabled */
3248 return;
3249 }
3250 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_DONTWAIT, 1, MT_DATA);
3251 if (m_notify == NULL) {
3252 /* no space left */
3253 return;
3254 }
3255 SCTP_BUF_LEN(m_notify) = 0;
3256 event = mtod(m_notify, struct sctp_sender_dry_event *);
3257 memset(event, 0, sizeof(struct sctp_sender_dry_event));
3258 event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3259 event->sender_dry_flags = 0;
3260 event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3261 event->sender_dry_assoc_id = sctp_get_associd(stcb);
3262
3263 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3264 SCTP_BUF_NEXT(m_notify) = NULL;
3265
3266 /* append to socket */
3267 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3268 0, 0, stcb->asoc.context, 0, 0, 0,
3269 m_notify);
3270 if (control == NULL) {
3271 /* no memory */
3272 sctp_m_freem(m_notify);
3273 return;
3274 }
3275 control->length = SCTP_BUF_LEN(m_notify);
3276 control->spec_flags = M_NOTIFICATION;
3277 /* not that we need this */
3278 control->tail_mbuf = m_notify;
3279 sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3280 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3281 }
3282
3283
3284 void
3285 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3286 {
3287 struct mbuf *m_notify;
3288 struct sctp_queued_to_read *control;
3289 struct sctp_stream_change_event *stradd;
3290
3291 if ((stcb == NULL) ||
3292 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3293 /* event not enabled */
3294 return;
3295 }
3296 if ((stcb->asoc.peer_req_out) && flag) {
3297 /* Peer made the request, don't tell the local user */
3298 stcb->asoc.peer_req_out = 0;
3299 return;
3300 }
3301 stcb->asoc.peer_req_out = 0;
3302 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_DONTWAIT, 1, MT_DATA);
3303 if (m_notify == NULL)
3304 /* no space left */
3305 return;
3306 SCTP_BUF_LEN(m_notify) = 0;
3307 stradd = mtod(m_notify, struct sctp_stream_change_event *);
3308 memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3309 stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3310 stradd->strchange_flags = flag;
3311 stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3312 stradd->strchange_assoc_id = sctp_get_associd(stcb);
3313 stradd->strchange_instrms = numberin;
3314 stradd->strchange_outstrms = numberout;
3315 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3316 SCTP_BUF_NEXT(m_notify) = NULL;
3317 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3318 /* no space */
3319 sctp_m_freem(m_notify);
3320 return;
3321 }
3322 /* append to socket */
3323 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3324 0, 0, stcb->asoc.context, 0, 0, 0,
3325 m_notify);
3326 if (control == NULL) {
3327 /* no memory */
3328 sctp_m_freem(m_notify);
3329 return;
3330 }
3331 control->spec_flags = M_NOTIFICATION;
3332 control->length = SCTP_BUF_LEN(m_notify);
3333 /* not that we need this */
3334 control->tail_mbuf = m_notify;
3335 sctp_add_to_readq(stcb->sctp_ep, stcb,
3336 control,
3337 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3338 }
3339
3340 void
3341 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3342 {
3343 struct mbuf *m_notify;
3344 struct sctp_queued_to_read *control;
3345 struct sctp_assoc_reset_event *strasoc;
3346
3347 if ((stcb == NULL) ||
3348 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3349 /* event not enabled */
3350 return;
3351 }
3352 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_DONTWAIT, 1, MT_DATA);
3353 if (m_notify == NULL)
3354 /* no space left */
3355 return;
3356 SCTP_BUF_LEN(m_notify) = 0;
3357 strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3358 memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3359 strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3360 strasoc->assocreset_flags = flag;
3361 strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3362 strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3363 strasoc->assocreset_local_tsn = sending_tsn;
3364 strasoc->assocreset_remote_tsn = recv_tsn;
3365 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3366 SCTP_BUF_NEXT(m_notify) = NULL;
3367 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3368 /* no space */
3369 sctp_m_freem(m_notify);
3370 return;
3371 }
3372 /* append to socket */
3373 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3374 0, 0, stcb->asoc.context, 0, 0, 0,
3375 m_notify);
3376 if (control == NULL) {
3377 /* no memory */
3378 sctp_m_freem(m_notify);
3379 return;
3380 }
3381 control->spec_flags = M_NOTIFICATION;
3382 control->length = SCTP_BUF_LEN(m_notify);
3383 /* not that we need this */
3384 control->tail_mbuf = m_notify;
3385 sctp_add_to_readq(stcb->sctp_ep, stcb,
3386 control,
3387 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3388 }
3389
3390
3391
/*
 * Deliver a SCTP_STREAM_RESET_EVENT notification listing the affected
 * stream ids.  'number_entries' is the count of 16-bit stream numbers in
 * 'list' (network byte order); 'flag' carries the strreset_flags
 * (direction and result bits) to report.
 */
static void
sctp_notify_stream_reset(struct sctp_tcb *stcb,
    int number_entries, uint16_t * list, int flag)
{
	struct mbuf *m_notify;
	struct sctp_queued_to_read *control;
	struct sctp_stream_reset_event *strreset;
	int len;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	/* event header plus one 16-bit stream number per entry */
	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
	if (len > M_TRAILINGSPACE(m_notify)) {
		/* never enough room */
		sctp_m_freem(m_notify);
		return;
	}
	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
	memset(strreset, 0, len);
	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
	strreset->strreset_flags = flag;
	strreset->strreset_length = len;
	strreset->strreset_assoc_id = sctp_get_associd(stcb);
	if (number_entries) {
		int i;

		/* convert the stream ids from network to host byte order */
		for (i = 0; i < number_entries; i++) {
			strreset->strreset_stream_list[i] = ntohs(list[i]);
		}
	}
	SCTP_BUF_LEN(m_notify) = len;
	SCTP_BUF_NEXT(m_notify) = NULL;
	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* no space */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3454
3455
3456 static void
3457 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
3458 {
3459 struct mbuf *m_notify;
3460 struct sctp_remote_error *sre;
3461 struct sctp_queued_to_read *control;
3462 size_t notif_len, chunk_len;
3463
3464 if ((stcb == NULL) ||
3465 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
3466 return;
3467 }
3468 if (chunk != NULL) {
3469 chunk_len = htons(chunk->ch.chunk_length);
3470 } else {
3471 chunk_len = 0;
3472 }
3473 notif_len = sizeof(struct sctp_remote_error) + chunk_len;
3474 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_DONTWAIT, 1, MT_DATA);
3475 if (m_notify == NULL) {
3476 /* Retry with smaller value. */
3477 notif_len = sizeof(struct sctp_remote_error);
3478 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_DONTWAIT, 1, MT_DATA);
3479 if (m_notify == NULL) {
3480 return;
3481 }
3482 }
3483 SCTP_BUF_NEXT(m_notify) = NULL;
3484 sre = mtod(m_notify, struct sctp_remote_error *);
3485 sre->sre_type = SCTP_REMOTE_ERROR;
3486 sre->sre_flags = 0;
3487 sre->sre_length = sizeof(struct sctp_remote_error);
3488 sre->sre_error = error;
3489 sre->sre_assoc_id = sctp_get_associd(stcb);
3490 if (notif_len > sizeof(struct sctp_remote_error)) {
3491 memcpy(sre->sre_data, chunk, chunk_len);
3492 sre->sre_length += chunk_len;
3493 }
3494 SCTP_BUF_LEN(m_notify) = sre->sre_length;
3495 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3496 0, 0, stcb->asoc.context, 0, 0, 0,
3497 m_notify);
3498 if (control != NULL) {
3499 control->length = SCTP_BUF_LEN(m_notify);
3500 /* not that we need this */
3501 control->tail_mbuf = m_notify;
3502 control->spec_flags = M_NOTIFICATION;
3503 sctp_add_to_readq(stcb->sctp_ep, stcb,
3504 control,
3505 &stcb->sctp_socket->so_rcv, 1,
3506 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3507 } else {
3508 sctp_m_freem(m_notify);
3509 }
3510 }
3511
3512
/*
 * Central dispatcher for all upper-layer (ULP) notifications.  Maps a
 * SCTP_NOTIFY_* code plus its payload ('data', whose real type depends on
 * the notification) onto the specific sctp_notify_* helper that builds the
 * socket-level event.  Silently drops the notification when the socket is
 * gone, can't receive any more, or (for interface events) the association
 * is still in a front state (COOKIE_WAIT / COOKIE_ECHOED).
 */
void
sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if ((stcb == NULL) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* If the socket is gone we are out of here */
		return;
	}
	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
		/* reader already shut down: nobody would see the event */
		return;
	}
	/* NOTE: stcb is known non-NULL here; the extra check is redundant. */
	if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED))) {
		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
			/* Don't report these in front states */
			return;
		}
	}
	switch (notification) {
	case SCTP_NOTIFY_ASSOC_UP:
		/* report COMM_UP only once per association */
		if (stcb->asoc.assoc_up_sent == 0) {
			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
			stcb->asoc.assoc_up_sent = 1;
		}
		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
			sctp_notify_adaptation_layer(stcb);
		}
		if (stcb->asoc.peer_supports_auth == 0) {
			/* also tell the user the peer does not support AUTH */
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_DOWN:
		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
		break;
	case SCTP_NOTIFY_INTERFACE_DOWN:
		{
			/* 'data' is the affected net (remote address) */
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
			    (struct sockaddr *)&net->ro._l_addr, error);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_UP:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
			    (struct sockaddr *)&net->ro._l_addr, error);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
			    (struct sockaddr *)&net->ro._l_addr, error);
			break;
		}
	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
		/* 'data' is an unsent stream-queue entry */
		sctp_notify_send_failed2(stcb, error,
		    (struct sctp_stream_queue_pending *)data, so_locked);
		break;
	case SCTP_NOTIFY_SENT_DG_FAIL:
		/* 'data' is a chunk that was already sent at least once */
		sctp_notify_send_failed(stcb, 1, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_UNSENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 0, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
		{
			/* 'data' points at a packed stream-id/sequence value */
			uint32_t val;

			val = *((uint32_t *) data);

			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
			break;
		}
	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
		/* front states map to CANT_STR_ASSOC, otherwise COMM_LOST */
		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_RESTART:
		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
		if (stcb->asoc.peer_supports_auth == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_STR_RESET_SEND:
		/* for stream resets, 'error' carries the entry count */
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING_SSN);
		break;
	case SCTP_NOTIFY_STR_RESET_RECV:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING);
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_ASCONF_ADD_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
		    error);
		break;
	case SCTP_NOTIFY_ASCONF_DELETE_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
		    error);
		break;
	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
		    error);
		break;
	case SCTP_NOTIFY_PEER_SHUTDOWN:
		sctp_notify_shutdown_event(stcb);
		break;
	case SCTP_NOTIFY_AUTH_NEW_KEY:
		/* 'data' smuggles the key id as an integer in the pointer */
		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_AUTH_FREE_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_NO_PEER_AUTH:
		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_SENDER_DRY:
		sctp_notify_sender_dry_event(stcb, so_locked);
		break;
	case SCTP_NOTIFY_REMOTE_ERROR:
		sctp_notify_remote_error(stcb, error, data);
		break;
	default:
		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
		    __FUNCTION__, notification, notification);
		break;
	}			/* end switch */
}
3692
/*
 * Drain every outbound queue of the association (sent queue, send queue,
 * and all per-stream output queues), notifying the ULP of each dropped
 * message with the given 'error' code and freeing the chunks.  Used when
 * the association is being torn down.  'holds_lock' says whether the
 * caller already holds the TCB send lock.
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
			/* keep per-stream accounting in sync */
			if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
				asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
#ifdef INVARIANTS
			} else {
				panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
#endif
			}
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			/* tell the user this (sent) datagram failed */
			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			/* re-check: the notify path may have consumed it */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
			asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
#ifdef INVARIANTS
		} else {
			panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
#endif
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			/* these were never sent, so report UNSENT failure */
			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		asoc->locked_on_sending = NULL;
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			asoc->stream_queue_cnt--;
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				/* messages still sitting in the stream queue */
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    error, (void *)sp, so_locked);
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
					sp->tail_mbuf = NULL;
					sp->length = 0;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/* sa_ignore FREED_MEMORY */
		}
	}

	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}
3804
3805 void
3806 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
3807 struct sctp_abort_chunk *abort, int so_locked
3808 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3809 SCTP_UNUSED
3810 #endif
3811 )
3812 {
3813 if (stcb == NULL) {
3814 return;
3815 }
3816 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3817 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3818 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3819 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3820 }
3821 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3822 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3823 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3824 return;
3825 }
3826 /* Tell them we lost the asoc */
3827 sctp_report_all_outbound(stcb, error, 1, so_locked);
3828 if (from_peer) {
3829 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
3830 } else {
3831 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
3832 }
3833 }
3834
/*
 * Abort an association in response to an incoming packet: notify the ULP
 * (if we have a TCB), send an ABORT to the peer, and free the TCB.  When
 * 'stcb' is NULL only the wire ABORT is sent, using vtag 0.  The caller's
 * mbuf 'm' and header pointers describe the packet being responded to.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct mbuf *op_err,
    uint8_t use_mflowid, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
	    use_mflowid, mflowid,
	    vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Lock-order dance: drop the TCB lock before taking the
		 * socket lock, with a refcount held so the asoc survives.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		/* only established associations count against currestab */
		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
3883
3884 #ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Dump the per-association inbound and outbound TSN logs to the console
 * for debugging.  The logs are circular arrays of SCTP_TSN_LOG_SIZE
 * entries; *_wrapped indicates the array has been filled at least once,
 * so the entries from the current index to the end are printed first.
 *
 * NOTE(review): the body is guarded by NOSIY_PRINTS — apparently a typo
 * for "NOISY_PRINTS" — so unless that exact (misspelled) macro is
 * defined, this function compiles to an empty body.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	if (stcb->asoc.tsn_in_wrapped) {
		/* oldest part of the circular log: current index to end */
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		/* newest part: start of array up to the current index */
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
3945
3946 #endif
3947
3948 void
3949 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
3950 struct mbuf *op_err,
3951 int so_locked
3952 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3953 SCTP_UNUSED
3954 #endif
3955 )
3956 {
3957 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3958 struct socket *so;
3959
3960 #endif
3961
3962 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3963 so = SCTP_INP_SO(inp);
3964 #endif
3965 if (stcb == NULL) {
3966 /* Got to have a TCB */
3967 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3968 if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
3969 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
3970 SCTP_CALLED_DIRECTLY_NOCMPSET);
3971 }
3972 }
3973 return;
3974 } else {
3975 stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
3976 }
3977 /* notify the ulp */
3978 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
3979 sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
3980 }
3981 /* notify the peer */
3982 sctp_send_abort_tcb(stcb, op_err, so_locked);
3983 SCTP_STAT_INCR_COUNTER32(sctps_aborted);
3984 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
3985 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
3986 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
3987 }
3988 /* now free the asoc */
3989 #ifdef SCTP_ASOCLOG_OF_TSNS
3990 sctp_print_out_track_log(stcb);
3991 #endif
3992 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3993 if (!so_locked) {
3994 atomic_add_int(&stcb->asoc.refcnt, 1);
3995 SCTP_TCB_UNLOCK(stcb);
3996 SCTP_SOCKET_LOCK(so, 1);
3997 SCTP_TCB_LOCK(stcb);
3998 atomic_subtract_int(&stcb->asoc.refcnt, 1);
3999 }
4000 #endif
4001 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
4002 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4003 if (!so_locked) {
4004 SCTP_SOCKET_UNLOCK(so, 1);
4005 }
4006 #endif
4007 }
4008
/*
 * Handle an out-of-the-blue (OOTB) packet: one for which no association
 * exists.  Walks the chunks in the packet to decide on the RFC-mandated
 * response — some chunk types get no reply, SHUTDOWN ACK gets a
 * SHUTDOWN COMPLETE, and everything else gets an ABORT (subject to the
 * net.inet.sctp.blackhole sysctl, which can suppress the ABORT).
 */
void
sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct sctp_inpcb *inp,
    uint8_t use_mflowid, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_chunkhdr *ch, chunk_buf;
	unsigned int chk_length;
	int contains_init_chunk;

	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
	/* Generate a TO address for future reference */
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
			/* endpoint has no asocs left: finish freeing it */
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
			    SCTP_CALLED_DIRECTLY_NOCMPSET);
		}
	}
	contains_init_chunk = 0;
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
	    sizeof(*ch), (uint8_t *) & chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* break to abort land */
			break;
		}
		switch (ch->chunk_type) {
		case SCTP_INIT:
			/* remembered for the blackhole decision below */
			contains_init_chunk = 1;
			break;
		case SCTP_COOKIE_ECHO:
			/* We hit here only if the assoc is being freed */
			return;
		case SCTP_PACKET_DROPPED:
			/* we don't respond to pkt-dropped */
			return;
		case SCTP_ABORT_ASSOCIATION:
			/* we don't respond with an ABORT to an ABORT */
			return;
		case SCTP_SHUTDOWN_COMPLETE:
			/*
			 * we ignore it since we are not waiting for it and
			 * peer is gone
			 */
			return;
		case SCTP_SHUTDOWN_ACK:
			sctp_send_shutdown_complete2(src, dst, sh,
			    use_mflowid, mflowid,
			    vrf_id, port);
			return;
		default:
			break;
		}
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *) & chunk_buf);
	}
	/* blackhole=1 suppresses ABORTs only for packets carrying an INIT */
	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
	    (contains_init_chunk == 0))) {
		sctp_send_abort(m, iphlen, src, dst, sh, 0, NULL,
		    use_mflowid, mflowid,
		    vrf_id, port);
	}
}
4076
4077 /*
4078 * check the inbound datagram to make sure there is not an abort inside it,
4079 * if there is return 1, else return 0.
4080 */
4081 int
4082 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4083 {
4084 struct sctp_chunkhdr *ch;
4085 struct sctp_init_chunk *init_chk, chunk_buf;
4086 int offset;
4087 unsigned int chk_length;
4088
4089 offset = iphlen + sizeof(struct sctphdr);
4090 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4091 (uint8_t *) & chunk_buf);
4092 while (ch != NULL) {
4093 chk_length = ntohs(ch->chunk_length);
4094 if (chk_length < sizeof(*ch)) {
4095 /* packet is probably corrupt */
4096 break;
4097 }
4098 /* we seem to be ok, is it an abort? */
4099 if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4100 /* yep, tell them */
4101 return (1);
4102 }
4103 if (ch->chunk_type == SCTP_INITIATION) {
4104 /* need to update the Vtag */
4105 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4106 offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4107 if (init_chk != NULL) {
4108 *vtagfill = ntohl(init_chk->init.initiate_tag);
4109 }
4110 }
4111 /* Nope, move to the next chunk */
4112 offset += SCTP_SIZE32(chk_length);
4113 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4114 sizeof(*ch), (uint8_t *) & chunk_buf);
4115 }
4116 return (0);
4117 }
4118
4119 /*
4120 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4121 * set (i.e. it's 0) so, create this function to compare link local scopes
4122 */
4123 #ifdef INET6
4124 uint32_t
4125 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4126 {
4127 struct sockaddr_in6 a, b;
4128
4129 /* save copies */
4130 a = *addr1;
4131 b = *addr2;
4132
4133 if (a.sin6_scope_id == 0)
4134 if (sa6_recoverscope(&a)) {
4135 /* can't get scope, so can't match */
4136 return (0);
4137 }
4138 if (b.sin6_scope_id == 0)
4139 if (sa6_recoverscope(&b)) {
4140 /* can't get scope, so can't match */
4141 return (0);
4142 }
4143 if (a.sin6_scope_id != b.sin6_scope_id)
4144 return (0);
4145
4146 return (1);
4147 }
4148
4149 /*
4150 * returns a sockaddr_in6 with embedded scope recovered and removed
4151 */
4152 struct sockaddr_in6 *
4153 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4154 {
4155 /* check and strip embedded scope junk */
4156 if (addr->sin6_family == AF_INET6) {
4157 if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4158 if (addr->sin6_scope_id == 0) {
4159 *store = *addr;
4160 if (!sa6_recoverscope(store)) {
4161 /* use the recovered scope */
4162 addr = store;
4163 }
4164 } else {
4165 /* else, return the original "to" addr */
4166 in6_clearscope(&addr->sin6_addr);
4167 }
4168 }
4169 }
4170 return (addr);
4171 }
4172
4173 #endif
4174
4175 /*
4176 * are the two addresses the same? currently a "scopeless" check returns: 1
4177 * if same, 0 if not
4178 */
int
sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
{
	/* both pointers must be valid */
	if ((sa1 == NULL) || (sa2 == NULL)) {
		return (0);
	}
	/* a cross-family compare never matches */
	if (sa1->sa_family != sa2->sa_family) {
		return (0);
	}
	switch (sa1->sa_family) {
#ifdef INET6
	case AF_INET6:
		/* IPv6: scopeless address comparison */
		return (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)sa1,
		    (struct sockaddr_in6 *)sa2));
#endif
#ifdef INET
	case AF_INET:
		/* IPv4: compare the raw 32-bit addresses */
		return (((struct sockaddr_in *)sa1)->sin_addr.s_addr ==
		    ((struct sockaddr_in *)sa2)->sin_addr.s_addr);
#endif
	default:
		/* we don't do these... */
		return (0);
	}
}
4220
4221 void
4222 sctp_print_address(struct sockaddr *sa)
4223 {
4224 #ifdef INET6
4225 char ip6buf[INET6_ADDRSTRLEN];
4226
4227 #endif
4228
4229 switch (sa->sa_family) {
4230 #ifdef INET6
4231 case AF_INET6:
4232 {
4233 struct sockaddr_in6 *sin6;
4234
4235 sin6 = (struct sockaddr_in6 *)sa;
4236 SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4237 ip6_sprintf(ip6buf, &sin6->sin6_addr),
4238 ntohs(sin6->sin6_port),
4239 sin6->sin6_scope_id);
4240 break;
4241 }
4242 #endif
4243 #ifdef INET
4244 case AF_INET:
4245 {
4246 struct sockaddr_in *sin;
4247 unsigned char *p;
4248
4249 sin = (struct sockaddr_in *)sa;
4250 p = (unsigned char *)&sin->sin_addr;
4251 SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4252 p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4253 break;
4254 }
4255 #endif
4256 default:
4257 SCTP_PRINTF("?\n");
4258 break;
4259 }
4260 }
4261
/*
 * Migrate all read-queue entries belonging to stcb from old_inp to
 * new_inp (peeloff / accept path).  Socket-buffer byte accounting is
 * moved along with the data: debited from old_so->so_rcv, credited to
 * new_so->so_rcv.
 */
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	/* keep other readers off the old socket while we migrate */
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	/* pass 1: unhook matching entries onto a private temp queue */
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for out target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* debit every mbuf from the old socket's accounting */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	/* pass 2: append to the new read queue and credit new accounting */
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4337
/*
 * Queue a completed control (read-queue entry) onto the endpoint's
 * read queue, charging its mbuf chain to the socket buffer so select()
 * and friends see the data, then wake the reader.  Zero-length mbufs
 * are pruned from the chain first.  The control is consumed: it is
 * either queued or freed (when the socket can no longer read, or when
 * pruning left no data).
 */
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* reader is gone: drop the data instead of queueing it */
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (!(control->spec_flags & M_NOTIFICATION)) {
		/* only real user data counts toward the recv statistics */
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	/* prune empty mbufs; charge the rest to the socket buffer */
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		sctp_free_remote_addr(control->whoFrom);
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		/* last fragment of the message is included */
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			if (!so_locked) {
				/*
				 * take a refcnt so the assoc survives while
				 * the TCB lock is dropped to respect the
				 * socket-before-TCB lock order
				 */
				if (stcb) {
					atomic_add_int(&stcb->asoc.refcnt, 1);
					SCTP_TCB_UNLOCK(stcb);
				}
				SCTP_SOCKET_LOCK(so, 1);
				if (stcb) {
					SCTP_TCB_LOCK(stcb);
					atomic_subtract_int(&stcb->asoc.refcnt, 1);
				}
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					/* socket vanished while unlocked */
					SCTP_SOCKET_UNLOCK(so, 1);
					return;
				}
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
	}
}
4463
4464
/*
 * Append mbuf chain m to an existing read-queue entry (partial
 * delivery, or appending on the reassembly queue).  The bytes are
 * added to control->length and, when sb is non-NULL, charged to the
 * socket buffer; the reader is then woken.  Returns 0 on success, -1
 * when there is nothing valid to append to.
 */
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) {
		/* reader is gone; silently drop (not an error) */
		SCTP_INP_READ_UNLOCK(inp);
		return (0);
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	/* prune empty mbufs; charge the rest to the socket buffer */
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			sctp_sballoc(stcb, sb, mm);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			/* the PDAPI event this control belonged to is done */
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			/*
			 * take a refcnt so the assoc survives while the TCB
			 * lock is dropped for the socket lock
			 */
			if (stcb) {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
			}
			SCTP_SOCKET_LOCK(so, 1);
			if (stcb) {
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* socket vanished while unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4614
4615
4616
4617 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4618 *************ALTERNATE ROUTING CODE
4619 */
4620
4621 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4622 *************ALTERNATE ROUTING CODE
4623 */
4624
4625 struct mbuf *
4626 sctp_generate_invmanparam(int err)
4627 {
4628 /* Return a MBUF with a invalid mandatory parameter */
4629 struct mbuf *m;
4630
4631 m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
4632 if (m) {
4633 struct sctp_paramhdr *ph;
4634
4635 SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
4636 ph = mtod(m, struct sctp_paramhdr *);
4637 ph->param_length = htons(sizeof(struct sctp_paramhdr));
4638 ph->param_type = htons(err);
4639 }
4640 return (m);
4641 }
4642
4643 #ifdef SCTP_MBCNT_LOGGING
/*
 * Release the outbound buffer-space accounting held by chunk tp1:
 * chk_cnt chunks come off the out-queue count, book_size bytes off the
 * association's total and (for 1-to-1 style sockets) off so_snd.sb_cc.
 * Both subtractions clamp at zero rather than underflowing.
 */
void
sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *tp1, int chk_cnt)
{
	/* nothing was booked if the chunk carries no data */
	if (tp1->data == NULL) {
		return;
	}
	asoc->chunks_on_out_queue -= chk_cnt;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
		    asoc->total_output_queue_size,
		    tp1->book_size,
		    0,
		    tp1->mbcnt);
	}
	if (asoc->total_output_queue_size >= tp1->book_size) {
		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
	} else {
		/* clamp rather than wrap below zero */
		asoc->total_output_queue_size = 0;
	}

	/* 1-to-1 style sockets also account the bytes in so_snd */
	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
		} else {
			stcb->sctp_socket->so_snd.sb_cc = 0;

		}
	}
}
4675
4676 #endif
4677
4678 int
4679 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4680 uint8_t sent, int so_locked
4681 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4682 SCTP_UNUSED
4683 #endif
4684 )
4685 {
4686 struct sctp_stream_out *strq;
4687 struct sctp_tmit_chunk *chk = NULL, *tp2;
4688 struct sctp_stream_queue_pending *sp;
4689 uint16_t stream = 0, seq = 0;
4690 uint8_t foundeom = 0;
4691 int ret_sz = 0;
4692 int notdone;
4693 int do_wakeup_routine = 0;
4694
4695 stream = tp1->rec.data.stream_number;
4696 seq = tp1->rec.data.stream_seq;
4697 do {
4698 ret_sz += tp1->book_size;
4699 if (tp1->data != NULL) {
4700 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4701 sctp_flight_size_decrease(tp1);
4702 sctp_total_flight_decrease(stcb, tp1);
4703 }
4704 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4705 stcb->asoc.peers_rwnd += tp1->send_size;
4706 stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4707 if (sent) {
4708 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4709 } else {
4710 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4711 }
4712 if (tp1->data) {
4713 sctp_m_freem(tp1->data);
4714 tp1->data = NULL;
4715 }
4716 do_wakeup_routine = 1;
4717 if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4718 stcb->asoc.sent_queue_cnt_removeable--;
4719 }
4720 }
4721 tp1->sent = SCTP_FORWARD_TSN_SKIP;
4722 if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4723 SCTP_DATA_NOT_FRAG) {
4724 /* not frag'ed we ae done */
4725 notdone = 0;
4726 foundeom = 1;
4727 } else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4728 /* end of frag, we are done */
4729 notdone = 0;
4730 foundeom = 1;
4731 } else {
4732 /*
4733 * Its a begin or middle piece, we must mark all of
4734 * it
4735 */
4736 notdone = 1;
4737 tp1 = TAILQ_NEXT(tp1, sctp_next);
4738 }
4739 } while (tp1 && notdone);
4740 if (foundeom == 0) {
4741 /*
4742 * The multi-part message was scattered across the send and
4743 * sent queue.
4744 */
4745 TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4746 if ((tp1->rec.data.stream_number != stream) ||
4747 (tp1->rec.data.stream_seq != seq)) {
4748 break;
4749 }
4750 /*
4751 * save to chk in case we have some on stream out
4752 * queue. If so and we have an un-transmitted one we
4753 * don't have to fudge the TSN.
4754 */
4755 chk = tp1;
4756 ret_sz += tp1->book_size;
4757 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4758 if (sent) {
4759 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4760 } else {
4761 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4762 }
4763 if (tp1->data) {
4764 sctp_m_freem(tp1->data);
4765 tp1->data = NULL;
4766 }
4767 /* No flight involved here book the size to 0 */
4768 tp1->book_size = 0;
4769 if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4770 foundeom = 1;
4771 }
4772 do_wakeup_routine = 1;
4773 tp1->sent = SCTP_FORWARD_TSN_SKIP;
4774 TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4775 /*
4776 * on to the sent queue so we can wait for it to be
4777 * passed by.
4778 */
4779 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4780 sctp_next);
4781 stcb->asoc.send_queue_cnt--;
4782 stcb->asoc.sent_queue_cnt++;
4783 }
4784 }
4785 if (foundeom == 0) {
4786 /*
4787 * Still no eom found. That means there is stuff left on the
4788 * stream out queue.. yuck.
4789 */
4790 SCTP_TCB_SEND_LOCK(stcb);
4791 strq = &stcb->asoc.strmout[stream];
4792 sp = TAILQ_FIRST(&strq->outqueue);
4793 if (sp != NULL) {
4794 sp->discard_rest = 1;
4795 /*
4796 * We may need to put a chunk on the queue that
4797 * holds the TSN that would have been sent with the
4798 * LAST bit.
4799 */
4800 if (chk == NULL) {
4801 /* Yep, we have to */
4802 sctp_alloc_a_chunk(stcb, chk);
4803 if (chk == NULL) {
4804 /*
4805 * we are hosed. All we can do is
4806 * nothing.. which will cause an
4807 * abort if the peer is paying
4808 * attention.
4809 */
4810 goto oh_well;
4811 }
4812 memset(chk, 0, sizeof(*chk));
4813 chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
4814 chk->sent = SCTP_FORWARD_TSN_SKIP;
4815 chk->asoc = &stcb->asoc;
4816 chk->rec.data.stream_seq = strq->next_sequence_send;
4817 chk->rec.data.stream_number = sp->stream;
4818 chk->rec.data.payloadtype = sp->ppid;
4819 chk->rec.data.context = sp->context;
4820 chk->flags = sp->act_flags;
4821 if (sp->net)
4822 chk->whoTo = sp->net;
4823 else
4824 chk->whoTo = stcb->asoc.primary_destination;
4825 atomic_add_int(&chk->whoTo->ref_count, 1);
4826 chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4827 stcb->asoc.pr_sctp_cnt++;
4828 chk->pr_sctp_on = 1;
4829 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4830 stcb->asoc.sent_queue_cnt++;
4831 stcb->asoc.pr_sctp_cnt++;
4832 } else {
4833 chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4834 }
4835 strq->next_sequence_send++;
4836 oh_well:
4837 if (sp->data) {
4838 /*
4839 * Pull any data to free up the SB and allow
4840 * sender to "add more" while we will throw
4841 * away :-)
4842 */
4843 sctp_free_spbufspace(stcb, &stcb->asoc, sp);
4844 ret_sz += sp->length;
4845 do_wakeup_routine = 1;
4846 sp->some_taken = 1;
4847 sctp_m_freem(sp->data);
4848 sp->data = NULL;
4849 sp->tail_mbuf = NULL;
4850 sp->length = 0;
4851 }
4852 }
4853 SCTP_TCB_SEND_UNLOCK(stcb);
4854 }
4855 if (do_wakeup_routine) {
4856 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4857 struct socket *so;
4858
4859 so = SCTP_INP_SO(stcb->sctp_ep);
4860 if (!so_locked) {
4861 atomic_add_int(&stcb->asoc.refcnt, 1);
4862 SCTP_TCB_UNLOCK(stcb);
4863 SCTP_SOCKET_LOCK(so, 1);
4864 SCTP_TCB_LOCK(stcb);
4865 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4866 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4867 /* assoc was freed while we were unlocked */
4868 SCTP_SOCKET_UNLOCK(so, 1);
4869 return (ret_sz);
4870 }
4871 }
4872 #endif
4873 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4874 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4875 if (!so_locked) {
4876 SCTP_SOCKET_UNLOCK(so, 1);
4877 }
4878 #endif
4879 }
4880 return (ret_sz);
4881 }
4882
4883 /*
4884 * checks to see if the given address, sa, is one that is currently known by
4885 * the kernel note: can't distinguish the same address on multiple interfaces
4886 * and doesn't handle multiple addresses with different zone/scope id's note:
4887 * ifa_ifwithaddr() compares the entire sockaddr struct
4888 */
4889 struct sctp_ifa *
4890 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4891 int holds_lock)
4892 {
4893 struct sctp_laddr *laddr;
4894
4895 if (holds_lock == 0) {
4896 SCTP_INP_RLOCK(inp);
4897 }
4898 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4899 if (laddr->ifa == NULL)
4900 continue;
4901 if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4902 continue;
4903 #ifdef INET
4904 if (addr->sa_family == AF_INET) {
4905 if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4906 laddr->ifa->address.sin.sin_addr.s_addr) {
4907 /* found him. */
4908 if (holds_lock == 0) {
4909 SCTP_INP_RUNLOCK(inp);
4910 }
4911 return (laddr->ifa);
4912 break;
4913 }
4914 }
4915 #endif
4916 #ifdef INET6
4917 if (addr->sa_family == AF_INET6) {
4918 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4919 &laddr->ifa->address.sin6)) {
4920 /* found him. */
4921 if (holds_lock == 0) {
4922 SCTP_INP_RUNLOCK(inp);
4923 }
4924 return (laddr->ifa);
4925 break;
4926 }
4927 }
4928 #endif
4929 }
4930 if (holds_lock == 0) {
4931 SCTP_INP_RUNLOCK(inp);
4932 }
4933 return (NULL);
4934 }
4935
4936 uint32_t
4937 sctp_get_ifa_hash_val(struct sockaddr *addr)
4938 {
4939 switch (addr->sa_family) {
4940 #ifdef INET
4941 case AF_INET:
4942 {
4943 struct sockaddr_in *sin;
4944
4945 sin = (struct sockaddr_in *)addr;
4946 return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
4947 }
4948 #endif
4949 #ifdef INET6
4950 case AF_INET6:
4951 {
4952 struct sockaddr_in6 *sin6;
4953 uint32_t hash_of_addr;
4954
4955 sin6 = (struct sockaddr_in6 *)addr;
4956 hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
4957 sin6->sin6_addr.s6_addr32[1] +
4958 sin6->sin6_addr.s6_addr32[2] +
4959 sin6->sin6_addr.s6_addr32[3]);
4960 hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
4961 return (hash_of_addr);
4962 }
4963 #endif
4964 default:
4965 break;
4966 }
4967 return (0);
4968 }
4969
4970 struct sctp_ifa *
4971 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
4972 {
4973 struct sctp_ifa *sctp_ifap;
4974 struct sctp_vrf *vrf;
4975 struct sctp_ifalist *hash_head;
4976 uint32_t hash_of_addr;
4977
4978 if (holds_lock == 0)
4979 SCTP_IPI_ADDR_RLOCK();
4980
4981 vrf = sctp_find_vrf(vrf_id);
4982 if (vrf == NULL) {
4983 stage_right:
4984 if (holds_lock == 0)
4985 SCTP_IPI_ADDR_RUNLOCK();
4986 return (NULL);
4987 }
4988 hash_of_addr = sctp_get_ifa_hash_val(addr);
4989
4990 hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
4991 if (hash_head == NULL) {
4992 SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
4993 hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
4994 (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
4995 sctp_print_address(addr);
4996 SCTP_PRINTF("No such bucket for address\n");
4997 if (holds_lock == 0)
4998 SCTP_IPI_ADDR_RUNLOCK();
4999
5000 return (NULL);
5001 }
5002 LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5003 if (sctp_ifap == NULL) {
5004 #ifdef INVARIANTS
5005 panic("Huh LIST_FOREACH corrupt");
5006 goto stage_right;
5007 #else
5008 SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n");
5009 goto stage_right;
5010 #endif
5011 }
5012 if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5013 continue;
5014 #ifdef INET
5015 if (addr->sa_family == AF_INET) {
5016 if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5017 sctp_ifap->address.sin.sin_addr.s_addr) {
5018 /* found him. */
5019 if (holds_lock == 0)
5020 SCTP_IPI_ADDR_RUNLOCK();
5021 return (sctp_ifap);
5022 break;
5023 }
5024 }
5025 #endif
5026 #ifdef INET6
5027 if (addr->sa_family == AF_INET6) {
5028 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5029 &sctp_ifap->address.sin6)) {
5030 /* found him. */
5031 if (holds_lock == 0)
5032 SCTP_IPI_ADDR_RUNLOCK();
5033 return (sctp_ifap);
5034 break;
5035 }
5036 }
5037 #endif
5038 }
5039 if (holds_lock == 0)
5040 SCTP_IPI_ADDR_RUNLOCK();
5041 return (NULL);
5042 }
5043
/*
 * Called after the user has pulled *freed_so_far bytes off the socket:
 * decide whether the receive window grew enough (by at least rwnd_req)
 * to warrant sending an immediate window-update SACK.  hold_rlock says
 * the caller holds the INP read lock; it is dropped around the SACK
 * send and re-taken before returning.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* hold a reference so the assoc cannot vanish under us */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	/* accumulate what was freed since the last update */
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		/* window shrank; nothing to advertise */
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/* window grew enough: send an update SACK now */
		if (hold_rlock) {
			/* drop the read lock before taking the TCB lock */
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* re-take the read lock if we dropped it for the caller */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5125
5126 int
5127 sctp_sorecvmsg(struct socket *so,
5128 struct uio *uio,
5129 struct mbuf **mp,
5130 struct sockaddr *from,
5131 int fromlen,
5132 int *msg_flags,
5133 struct sctp_sndrcvinfo *sinfo,
5134 int filling_sinfo)
5135 {
5136 /*
5137 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5138 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5139 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5140 * On the way out we may send out any combination of:
5141 * MSG_NOTIFICATION MSG_EOR
5142 *
5143 */
5144 struct sctp_inpcb *inp = NULL;
5145 int my_len = 0;
5146 int cp_len = 0, error = 0;
5147 struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5148 struct mbuf *m = NULL;
5149 struct sctp_tcb *stcb = NULL;
5150 int wakeup_read_socket = 0;
5151 int freecnt_applied = 0;
5152 int out_flags = 0, in_flags = 0;
5153 int block_allowed = 1;
5154 uint32_t freed_so_far = 0;
5155 uint32_t copied_so_far = 0;
5156 int in_eeor_mode = 0;
5157 int no_rcv_needed = 0;
5158 uint32_t rwnd_req = 0;
5159 int hold_sblock = 0;
5160 int hold_rlock = 0;
5161 int slen = 0;
5162 uint32_t held_length = 0;
5163 int sockbuf_lock = 0;
5164
5165 if (uio == NULL) {
5166 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5167 return (EINVAL);
5168 }
5169 if (msg_flags) {
5170 in_flags = *msg_flags;
5171 if (in_flags & MSG_PEEK)
5172 SCTP_STAT_INCR(sctps_read_peeks);
5173 } else {
5174 in_flags = 0;
5175 }
5176 slen = uio->uio_resid;
5177
5178 /* Pull in and set up our int flags */
5179 if (in_flags & MSG_OOB) {
5180 /* Out of band's NOT supported */
5181 return (EOPNOTSUPP);
5182 }
5183 if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5184 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5185 return (EINVAL);
5186 }
5187 if ((in_flags & (MSG_DONTWAIT
5188 | MSG_NBIO
5189 )) ||
5190 SCTP_SO_IS_NBIO(so)) {
5191 block_allowed = 0;
5192 }
5193 /* setup the endpoint */
5194 inp = (struct sctp_inpcb *)so->so_pcb;
5195 if (inp == NULL) {
5196 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5197 return (EFAULT);
5198 }
5199 rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5200 /* Must be at least a MTU's worth */
5201 if (rwnd_req < SCTP_MIN_RWND)
5202 rwnd_req = SCTP_MIN_RWND;
5203 in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5204 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5205 sctp_misc_ints(SCTP_SORECV_ENTER,
5206 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5207 }
5208 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5209 sctp_misc_ints(SCTP_SORECV_ENTERPL,
5210 rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5211 }
5212 error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5213 if (error) {
5214 goto release_unlocked;
5215 }
5216 sockbuf_lock = 1;
5217 restart:
5218
5219
5220 restart_nosblocks:
5221 if (hold_sblock == 0) {
5222 SOCKBUF_LOCK(&so->so_rcv);
5223 hold_sblock = 1;
5224 }
5225 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5226 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5227 goto out;
5228 }
5229 if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5230 if (so->so_error) {
5231 error = so->so_error;
5232 if ((in_flags & MSG_PEEK) == 0)
5233 so->so_error = 0;
5234 goto out;
5235 } else {
5236 if (so->so_rcv.sb_cc == 0) {
5237 /* indicate EOF */
5238 error = 0;
5239 goto out;
5240 }
5241 }
5242 }
5243 if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5244 /* we need to wait for data */
5245 if ((so->so_rcv.sb_cc == 0) &&
5246 ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5247 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5248 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5249 /*
5250 * For active open side clear flags for
5251 * re-use passive open is blocked by
5252 * connect.
5253 */
5254 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5255 /*
5256 * You were aborted, passive side
5257 * always hits here
5258 */
5259 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5260 error = ECONNRESET;
5261 }
5262 so->so_state &= ~(SS_ISCONNECTING |
5263 SS_ISDISCONNECTING |
5264 SS_ISCONFIRMING |
5265 SS_ISCONNECTED);
5266 if (error == 0) {
5267 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5268 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5269 error = ENOTCONN;
5270 }
5271 }
5272 goto out;
5273 }
5274 }
5275 error = sbwait(&so->so_rcv);
5276 if (error) {
5277 goto out;
5278 }
5279 held_length = 0;
5280 goto restart_nosblocks;
5281 } else if (so->so_rcv.sb_cc == 0) {
5282 if (so->so_error) {
5283 error = so->so_error;
5284 if ((in_flags & MSG_PEEK) == 0)
5285 so->so_error = 0;
5286 } else {
5287 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5288 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5289 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5290 /*
5291 * For active open side clear flags
5292 * for re-use passive open is
5293 * blocked by connect.
5294 */
5295 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5296 /*
5297 * You were aborted, passive
5298 * side always hits here
5299 */
5300 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5301 error = ECONNRESET;
5302 }
5303 so->so_state &= ~(SS_ISCONNECTING |
5304 SS_ISDISCONNECTING |
5305 SS_ISCONFIRMING |
5306 SS_ISCONNECTED);
5307 if (error == 0) {
5308 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5309 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5310 error = ENOTCONN;
5311 }
5312 }
5313 goto out;
5314 }
5315 }
5316 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5317 error = EWOULDBLOCK;
5318 }
5319 goto out;
5320 }
5321 if (hold_sblock == 1) {
5322 SOCKBUF_UNLOCK(&so->so_rcv);
5323 hold_sblock = 0;
5324 }
5325 /* we possibly have data we can read */
5326 /* sa_ignore FREED_MEMORY */
5327 control = TAILQ_FIRST(&inp->read_queue);
5328 if (control == NULL) {
5329 /*
5330 * This could be happening since the appender did the
5331 * increment but as not yet did the tailq insert onto the
5332 * read_queue
5333 */
5334 if (hold_rlock == 0) {
5335 SCTP_INP_READ_LOCK(inp);
5336 }
5337 control = TAILQ_FIRST(&inp->read_queue);
5338 if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5339 #ifdef INVARIANTS
5340 panic("Huh, its non zero and nothing on control?");
5341 #endif
5342 so->so_rcv.sb_cc = 0;
5343 }
5344 SCTP_INP_READ_UNLOCK(inp);
5345 hold_rlock = 0;
5346 goto restart;
5347 }
5348 if ((control->length == 0) &&
5349 (control->do_not_ref_stcb)) {
5350 /*
5351 * Clean up code for freeing assoc that left behind a
5352 * pdapi.. maybe a peer in EEOR that just closed after
5353 * sending and never indicated a EOR.
5354 */
5355 if (hold_rlock == 0) {
5356 hold_rlock = 1;
5357 SCTP_INP_READ_LOCK(inp);
5358 }
5359 control->held_length = 0;
5360 if (control->data) {
5361 /* Hmm there is data here .. fix */
5362 struct mbuf *m_tmp;
5363 int cnt = 0;
5364
5365 m_tmp = control->data;
5366 while (m_tmp) {
5367 cnt += SCTP_BUF_LEN(m_tmp);
5368 if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5369 control->tail_mbuf = m_tmp;
5370 control->end_added = 1;
5371 }
5372 m_tmp = SCTP_BUF_NEXT(m_tmp);
5373 }
5374 control->length = cnt;
5375 } else {
5376 /* remove it */
5377 TAILQ_REMOVE(&inp->read_queue, control, next);
5378 /* Add back any hiddend data */
5379 sctp_free_remote_addr(control->whoFrom);
5380 sctp_free_a_readq(stcb, control);
5381 }
5382 if (hold_rlock) {
5383 hold_rlock = 0;
5384 SCTP_INP_READ_UNLOCK(inp);
5385 }
5386 goto restart;
5387 }
5388 if ((control->length == 0) &&
5389 (control->end_added == 1)) {
5390 /*
5391 * Do we also need to check for (control->pdapi_aborted ==
5392 * 1)?
5393 */
5394 if (hold_rlock == 0) {
5395 hold_rlock = 1;
5396 SCTP_INP_READ_LOCK(inp);
5397 }
5398 TAILQ_REMOVE(&inp->read_queue, control, next);
5399 if (control->data) {
5400 #ifdef INVARIANTS
5401 panic("control->data not null but control->length == 0");
5402 #else
5403 SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5404 sctp_m_freem(control->data);
5405 control->data = NULL;
5406 #endif
5407 }
5408 if (control->aux_data) {
5409 sctp_m_free(control->aux_data);
5410 control->aux_data = NULL;
5411 }
5412 sctp_free_remote_addr(control->whoFrom);
5413 sctp_free_a_readq(stcb, control);
5414 if (hold_rlock) {
5415 hold_rlock = 0;
5416 SCTP_INP_READ_UNLOCK(inp);
5417 }
5418 goto restart;
5419 }
5420 if (control->length == 0) {
5421 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5422 (filling_sinfo)) {
5423 /* find a more suitable one then this */
5424 ctl = TAILQ_NEXT(control, next);
5425 while (ctl) {
5426 if ((ctl->stcb != control->stcb) && (ctl->length) &&
5427 (ctl->some_taken ||
5428 (ctl->spec_flags & M_NOTIFICATION) ||
5429 ((ctl->do_not_ref_stcb == 0) &&
5430 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5431 ) {
5432 /*-
5433 * If we have a different TCB next, and there is data
5434 * present. If we have already taken some (pdapi), OR we can
5435 * ref the tcb and no delivery as started on this stream, we
5436 * take it. Note we allow a notification on a different
5437 * assoc to be delivered..
5438 */
5439 control = ctl;
5440 goto found_one;
5441 } else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5442 (ctl->length) &&
5443 ((ctl->some_taken) ||
5444 ((ctl->do_not_ref_stcb == 0) &&
5445 ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5446 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5447 /*-
5448 * If we have the same tcb, and there is data present, and we
5449 * have the strm interleave feature present. Then if we have
5450 * taken some (pdapi) or we can refer to tht tcb AND we have
5451 * not started a delivery for this stream, we can take it.
5452 * Note we do NOT allow a notificaiton on the same assoc to
5453 * be delivered.
5454 */
5455 control = ctl;
5456 goto found_one;
5457 }
5458 ctl = TAILQ_NEXT(ctl, next);
5459 }
5460 }
5461 /*
5462 * if we reach here, not suitable replacement is available
5463 * <or> fragment interleave is NOT on. So stuff the sb_cc
5464 * into the our held count, and its time to sleep again.
5465 */
5466 held_length = so->so_rcv.sb_cc;
5467 control->held_length = so->so_rcv.sb_cc;
5468 goto restart;
5469 }
5470 /* Clear the held length since there is something to read */
5471 control->held_length = 0;
5472 if (hold_rlock) {
5473 SCTP_INP_READ_UNLOCK(inp);
5474 hold_rlock = 0;
5475 }
5476 found_one:
5477 /*
5478 * If we reach here, control has a some data for us to read off.
5479 * Note that stcb COULD be NULL.
5480 */
5481 control->some_taken++;
5482 if (hold_sblock) {
5483 SOCKBUF_UNLOCK(&so->so_rcv);
5484 hold_sblock = 0;
5485 }
5486 stcb = control->stcb;
5487 if (stcb) {
5488 if ((control->do_not_ref_stcb == 0) &&
5489 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5490 if (freecnt_applied == 0)
5491 stcb = NULL;
5492 } else if (control->do_not_ref_stcb == 0) {
5493 /* you can't free it on me please */
5494 /*
5495 * The lock on the socket buffer protects us so the
5496 * free code will stop. But since we used the
5497 * socketbuf lock and the sender uses the tcb_lock
5498 * to increment, we need to use the atomic add to
5499 * the refcnt
5500 */
5501 if (freecnt_applied) {
5502 #ifdef INVARIANTS
5503 panic("refcnt already incremented");
5504 #else
5505 SCTP_PRINTF("refcnt already incremented?\n");
5506 #endif
5507 } else {
5508 atomic_add_int(&stcb->asoc.refcnt, 1);
5509 freecnt_applied = 1;
5510 }
5511 /*
5512 * Setup to remember how much we have not yet told
5513 * the peer our rwnd has opened up. Note we grab the
5514 * value from the tcb from last time. Note too that
5515 * sack sending clears this when a sack is sent,
5516 * which is fine. Once we hit the rwnd_req, we then
5517 * will go to the sctp_user_rcvd() that will not
5518 * lock until it KNOWs it MUST send a WUP-SACK.
5519 */
5520 freed_so_far = stcb->freed_by_sorcv_sincelast;
5521 stcb->freed_by_sorcv_sincelast = 0;
5522 }
5523 }
5524 if (stcb &&
5525 ((control->spec_flags & M_NOTIFICATION) == 0) &&
5526 control->do_not_ref_stcb == 0) {
5527 stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5528 }
5529 /* First lets get off the sinfo and sockaddr info */
5530 if ((sinfo) && filling_sinfo) {
5531 memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
5532 nxt = TAILQ_NEXT(control, next);
5533 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5534 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5535 struct sctp_extrcvinfo *s_extra;
5536
5537 s_extra = (struct sctp_extrcvinfo *)sinfo;
5538 if ((nxt) &&
5539 (nxt->length)) {
5540 s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5541 if (nxt->sinfo_flags & SCTP_UNORDERED) {
5542 s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5543 }
5544 if (nxt->spec_flags & M_NOTIFICATION) {
5545 s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5546 }
5547 s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
5548 s_extra->sreinfo_next_length = nxt->length;
5549 s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
5550 s_extra->sreinfo_next_stream = nxt->sinfo_stream;
5551 if (nxt->tail_mbuf != NULL) {
5552 if (nxt->end_added) {
5553 s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5554 }
5555 }
5556 } else {
5557 /*
5558 * we explicitly 0 this, since the memcpy
5559 * got some other things beyond the older
5560 * sinfo_ that is on the control's structure
5561 * :-D
5562 */
5563 nxt = NULL;
5564 s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5565 s_extra->sreinfo_next_aid = 0;
5566 s_extra->sreinfo_next_length = 0;
5567 s_extra->sreinfo_next_ppid = 0;
5568 s_extra->sreinfo_next_stream = 0;
5569 }
5570 }
5571 /*
5572 * update off the real current cum-ack, if we have an stcb.
5573 */
5574 if ((control->do_not_ref_stcb == 0) && stcb)
5575 sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5576 /*
5577 * mask off the high bits, we keep the actual chunk bits in
5578 * there.
5579 */
5580 sinfo->sinfo_flags &= 0x00ff;
5581 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5582 sinfo->sinfo_flags |= SCTP_UNORDERED;
5583 }
5584 }
5585 #ifdef SCTP_ASOCLOG_OF_TSNS
5586 {
5587 int index, newindex;
5588 struct sctp_pcbtsn_rlog *entry;
5589
5590 do {
5591 index = inp->readlog_index;
5592 newindex = index + 1;
5593 if (newindex >= SCTP_READ_LOG_SIZE) {
5594 newindex = 0;
5595 }
5596 } while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5597 entry = &inp->readlog[index];
5598 entry->vtag = control->sinfo_assoc_id;
5599 entry->strm = control->sinfo_stream;
5600 entry->seq = control->sinfo_ssn;
5601 entry->sz = control->length;
5602 entry->flgs = control->sinfo_flags;
5603 }
5604 #endif
5605 if (fromlen && from) {
5606 cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sa.sa_len);
5607 switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5608 #ifdef INET6
5609 case AF_INET6:
5610 ((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
5611 break;
5612 #endif
5613 #ifdef INET
5614 case AF_INET:
5615 ((struct sockaddr_in *)from)->sin_port = control->port_from;
5616 break;
5617 #endif
5618 default:
5619 break;
5620 }
5621 memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5622
5623 #if defined(INET) && defined(INET6)
5624 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
5625 (from->sa_family == AF_INET) &&
5626 ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
5627 struct sockaddr_in *sin;
5628 struct sockaddr_in6 sin6;
5629
5630 sin = (struct sockaddr_in *)from;
5631 bzero(&sin6, sizeof(sin6));
5632 sin6.sin6_family = AF_INET6;
5633 sin6.sin6_len = sizeof(struct sockaddr_in6);
5634 sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
5635 bcopy(&sin->sin_addr,
5636 &sin6.sin6_addr.s6_addr32[3],
5637 sizeof(sin6.sin6_addr.s6_addr32[3]));
5638 sin6.sin6_port = sin->sin_port;
5639 memcpy(from, &sin6, sizeof(struct sockaddr_in6));
5640 }
5641 #endif
5642 #ifdef INET6
5643 {
5644 struct sockaddr_in6 lsa6, *from6;
5645
5646 from6 = (struct sockaddr_in6 *)from;
5647 sctp_recover_scope_mac(from6, (&lsa6));
5648 }
5649 #endif
5650 }
5651 /* now copy out what data we can */
5652 if (mp == NULL) {
5653 /* copy out each mbuf in the chain up to length */
5654 get_more_data:
5655 m = control->data;
5656 while (m) {
5657 /* Move out all we can */
5658 cp_len = (int)uio->uio_resid;
5659 my_len = (int)SCTP_BUF_LEN(m);
5660 if (cp_len > my_len) {
5661 /* not enough in this buf */
5662 cp_len = my_len;
5663 }
5664 if (hold_rlock) {
5665 SCTP_INP_READ_UNLOCK(inp);
5666 hold_rlock = 0;
5667 }
5668 if (cp_len > 0)
5669 error = uiomove(mtod(m, char *), cp_len, uio);
5670 /* re-read */
5671 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5672 goto release;
5673 }
5674 if ((control->do_not_ref_stcb == 0) && stcb &&
5675 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5676 no_rcv_needed = 1;
5677 }
5678 if (error) {
5679 /* error we are out of here */
5680 goto release;
5681 }
5682 if ((SCTP_BUF_NEXT(m) == NULL) &&
5683 (cp_len >= SCTP_BUF_LEN(m)) &&
5684 ((control->end_added == 0) ||
5685 (control->end_added &&
5686 (TAILQ_NEXT(control, next) == NULL)))
5687 ) {
5688 SCTP_INP_READ_LOCK(inp);
5689 hold_rlock = 1;
5690 }
5691 if (cp_len == SCTP_BUF_LEN(m)) {
5692 if ((SCTP_BUF_NEXT(m) == NULL) &&
5693 (control->end_added)) {
5694 out_flags |= MSG_EOR;
5695 if ((control->do_not_ref_stcb == 0) &&
5696 (control->stcb != NULL) &&
5697 ((control->spec_flags & M_NOTIFICATION) == 0))
5698 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5699 }
5700 if (control->spec_flags & M_NOTIFICATION) {
5701 out_flags |= MSG_NOTIFICATION;
5702 }
5703 /* we ate up the mbuf */
5704 if (in_flags & MSG_PEEK) {
5705 /* just looking */
5706 m = SCTP_BUF_NEXT(m);
5707 copied_so_far += cp_len;
5708 } else {
5709 /* dispose of the mbuf */
5710 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5711 sctp_sblog(&so->so_rcv,
5712 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5713 }
5714 sctp_sbfree(control, stcb, &so->so_rcv, m);
5715 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5716 sctp_sblog(&so->so_rcv,
5717 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5718 }
5719 copied_so_far += cp_len;
5720 freed_so_far += cp_len;
5721 freed_so_far += MSIZE;
5722 atomic_subtract_int(&control->length, cp_len);
5723 control->data = sctp_m_free(m);
5724 m = control->data;
5725 /*
5726 * been through it all, must hold sb
5727 * lock ok to null tail
5728 */
5729 if (control->data == NULL) {
5730 #ifdef INVARIANTS
5731 if ((control->end_added == 0) ||
5732 (TAILQ_NEXT(control, next) == NULL)) {
5733 /*
5734 * If the end is not
5735 * added, OR the
5736 * next is NOT null
5737 * we MUST have the
5738 * lock.
5739 */
5740 if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5741 panic("Hmm we don't own the lock?");
5742 }
5743 }
5744 #endif
5745 control->tail_mbuf = NULL;
5746 #ifdef INVARIANTS
5747 if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5748 panic("end_added, nothing left and no MSG_EOR");
5749 }
5750 #endif
5751 }
5752 }
5753 } else {
5754 /* Do we need to trim the mbuf? */
5755 if (control->spec_flags & M_NOTIFICATION) {
5756 out_flags |= MSG_NOTIFICATION;
5757 }
5758 if ((in_flags & MSG_PEEK) == 0) {
5759 SCTP_BUF_RESV_UF(m, cp_len);
5760 SCTP_BUF_LEN(m) -= cp_len;
5761 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5762 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5763 }
5764 atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5765 if ((control->do_not_ref_stcb == 0) &&
5766 stcb) {
5767 atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5768 }
5769 copied_so_far += cp_len;
5770 freed_so_far += cp_len;
5771 freed_so_far += MSIZE;
5772 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5773 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5774 SCTP_LOG_SBRESULT, 0);
5775 }
5776 atomic_subtract_int(&control->length, cp_len);
5777 } else {
5778 copied_so_far += cp_len;
5779 }
5780 }
5781 if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5782 break;
5783 }
5784 if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5785 (control->do_not_ref_stcb == 0) &&
5786 (freed_so_far >= rwnd_req)) {
5787 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5788 }
5789 } /* end while(m) */
5790 /*
5791 * At this point we have looked at it all and we either have
5792 * a MSG_EOR/or read all the user wants... <OR>
5793 * control->length == 0.
5794 */
5795 if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5796 /* we are done with this control */
5797 if (control->length == 0) {
5798 if (control->data) {
5799 #ifdef INVARIANTS
5800 panic("control->data not null at read eor?");
5801 #else
5802 SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5803 sctp_m_freem(control->data);
5804 control->data = NULL;
5805 #endif
5806 }
5807 done_with_control:
5808 if (TAILQ_NEXT(control, next) == NULL) {
5809 /*
5810 * If we don't have a next we need a
5811 * lock, if there is a next
5812 * interrupt is filling ahead of us
5813 * and we don't need a lock to
5814 * remove this guy (which is the
5815 * head of the queue).
5816 */
5817 if (hold_rlock == 0) {
5818 SCTP_INP_READ_LOCK(inp);
5819 hold_rlock = 1;
5820 }
5821 }
5822 TAILQ_REMOVE(&inp->read_queue, control, next);
5823 /* Add back any hiddend data */
5824 if (control->held_length) {
5825 held_length = 0;
5826 control->held_length = 0;
5827 wakeup_read_socket = 1;
5828 }
5829 if (control->aux_data) {
5830 sctp_m_free(control->aux_data);
5831 control->aux_data = NULL;
5832 }
5833 no_rcv_needed = control->do_not_ref_stcb;
5834 sctp_free_remote_addr(control->whoFrom);
5835 control->data = NULL;
5836 sctp_free_a_readq(stcb, control);
5837 control = NULL;
5838 if ((freed_so_far >= rwnd_req) &&
5839 (no_rcv_needed == 0))
5840 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5841
5842 } else {
5843 /*
5844 * The user did not read all of this
5845 * message, turn off the returned MSG_EOR
5846 * since we are leaving more behind on the
5847 * control to read.
5848 */
5849 #ifdef INVARIANTS
5850 if (control->end_added &&
5851 (control->data == NULL) &&
5852 (control->tail_mbuf == NULL)) {
5853 panic("Gak, control->length is corrupt?");
5854 }
5855 #endif
5856 no_rcv_needed = control->do_not_ref_stcb;
5857 out_flags &= ~MSG_EOR;
5858 }
5859 }
5860 if (out_flags & MSG_EOR) {
5861 goto release;
5862 }
5863 if ((uio->uio_resid == 0) ||
5864 ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))
5865 ) {
5866 goto release;
5867 }
5868 /*
5869 * If I hit here the receiver wants more and this message is
5870 * NOT done (pd-api). So two questions. Can we block? if not
5871 * we are done. Did the user NOT set MSG_WAITALL?
5872 */
5873 if (block_allowed == 0) {
5874 goto release;
5875 }
5876 /*
5877 * We need to wait for more data a few things: - We don't
5878 * sbunlock() so we don't get someone else reading. - We
5879 * must be sure to account for the case where what is added
5880 * is NOT to our control when we wakeup.
5881 */
5882
5883 /*
5884 * Do we need to tell the transport a rwnd update might be
5885 * needed before we go to sleep?
5886 */
5887 if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5888 ((freed_so_far >= rwnd_req) &&
5889 (control->do_not_ref_stcb == 0) &&
5890 (no_rcv_needed == 0))) {
5891 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5892 }
5893 wait_some_more:
5894 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5895 goto release;
5896 }
5897 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5898 goto release;
5899
5900 if (hold_rlock == 1) {
5901 SCTP_INP_READ_UNLOCK(inp);
5902 hold_rlock = 0;
5903 }
5904 if (hold_sblock == 0) {
5905 SOCKBUF_LOCK(&so->so_rcv);
5906 hold_sblock = 1;
5907 }
5908 if ((copied_so_far) && (control->length == 0) &&
5909 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5910 goto release;
5911 }
5912 if (so->so_rcv.sb_cc <= control->held_length) {
5913 error = sbwait(&so->so_rcv);
5914 if (error) {
5915 goto release;
5916 }
5917 control->held_length = 0;
5918 }
5919 if (hold_sblock) {
5920 SOCKBUF_UNLOCK(&so->so_rcv);
5921 hold_sblock = 0;
5922 }
5923 if (control->length == 0) {
5924 /* still nothing here */
5925 if (control->end_added == 1) {
5926 /* he aborted, or is done i.e.did a shutdown */
5927 out_flags |= MSG_EOR;
5928 if (control->pdapi_aborted) {
5929 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5930 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5931
5932 out_flags |= MSG_TRUNC;
5933 } else {
5934 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5935 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5936 }
5937 goto done_with_control;
5938 }
5939 if (so->so_rcv.sb_cc > held_length) {
5940 control->held_length = so->so_rcv.sb_cc;
5941 held_length = 0;
5942 }
5943 goto wait_some_more;
5944 } else if (control->data == NULL) {
5945 /*
5946 * we must re-sync since data is probably being
5947 * added
5948 */
5949 SCTP_INP_READ_LOCK(inp);
5950 if ((control->length > 0) && (control->data == NULL)) {
5951 /*
5952 * big trouble.. we have the lock and its
5953 * corrupt?
5954 */
5955 #ifdef INVARIANTS
5956 panic("Impossible data==NULL length !=0");
5957 #endif
5958 out_flags |= MSG_EOR;
5959 out_flags |= MSG_TRUNC;
5960 control->length = 0;
5961 SCTP_INP_READ_UNLOCK(inp);
5962 goto done_with_control;
5963 }
5964 SCTP_INP_READ_UNLOCK(inp);
5965 /* We will fall around to get more data */
5966 }
5967 goto get_more_data;
5968 } else {
5969 /*-
5970 * Give caller back the mbuf chain,
5971 * store in uio_resid the length
5972 */
5973 wakeup_read_socket = 0;
5974 if ((control->end_added == 0) ||
5975 (TAILQ_NEXT(control, next) == NULL)) {
5976 /* Need to get rlock */
5977 if (hold_rlock == 0) {
5978 SCTP_INP_READ_LOCK(inp);
5979 hold_rlock = 1;
5980 }
5981 }
5982 if (control->end_added) {
5983 out_flags |= MSG_EOR;
5984 if ((control->do_not_ref_stcb == 0) &&
5985 (control->stcb != NULL) &&
5986 ((control->spec_flags & M_NOTIFICATION) == 0))
5987 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5988 }
5989 if (control->spec_flags & M_NOTIFICATION) {
5990 out_flags |= MSG_NOTIFICATION;
5991 }
5992 uio->uio_resid = control->length;
5993 *mp = control->data;
5994 m = control->data;
5995 while (m) {
5996 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5997 sctp_sblog(&so->so_rcv,
5998 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5999 }
6000 sctp_sbfree(control, stcb, &so->so_rcv, m);
6001 freed_so_far += SCTP_BUF_LEN(m);
6002 freed_so_far += MSIZE;
6003 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6004 sctp_sblog(&so->so_rcv,
6005 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6006 }
6007 m = SCTP_BUF_NEXT(m);
6008 }
6009 control->data = control->tail_mbuf = NULL;
6010 control->length = 0;
6011 if (out_flags & MSG_EOR) {
6012 /* Done with this control */
6013 goto done_with_control;
6014 }
6015 }
6016 release:
6017 if (hold_rlock == 1) {
6018 SCTP_INP_READ_UNLOCK(inp);
6019 hold_rlock = 0;
6020 }
6021 if (hold_sblock == 1) {
6022 SOCKBUF_UNLOCK(&so->so_rcv);
6023 hold_sblock = 0;
6024 }
6025 sbunlock(&so->so_rcv);
6026 sockbuf_lock = 0;
6027
6028 release_unlocked:
6029 if (hold_sblock) {
6030 SOCKBUF_UNLOCK(&so->so_rcv);
6031 hold_sblock = 0;
6032 }
6033 if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6034 if ((freed_so_far >= rwnd_req) &&
6035 (control && (control->do_not_ref_stcb == 0)) &&
6036 (no_rcv_needed == 0))
6037 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6038 }
6039 out:
6040 if (msg_flags) {
6041 *msg_flags = out_flags;
6042 }
6043 if (((out_flags & MSG_EOR) == 0) &&
6044 ((in_flags & MSG_PEEK) == 0) &&
6045 (sinfo) &&
6046 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6047 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6048 struct sctp_extrcvinfo *s_extra;
6049
6050 s_extra = (struct sctp_extrcvinfo *)sinfo;
6051 s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
6052 }
6053 if (hold_rlock == 1) {
6054 SCTP_INP_READ_UNLOCK(inp);
6055 }
6056 if (hold_sblock) {
6057 SOCKBUF_UNLOCK(&so->so_rcv);
6058 }
6059 if (sockbuf_lock) {
6060 sbunlock(&so->so_rcv);
6061 }
6062 if (freecnt_applied) {
6063 /*
6064 * The lock on the socket buffer protects us so the free
6065 * code will stop. But since we used the socketbuf lock and
6066 * the sender uses the tcb_lock to increment, we need to use
6067 * the atomic add to the refcnt.
6068 */
6069 if (stcb == NULL) {
6070 #ifdef INVARIANTS
6071 panic("stcb for refcnt has gone NULL?");
6072 goto stage_left;
6073 #else
6074 goto stage_left;
6075 #endif
6076 }
6077 atomic_add_int(&stcb->asoc.refcnt, -1);
6078 /* Save the value back for next time */
6079 stcb->freed_by_sorcv_sincelast = freed_so_far;
6080 }
6081 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6082 if (stcb) {
6083 sctp_misc_ints(SCTP_SORECV_DONE,
6084 freed_so_far,
6085 ((uio) ? (slen - uio->uio_resid) : slen),
6086 stcb->asoc.my_rwnd,
6087 so->so_rcv.sb_cc);
6088 } else {
6089 sctp_misc_ints(SCTP_SORECV_DONE,
6090 freed_so_far,
6091 ((uio) ? (slen - uio->uio_resid) : slen),
6092 0,
6093 so->so_rcv.sb_cc);
6094 }
6095 }
6096 stage_left:
6097 if (wakeup_read_socket) {
6098 sctp_sorwakeup(inp, so);
6099 }
6100 return (error);
6101 }
6102
6103
6104 #ifdef SCTP_MBUF_LOGGING
6105 struct mbuf *
6106 sctp_m_free(struct mbuf *m)
6107 {
6108 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6109 if (SCTP_BUF_IS_EXTENDED(m)) {
6110 sctp_log_mb(m, SCTP_MBUF_IFREE);
6111 }
6112 }
6113 return (m_free(m));
6114 }
6115
6116 void
6117 sctp_m_freem(struct mbuf *mb)
6118 {
6119 while (mb != NULL)
6120 mb = sctp_m_free(mb);
6121 }
6122
6123 #endif
6124
6125 int
6126 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6127 {
6128 /*
6129 * Given a local address. For all associations that holds the
6130 * address, request a peer-set-primary.
6131 */
6132 struct sctp_ifa *ifa;
6133 struct sctp_laddr *wi;
6134
6135 ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6136 if (ifa == NULL) {
6137 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6138 return (EADDRNOTAVAIL);
6139 }
6140 /*
6141 * Now that we have the ifa we must awaken the iterator with this
6142 * message.
6143 */
6144 wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6145 if (wi == NULL) {
6146 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6147 return (ENOMEM);
6148 }
6149 /* Now incr the count and int wi structure */
6150 SCTP_INCR_LADDR_COUNT();
6151 bzero(wi, sizeof(*wi));
6152 (void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6153 wi->ifa = ifa;
6154 wi->action = SCTP_SET_PRIM_ADDR;
6155 atomic_add_int(&ifa->refcount, 1);
6156
6157 /* Now add it to the work queue */
6158 SCTP_WQ_ADDR_LOCK();
6159 /*
6160 * Should this really be a tailq? As it is we will process the
6161 * newest first :-0
6162 */
6163 LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6164 SCTP_WQ_ADDR_UNLOCK();
6165 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6166 (struct sctp_inpcb *)NULL,
6167 (struct sctp_tcb *)NULL,
6168 (struct sctp_nets *)NULL);
6169 return (0);
6170 }
6171
6172
6173 int
6174 sctp_soreceive(struct socket *so,
6175 struct sockaddr **psa,
6176 struct uio *uio,
6177 struct mbuf **mp0,
6178 struct mbuf **controlp,
6179 int *flagsp)
6180 {
6181 int error, fromlen;
6182 uint8_t sockbuf[256];
6183 struct sockaddr *from;
6184 struct sctp_extrcvinfo sinfo;
6185 int filling_sinfo = 1;
6186 struct sctp_inpcb *inp;
6187
6188 inp = (struct sctp_inpcb *)so->so_pcb;
6189 /* pickup the assoc we are reading from */
6190 if (inp == NULL) {
6191 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6192 return (EINVAL);
6193 }
6194 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6195 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6196 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6197 (controlp == NULL)) {
6198 /* user does not want the sndrcv ctl */
6199 filling_sinfo = 0;
6200 }
6201 if (psa) {
6202 from = (struct sockaddr *)sockbuf;
6203 fromlen = sizeof(sockbuf);
6204 from->sa_len = 0;
6205 } else {
6206 from = NULL;
6207 fromlen = 0;
6208 }
6209
6210 if (filling_sinfo) {
6211 memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
6212 }
6213 error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6214 (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6215 if (controlp != NULL) {
6216 /* copy back the sinfo in a CMSG format */
6217 if (filling_sinfo)
6218 *controlp = sctp_build_ctl_nchunk(inp,
6219 (struct sctp_sndrcvinfo *)&sinfo);
6220 else
6221 *controlp = NULL;
6222 }
6223 if (psa) {
6224 /* copy back the address info */
6225 if (from && from->sa_len) {
6226 *psa = sodupsockaddr(from, M_NOWAIT);
6227 } else {
6228 *psa = NULL;
6229 }
6230 }
6231 return (error);
6232 }
6233
6234
6235
6236
6237
/*
 * sctp_connectx() helper: walk the "totaddr" packed sockaddrs that
 * start at "addr" and add each one to association "stcb" as a remote
 * peer address.  On any invalid address or allocation failure the
 * WHOLE association is freed via sctp_free_assoc(), *error is set
 * (EINVAL or ENOBUFS) and the walk stops.
 *
 * Returns the number of addresses successfully added; on error the
 * caller must not touch stcb again (it has been freed).
 *
 * NOTE(review): an entry with an unsupported sa_family leaves "incr"
 * at its previous value (0 for the very first entry), so the cursor
 * may fail to advance past it and re-examine the same bytes —
 * presumably callers only pass AF_INET/AF_INET6 lists; confirm.
 */
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	int added = 0;		/* count of addresses accepted so far */
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;	/* cursor into the packed address list */
	size_t incr = 0;	/* size of the entry just processed */

#ifdef INET
	struct sockaddr_in *sin;

#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;

#endif

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = sizeof(struct sockaddr_in);
			sin = (struct sockaddr_in *)sa;
			/* Wildcard, broadcast and multicast are not valid peers. */
			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			incr = sizeof(struct sockaddr_in6);
			sin6 = (struct sockaddr_in6 *)sa;
			/* Unspecified (::) and multicast v6 addresses are rejected. */
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
		default:
			/* Unknown family: skipped (see NOTE in header). */
			break;
		}
		/* Advance the cursor to the next packed sockaddr. */
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6313
6314 struct sctp_tcb *
6315 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6316 int *totaddr, int *num_v4, int *num_v6, int *error,
6317 int limit, int *bad_addr)
6318 {
6319 struct sockaddr *sa;
6320 struct sctp_tcb *stcb = NULL;
6321 size_t incr, at, i;
6322
6323 at = incr = 0;
6324 sa = addr;
6325
6326 *error = *num_v6 = *num_v4 = 0;
6327 /* account and validate addresses */
6328 for (i = 0; i < (size_t)*totaddr; i++) {
6329 switch (sa->sa_family) {
6330 #ifdef INET
6331 case AF_INET:
6332 (*num_v4) += 1;
6333 incr = sizeof(struct sockaddr_in);
6334 if (sa->sa_len != incr) {
6335 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6336 *error = EINVAL;
6337 *bad_addr = 1;
6338 return (NULL);
6339 }
6340 break;
6341 #endif
6342 #ifdef INET6
6343 case AF_INET6:
6344 {
6345 struct sockaddr_in6 *sin6;
6346
6347 sin6 = (struct sockaddr_in6 *)sa;
6348 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6349 /* Must be non-mapped for connectx */
6350 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6351 *error = EINVAL;
6352 *bad_addr = 1;
6353 return (NULL);
6354 }
6355 (*num_v6) += 1;
6356 incr = sizeof(struct sockaddr_in6);
6357 if (sa->sa_len != incr) {
6358 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6359 *error = EINVAL;
6360 *bad_addr = 1;
6361 return (NULL);
6362 }
6363 break;
6364 }
6365 #endif
6366 default:
6367 *totaddr = i;
6368 /* we are done */
6369 break;
6370 }
6371 if (i == (size_t)*totaddr) {
6372 break;
6373 }
6374 SCTP_INP_INCR_REF(inp);
6375 stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6376 if (stcb != NULL) {
6377 /* Already have or am bring up an association */
6378 return (stcb);
6379 } else {
6380 SCTP_INP_DECR_REF(inp);
6381 }
6382 if ((at + incr) > (size_t)limit) {
6383 *totaddr = i;
6384 break;
6385 }
6386 sa = (struct sockaddr *)((caddr_t)sa + incr);
6387 }
6388 return ((struct sctp_tcb *)NULL);
6389 }
6390
/*
 * sctp_bindx(ADD) for one address.
 * assumes all arguments are valid/checked by caller.
 *
 * Validates the address family and length against the socket type,
 * converts a v4-mapped IPv6 address to plain IPv4, and then either
 * performs the initial bind (if the endpoint is still unbound) or adds
 * the address to the already-bound endpoint via sctp_addr_mgmt_ep_sa().
 * On failure *error is set to a non-zero errno value.
 */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	struct sockaddr *addr_touse;	/* address actually handed to the PCB code */

#ifdef INET6
	struct sockaddr_in sin;		/* scratch space for un-mapping v4-mapped v6 */

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* Strip the mapping; bind the embedded v4 address. */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		/* First address on an unbound endpoint: regular bind. */
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		/*
		 * NOTE(review): addr_touse is cast to sockaddr_in even for a
		 * (non-mapped) AF_INET6 address; this relies on sin_port and
		 * sin6_port sharing the same offset -- confirm intentional.
		 */
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		/* Is another endpoint already bound to this address/port? */
		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* Address is free: add it to this endpoint. */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			/* Some other endpoint owns the address. */
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6518
6519 /*
6520 * sctp_bindx(DELETE) for one address.
6521 * assumes all arguments are valid/checked by caller.
6522 */
6523 void
6524 sctp_bindx_delete_address(struct sctp_inpcb *inp,
6525 struct sockaddr *sa, sctp_assoc_t assoc_id,
6526 uint32_t vrf_id, int *error)
6527 {
6528 struct sockaddr *addr_touse;
6529
6530 #ifdef INET6
6531 struct sockaddr_in sin;
6532
6533 #endif
6534
6535 /* see if we're bound all already! */
6536 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6537 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6538 *error = EINVAL;
6539 return;
6540 }
6541 addr_touse = sa;
6542 #ifdef INET6
6543 if (sa->sa_family == AF_INET6) {
6544 struct sockaddr_in6 *sin6;
6545
6546 if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6547 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6548 *error = EINVAL;
6549 return;
6550 }
6551 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6552 /* can only bind v6 on PF_INET6 sockets */
6553 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6554 *error = EINVAL;
6555 return;
6556 }
6557 sin6 = (struct sockaddr_in6 *)addr_touse;
6558 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6559 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6560 SCTP_IPV6_V6ONLY(inp)) {
6561 /* can't bind mapped-v4 on PF_INET sockets */
6562 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6563 *error = EINVAL;
6564 return;
6565 }
6566 in6_sin6_2_sin(&sin, sin6);
6567 addr_touse = (struct sockaddr *)&sin;
6568 }
6569 }
6570 #endif
6571 #ifdef INET
6572 if (sa->sa_family == AF_INET) {
6573 if (sa->sa_len != sizeof(struct sockaddr_in)) {
6574 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6575 *error = EINVAL;
6576 return;
6577 }
6578 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6579 SCTP_IPV6_V6ONLY(inp)) {
6580 /* can't bind v4 on PF_INET sockets */
6581 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6582 *error = EINVAL;
6583 return;
6584 }
6585 }
6586 #endif
6587 /*
6588 * No lock required mgmt_ep_sa does its own locking. If the FIX:
6589 * below is ever changed we may need to lock before calling
6590 * association level binding.
6591 */
6592 if (assoc_id == 0) {
6593 /* delete the address */
6594 *error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6595 SCTP_DEL_IP_ADDRESS,
6596 vrf_id, NULL);
6597 } else {
6598 /*
6599 * FIX: decide whether we allow assoc based bindx
6600 */
6601 }
6602 }
6603
/*
 * returns the valid local address count for an assoc, taking into account
 * all scoping rules
 *
 * Bound-all endpoints walk every interface address in the association's
 * VRF and apply the association's scope flags; subset-bound endpoints
 * simply count the endpoint's non-restricted bound addresses.
 */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
	int ipv4_addr_legal, ipv6_addr_legal;
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.scope.loopback_scope;
	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
	local_scope = stcb->asoc.scope.local_scope;
	site_scope = stcb->asoc.scope.site_scope;
	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				/* loopback addresses are out of scope */
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
#ifdef INET
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#endif
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/*
									 * bad link
									 * local
									 * address
									 */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6733
6734 #if defined(SCTP_LOCAL_TRACE_BUF)
6735
/*
 * Record one entry in the global SCTP trace ring buffer.
 * 'str' is unused here (SCTP_UNUSED); only the numeric parameters and
 * the subsystem id are stored with a cycle-count timestamp.
 */
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	uint32_t saveindex, newindex;

	/*
	 * Lock-free slot claim: retry the compare-and-set until we
	 * successfully advance sctp_log.index from the value we read.
	 * The stored index wraps from SCTP_MAX_LOGGING_SIZE back to 1.
	 */
	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	/* A saved index at/above the limit maps onto entry slot 0. */
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		saveindex = 0;
	}
	/* Fill the claimed entry. */
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}
6761
6762 #endif
/*
 * Input hook for SCTP packets tunneled over UDP: strip the UDP header
 * from the mbuf chain, remember the UDP source port, and hand the
 * packet to the regular SCTP input path.  The chain is always consumed
 * (either passed on or freed).
 */
static void
sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored)
{
	struct ip *iph;

#ifdef INET6
	struct ip6_hdr *ip6;

#endif
	struct mbuf *sp, *last;
	struct udphdr *uhdr;
	uint16_t port;	/* UDP source port, kept in network byte order */

	if ((m->m_flags & M_PKTHDR) == 0) {
		/* Can't handle one that is not a pkt hdr */
		goto out;
	}
	/* Pull the src port */
	iph = mtod(m, struct ip *);
	uhdr = (struct udphdr *)((caddr_t)iph + off);
	port = uhdr->uh_sport;
	/*
	 * Split out the mbuf chain. Leave the IP header in m, place the
	 * rest in the sp.
	 */
	sp = m_split(m, off, M_DONTWAIT);
	if (sp == NULL) {
		/* Gak, drop packet, we can't do a split */
		goto out;
	}
	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
		/* Gak, packet can't have an SCTP header in it - too small */
		m_freem(sp);
		goto out;
	}
	/* Now pull up the UDP header and SCTP header together */
	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
	if (sp == NULL) {
		/* Gak pullup failed (m_pullup freed the chain for us) */
		goto out;
	}
	/* Trim out the UDP header */
	m_adj(sp, sizeof(struct udphdr));

	/* Now reconstruct the mbuf chain */
	for (last = m; last->m_next; last = last->m_next);
	last->m_next = sp;
	m->m_pkthdr.len += sp->m_pkthdr.len;
	iph = mtod(m, struct ip *);
	switch (iph->ip_v) {
#ifdef INET
	case IPVERSION:
		/*
		 * NOTE(review): assumes ip_len is in host byte order at
		 * this point of the input path -- confirm for this branch.
		 */
		iph->ip_len -= sizeof(struct udphdr);
		sctp_input_with_port(m, off, port);
		break;
#endif
#ifdef INET6
	case IPV6_VERSION >> 4:
		ip6 = mtod(m, struct ip6_hdr *);
		/* ip6_plen stays in network byte order; adjust in place. */
		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
		sctp6_input_with_port(&m, &off, port);
		break;
#endif
	default:
		goto out;
		break;
	}
	return;
out:
	m_freem(m);
}
6834
/*
 * Tear down the UDP tunneling sockets created by sctp_over_udp_start().
 * The caller must hold the sysctl info lock exclusively (for writing).
 */
void
sctp_over_udp_stop(void)
{
#ifdef INET
	struct socket *tun4;

#endif
#ifdef INET6
	struct socket *tun6;

#endif
#ifdef INET
	tun4 = SCTP_BASE_INFO(udp4_tun_socket);
	if (tun4 != NULL) {
		soclose(tun4);
		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
	}
#endif
#ifdef INET6
	tun6 = SCTP_BASE_INFO(udp6_tun_socket);
	if (tun6 != NULL) {
		soclose(tun6);
		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
	}
#endif
}
6855
/*
 * Create, hook up, and bind the UDP tunneling socket(s) on the sysctl
 * configured port.  Returns 0 on success or an errno value; any partial
 * setup is unwound via sctp_over_udp_stop() before returning an error.
 */
int
sctp_over_udp_start(void)
{
	uint16_t port;
	int ret;

#ifdef INET
	struct sockaddr_in sin;

#endif
#ifdef INET6
	struct sockaddr_in6 sin6;

#endif
	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writting!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	/*
	 * NOTE(review): ntohs(port) == 0 iff port == 0 (byte swap of zero
	 * is zero), so this is just a zero-port check.
	 */
	if (ntohs(port) == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET
	/* Create the v4 tunneling socket. */
	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
	    sctp_recv_udp_tunneled_packet))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin, 0, sizeof(struct sockaddr_in));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
	    (struct sockaddr *)&sin, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
#ifdef INET6
	/* Same sequence for the v6 tunneling socket. */
	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
	    sctp_recv_udp_tunneled_packet))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin6, 0, sizeof(struct sockaddr_in6));
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_family = AF_INET6;
	sin6.sin6_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
	    (struct sockaddr *)&sin6, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
	return (0);
}
Cache object: 921c0c022fd6e2533a6a6ef732a50505
|