1 /* $KAME: sctputil.c,v 1.36 2005/03/06 16:04:19 itojun Exp $ */
2
3 /*
4 * Copyright (c) 2001, 2002, 2003, 2004 Cisco Systems, Inc.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Cisco Systems, Inc.
18 * 4. Neither the name of the project nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY CISCO SYSTEMS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL CISCO SYSTEMS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 */
34
35 #if !(defined(__OpenBSD__) || defined(__APPLE__))
36 #include "opt_ipsec.h"
37 #endif
38 #if defined(__FreeBSD__) || defined(__DragonFly__)
39 #include "opt_compat.h"
40 #include "opt_inet6.h"
41 #include "opt_inet.h"
42 #if !(defined(SCTP_BASE_FREEBSD) || defined(__DragonFly__))
43 #include "opt_mpath.h"
44 #endif /* SCTP_BASE_FREEBSD || __DragonFly__ */
45 #endif /* FreeBSD */
46 #if defined(__NetBSD__)
47 #include "opt_inet.h"
48 #endif
49 #ifdef __APPLE__
50 #include <sctp.h>
51 #elif !defined(__OpenBSD__)
52 #include "opt_sctp.h"
53 #endif
54
55 #include <sys/param.h>
56 #include <sys/systm.h>
57 #include <sys/malloc.h>
58 #include <sys/mbuf.h>
59 #include <sys/domain.h>
60 #include <sys/protosw.h>
61 #include <sys/socket.h>
62 #include <sys/socketvar.h>
63 #include <sys/proc.h>
64 #include <sys/kernel.h>
65 #include <sys/sysctl.h>
66 #include <sys/thread2.h>
67
68 #if defined(__FreeBSD__) || defined(__DragonFly__)
69 #include <sys/callout.h>
70 #else
71 #include <netinet/sctp_callout.h> /* for callout_active() */
72 #endif
73
74 #include <net/radix.h>
75 #include <net/route.h>
76
77 #ifdef INET6
78 #ifndef __OpenBSD__
79 #include <sys/domain.h>
80 #endif
81 #endif
82
83 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000)
84 #include <sys/limits.h>
85 #else
86 #include <machine/limits.h>
87 #endif
88
89 #include <net/if.h>
90 #include <net/if_types.h>
91 #include <net/route.h>
92
93 #include <netinet/in.h>
94 #include <netinet/in_systm.h>
95 #include <netinet/ip.h>
96 #include <netinet/in_pcb.h>
97 #include <netinet/in_var.h>
98 #include <netinet/ip_var.h>
99
100 #ifdef INET6
101 #include <netinet/ip6.h>
102 #include <netinet6/ip6_var.h>
103
#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__APPLE__) || defined(__DragonFly__)
105 #include <netinet6/in6_pcb.h>
106 #elif defined(__OpenBSD__)
107 #include <netinet/in_pcb.h>
108 #endif
109
110 #endif /* INET6 */
111
112 #include <netinet/sctp_pcb.h>
113
114 #ifdef IPSEC
115 #ifndef __OpenBSD__
116 #include <netinet6/ipsec.h>
117 #include <netproto/key/key.h>
118 #else
119 #undef IPSEC
120 #endif
121 #endif /* IPSEC */
122
123 #include <netinet/sctputil.h>
124 #include <netinet/sctp_var.h>
125 #ifdef INET6
126 #include <netinet6/sctp6_var.h>
127 #endif
128 #include <netinet/sctp_header.h>
129 #include <netinet/sctp_output.h>
130 #include <netinet/sctp_hashdriver.h>
131 #include <netinet/sctp_uio.h>
132 #include <netinet/sctp_timer.h>
133 #include <netinet/sctp_crc32.h>
134 #include <netinet/sctp_indata.h> /* for sctp_deliver_data() */
135 #define NUMBER_OF_MTU_SIZES 18
136
137 #ifdef SCTP_DEBUG
138 extern u_int32_t sctp_debug_on;
139 #endif
140
141 #ifdef SCTP_STAT_LOGGING
142 int sctp_cwnd_log_at=0;
143 int sctp_cwnd_log_rolled=0;
144 struct sctp_cwnd_log sctp_clog[SCTP_STAT_LOG_SIZE];
145
146 void
147 sctp_clr_stat_log(void)
148 {
149 sctp_cwnd_log_at=0;
150 sctp_cwnd_log_rolled=0;
151 }
152
153 void
154 sctp_log_strm_del_alt(u_int32_t tsn, u_int16_t sseq, int from)
155 {
156
157 sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
158 sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_STRM;
159 sctp_clog[sctp_cwnd_log_at].x.strlog.n_tsn = tsn;
160 sctp_clog[sctp_cwnd_log_at].x.strlog.n_sseq = sseq;
161 sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn = 0;
162 sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq = 0;
163 sctp_cwnd_log_at++;
164 if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
165 sctp_cwnd_log_at = 0;
166 sctp_cwnd_log_rolled = 1;
167 }
168
169
170 }
171
172 void
173 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
174 {
175
176 sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
177 sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_MAP;
178 sctp_clog[sctp_cwnd_log_at].x.map.base = map;
179 sctp_clog[sctp_cwnd_log_at].x.map.cum = cum;
180 sctp_clog[sctp_cwnd_log_at].x.map.high = high;
181 sctp_cwnd_log_at++;
182 if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
183 sctp_cwnd_log_at = 0;
184 sctp_cwnd_log_rolled = 1;
185 }
186 }
187
188 void
189 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
190 int from)
191 {
192
193 sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
194 sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_FR;
195 sctp_clog[sctp_cwnd_log_at].x.fr.largest_tsn = biggest_tsn;
196 sctp_clog[sctp_cwnd_log_at].x.fr.largest_new_tsn = biggest_new_tsn;
197 sctp_clog[sctp_cwnd_log_at].x.fr.tsn = tsn;
198 sctp_cwnd_log_at++;
199 if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
200 sctp_cwnd_log_at = 0;
201 sctp_cwnd_log_rolled = 1;
202 }
203 }
204
205 void
206 sctp_log_strm_del(struct sctp_tmit_chunk *chk, struct sctp_tmit_chunk *poschk,
207 int from)
208 {
209
210 if (chk == NULL) {
211 kprintf("Gak log of NULL?\n");
212 return;
213 }
214 sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
215 sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_STRM;
216 sctp_clog[sctp_cwnd_log_at].x.strlog.n_tsn = chk->rec.data.TSN_seq;
217 sctp_clog[sctp_cwnd_log_at].x.strlog.n_sseq = chk->rec.data.stream_seq;
218 if (poschk != NULL) {
219 sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn =
220 poschk->rec.data.TSN_seq;
221 sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq =
222 poschk->rec.data.stream_seq;
223 } else {
224 sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn = 0;
225 sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq = 0;
226 }
227 sctp_cwnd_log_at++;
228 if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
229 sctp_cwnd_log_at = 0;
230 sctp_cwnd_log_rolled = 1;
231 }
232 }
233
234 void
235 sctp_log_cwnd(struct sctp_nets *net, int augment, uint8_t from)
236 {
237
238 sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
239 sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_CWND;
240 sctp_clog[sctp_cwnd_log_at].x.cwnd.net = net;
241 sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_new_value = net->cwnd;
242 sctp_clog[sctp_cwnd_log_at].x.cwnd.inflight = net->flight_size;
243 sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_augment = augment;
244 sctp_cwnd_log_at++;
245 if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
246 sctp_cwnd_log_at = 0;
247 sctp_cwnd_log_rolled = 1;
248 }
249 }
250
251 void
252 sctp_log_maxburst(struct sctp_nets *net, int error, int burst, uint8_t from)
253 {
254 sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
255 sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_MAXBURST;
256 sctp_clog[sctp_cwnd_log_at].x.cwnd.net = net;
257 sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_new_value = error;
258 sctp_clog[sctp_cwnd_log_at].x.cwnd.inflight = net->flight_size;
259 sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_augment = burst;
260 sctp_cwnd_log_at++;
261 if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
262 sctp_cwnd_log_at = 0;
263 sctp_cwnd_log_rolled = 1;
264 }
265 }
266
267 void
268 sctp_log_rwnd(uint8_t from, u_int32_t peers_rwnd , u_int32_t snd_size, u_int32_t overhead)
269 {
270 sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
271 sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_RWND;
272 sctp_clog[sctp_cwnd_log_at].x.rwnd.rwnd = peers_rwnd;
273 sctp_clog[sctp_cwnd_log_at].x.rwnd.send_size = snd_size;
274 sctp_clog[sctp_cwnd_log_at].x.rwnd.overhead = overhead;
275 sctp_clog[sctp_cwnd_log_at].x.rwnd.new_rwnd = 0;
276 sctp_cwnd_log_at++;
277 if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
278 sctp_cwnd_log_at = 0;
279 sctp_cwnd_log_rolled = 1;
280 }
281 }
282
283 void
284 sctp_log_rwnd_set(uint8_t from, u_int32_t peers_rwnd , u_int32_t flight_size, u_int32_t overhead, u_int32_t a_rwndval)
285 {
286 sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
287 sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_RWND;
288 sctp_clog[sctp_cwnd_log_at].x.rwnd.rwnd = peers_rwnd;
289 sctp_clog[sctp_cwnd_log_at].x.rwnd.send_size = flight_size;
290 sctp_clog[sctp_cwnd_log_at].x.rwnd.overhead = overhead;
291 sctp_clog[sctp_cwnd_log_at].x.rwnd.new_rwnd = a_rwndval;
292 sctp_cwnd_log_at++;
293 if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
294 sctp_cwnd_log_at = 0;
295 sctp_cwnd_log_rolled = 1;
296 }
297 }
298
299 void
300 sctp_log_mbcnt(uint8_t from, u_int32_t total_oq , u_int32_t book, u_int32_t total_mbcnt_q, u_int32_t mbcnt)
301 {
302 sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
303 sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_MBCNT;
304 sctp_clog[sctp_cwnd_log_at].x.mbcnt.total_queue_size = total_oq;
305 sctp_clog[sctp_cwnd_log_at].x.mbcnt.size_change = book;
306 sctp_clog[sctp_cwnd_log_at].x.mbcnt.total_queue_mb_size = total_mbcnt_q;
307 sctp_clog[sctp_cwnd_log_at].x.mbcnt.mbcnt_change = mbcnt;
308 sctp_cwnd_log_at++;
309 if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
310 sctp_cwnd_log_at = 0;
311 sctp_cwnd_log_rolled = 1;
312 }
313 }
314
315 void
316 sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc)
317 {
318
319 sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
320 sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_BLOCK;
321 sctp_clog[sctp_cwnd_log_at].x.blk.maxmb = (u_int16_t)(so->so_snd.ssb_mbmax/1024);
322 sctp_clog[sctp_cwnd_log_at].x.blk.onmb = asoc->total_output_mbuf_queue_size;
323 sctp_clog[sctp_cwnd_log_at].x.blk.maxsb = (u_int16_t)(so->so_snd.ssb_hiwat/1024);
324 sctp_clog[sctp_cwnd_log_at].x.blk.onsb = asoc->total_output_queue_size;
325 sctp_clog[sctp_cwnd_log_at].x.blk.send_sent_qcnt = (u_int16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
326 sctp_clog[sctp_cwnd_log_at].x.blk.stream_qcnt = (u_int16_t)asoc->stream_queue_cnt;
327 sctp_cwnd_log_at++;
328 if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
329 sctp_cwnd_log_at = 0;
330 sctp_cwnd_log_rolled = 1;
331 }
332 }
333
334 int
335 sctp_fill_stat_log(struct mbuf *m)
336 {
337 struct sctp_cwnd_log_req *req;
338 int size_limit, num, i, at, cnt_out=0;
339
340 if (m == NULL)
341 return (EINVAL);
342
343 size_limit = (m->m_len - sizeof(struct sctp_cwnd_log_req));
344 if (size_limit < sizeof(struct sctp_cwnd_log)) {
345 return (EINVAL);
346 }
347 req = mtod(m, struct sctp_cwnd_log_req *);
348 num = size_limit/sizeof(struct sctp_cwnd_log);
349 if (sctp_cwnd_log_rolled) {
350 req->num_in_log = SCTP_STAT_LOG_SIZE;
351 } else {
352 req->num_in_log = sctp_cwnd_log_at;
353 /* if the log has not rolled, we don't
354 * let you have old data.
355 */
356 if (req->end_at > sctp_cwnd_log_at) {
357 req->end_at = sctp_cwnd_log_at;
358 }
359 }
360 if ((num < SCTP_STAT_LOG_SIZE) &&
361 ((sctp_cwnd_log_rolled) || (sctp_cwnd_log_at > num))) {
362 /* we can't return all of it */
363 if (((req->start_at == 0) && (req->end_at == 0)) ||
364 (req->start_at >= SCTP_STAT_LOG_SIZE) ||
365 (req->end_at >= SCTP_STAT_LOG_SIZE)) {
366 /* No user request or user is wacked. */
367 req->num_ret = num;
368 req->end_at = sctp_cwnd_log_at - 1;
369 if ((sctp_cwnd_log_at - num) < 0) {
370 int cc;
371 cc = num - sctp_cwnd_log_at;
372 req->start_at = SCTP_STAT_LOG_SIZE - cc;
373 } else {
374 req->start_at = sctp_cwnd_log_at - num;
375 }
376 } else {
377 /* a user request */
378 int cc;
379 if (req->start_at > req->end_at) {
380 cc = (SCTP_STAT_LOG_SIZE - req->start_at) +
381 (req->end_at + 1);
382 } else {
383
384 cc = req->end_at - req->start_at;
385 }
386 if (cc < num) {
387 num = cc;
388 }
389 req->num_ret = num;
390 }
391 } else {
392 /* We can return all of it */
393 req->start_at = 0;
394 req->end_at = sctp_cwnd_log_at - 1;
395 req->num_ret = sctp_cwnd_log_at;
396 }
397 for (i = 0, at = req->start_at; i < req->num_ret; i++) {
398 req->log[i] = sctp_clog[at];
399 cnt_out++;
400 at++;
401 if (at >= SCTP_STAT_LOG_SIZE)
402 at = 0;
403 }
404 m->m_len = (cnt_out * sizeof(struct sctp_cwnd_log_req)) + sizeof(struct sctp_cwnd_log_req);
405 return (0);
406 }
407
408 #endif
409
410 #ifdef SCTP_AUDITING_ENABLED
411 u_int8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
412 static int sctp_audit_indx = 0;
413
414 static void
415 sctp_print_audit_report(void)
416 {
417 int i;
418 int cnt;
419 cnt = 0;
420 for (i=sctp_audit_indx;i<SCTP_AUDIT_SIZE;i++) {
421 if ((sctp_audit_data[i][0] == 0xe0) &&
422 (sctp_audit_data[i][1] == 0x01)) {
423 cnt = 0;
424 kprintf("\n");
425 } else if (sctp_audit_data[i][0] == 0xf0) {
426 cnt = 0;
427 kprintf("\n");
428 } else if ((sctp_audit_data[i][0] == 0xc0) &&
429 (sctp_audit_data[i][1] == 0x01)) {
430 kprintf("\n");
431 cnt = 0;
432 }
433 kprintf("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
434 (uint32_t)sctp_audit_data[i][1]);
435 cnt++;
436 if ((cnt % 14) == 0)
437 kprintf("\n");
438 }
439 for (i=0;i<sctp_audit_indx;i++) {
440 if ((sctp_audit_data[i][0] == 0xe0) &&
441 (sctp_audit_data[i][1] == 0x01)) {
442 cnt = 0;
443 kprintf("\n");
444 } else if (sctp_audit_data[i][0] == 0xf0) {
445 cnt = 0;
446 kprintf("\n");
447 } else if ((sctp_audit_data[i][0] == 0xc0) &&
448 (sctp_audit_data[i][1] == 0x01)) {
449 kprintf("\n");
450 cnt = 0;
451 }
452 kprintf("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
453 (uint32_t)sctp_audit_data[i][1]);
454 cnt++;
455 if ((cnt % 14) == 0)
456 kprintf("\n");
457 }
458 kprintf("\n");
459 }
460
461 void
462 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
463 struct sctp_nets *net)
464 {
465 int resend_cnt, tot_out, rep, tot_book_cnt;
466 struct sctp_nets *lnet;
467 struct sctp_tmit_chunk *chk;
468
469 sctp_audit_data[sctp_audit_indx][0] = 0xAA;
470 sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
471 sctp_audit_indx++;
472 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
473 sctp_audit_indx = 0;
474 }
475 if (inp == NULL) {
476 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
477 sctp_audit_data[sctp_audit_indx][1] = 0x01;
478 sctp_audit_indx++;
479 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
480 sctp_audit_indx = 0;
481 }
482 return;
483 }
484 if (stcb == NULL) {
485 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
486 sctp_audit_data[sctp_audit_indx][1] = 0x02;
487 sctp_audit_indx++;
488 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
489 sctp_audit_indx = 0;
490 }
491 return;
492 }
493 sctp_audit_data[sctp_audit_indx][0] = 0xA1;
494 sctp_audit_data[sctp_audit_indx][1] =
495 (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
496 sctp_audit_indx++;
497 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
498 sctp_audit_indx = 0;
499 }
500 rep = 0;
501 tot_book_cnt = 0;
502 resend_cnt = tot_out = 0;
503 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
504 if (chk->sent == SCTP_DATAGRAM_RESEND) {
505 resend_cnt++;
506 } else if (chk->sent < SCTP_DATAGRAM_RESEND) {
507 tot_out += chk->book_size;
508 tot_book_cnt++;
509 }
510 }
511 if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
512 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
513 sctp_audit_data[sctp_audit_indx][1] = 0xA1;
514 sctp_audit_indx++;
515 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
516 sctp_audit_indx = 0;
517 }
518 kprintf("resend_cnt:%d asoc-tot:%d\n",
519 resend_cnt, stcb->asoc.sent_queue_retran_cnt);
520 rep = 1;
521 stcb->asoc.sent_queue_retran_cnt = resend_cnt;
522 sctp_audit_data[sctp_audit_indx][0] = 0xA2;
523 sctp_audit_data[sctp_audit_indx][1] =
524 (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
525 sctp_audit_indx++;
526 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
527 sctp_audit_indx = 0;
528 }
529 }
530 if (tot_out != stcb->asoc.total_flight) {
531 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
532 sctp_audit_data[sctp_audit_indx][1] = 0xA2;
533 sctp_audit_indx++;
534 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
535 sctp_audit_indx = 0;
536 }
537 rep = 1;
538 kprintf("tot_flt:%d asoc_tot:%d\n", tot_out,
539 (int)stcb->asoc.total_flight);
540 stcb->asoc.total_flight = tot_out;
541 }
542 if (tot_book_cnt != stcb->asoc.total_flight_count) {
543 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
544 sctp_audit_data[sctp_audit_indx][1] = 0xA5;
545 sctp_audit_indx++;
546 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
547 sctp_audit_indx = 0;
548 }
549 rep = 1;
550 kprintf("tot_flt_book:%d\n", tot_book);
551
552 stcb->asoc.total_flight_count = tot_book_cnt;
553 }
554 tot_out = 0;
555 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
556 tot_out += lnet->flight_size;
557 }
558 if (tot_out != stcb->asoc.total_flight) {
559 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
560 sctp_audit_data[sctp_audit_indx][1] = 0xA3;
561 sctp_audit_indx++;
562 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
563 sctp_audit_indx = 0;
564 }
565 rep = 1;
566 kprintf("real flight:%d net total was %d\n",
567 stcb->asoc.total_flight, tot_out);
568 /* now corrective action */
569 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
570 tot_out = 0;
571 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
572 if ((chk->whoTo == lnet) &&
573 (chk->sent < SCTP_DATAGRAM_RESEND)) {
574 tot_out += chk->book_size;
575 }
576 }
577 if (lnet->flight_size != tot_out) {
578 kprintf("net:%x flight was %d corrected to %d\n",
579 (uint32_t)lnet, lnet->flight_size, tot_out);
580 lnet->flight_size = tot_out;
581 }
582
583 }
584 }
585
586 if (rep) {
587 sctp_print_audit_report();
588 }
589 }
590
591 void
592 sctp_audit_log(u_int8_t ev, u_int8_t fd)
593 {
594 crit_enter();
595 sctp_audit_data[sctp_audit_indx][0] = ev;
596 sctp_audit_data[sctp_audit_indx][1] = fd;
597 sctp_audit_indx++;
598 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
599 sctp_audit_indx = 0;
600 }
601 crit_exit();
602 }
603
604 #endif
605
606 /*
607 * a list of sizes based on typical mtu's, used only if next hop
608 * size not returned.
609 */
static int sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};

/*
 * Given the size of a datagram that failed to be sent, return the next
 * smaller "typical" MTU from the table above.  Sizes at or below the
 * smallest entry return the smallest entry; sizes above the largest
 * entry also fall back to the smallest entry (original behavior kept).
 */
int
find_next_best_mtu(int totsz)
{
	int i, perfer;

	/*
	 * Derive the table length from the table itself instead of the
	 * hard-coded NUMBER_OF_MTU_SIZES macro, which silently breaks if
	 * an entry is added or removed.
	 */
	perfer = 0;
	for (i = 0; i < (int)(sizeof(sctp_mtu_sizes) / sizeof(sctp_mtu_sizes[0])); i++) {
		if (totsz < sctp_mtu_sizes[i]) {
			perfer = i - 1;
			if (perfer < 0)
				perfer = 0;
			break;
		}
	}
	return (sctp_mtu_sizes[perfer]);
}
650
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Refill the endpoint's random_store by hashing the endpoint's
	 * random seed bytes together with a monotonically increasing
	 * counter, then reset the consumption cursor (store_at) to 0.
	 *
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers
	 * and our counter. The result becomes our good random numbers and
	 * we then setup to give these out. Note that we do no locking
	 * to protect this. This is ok, since if competing folks call
	 * this we will get more gobbled gook in the random store which
	 * is what we want. There is a danger that two guys will use
	 * the same random numbers, but that's ok too since that
	 * is random as well :->
	 */
	m->store_at = 0;
	sctp_hash_digest((char *)m->random_numbers, sizeof(m->random_numbers),
	    (char *)&m->random_counter, sizeof(m->random_counter),
	    (char *)m->random_store);
	/* bump the counter so the next refill produces different output */
	m->random_counter++;
}
670
671 uint32_t
672 sctp_select_initial_TSN(struct sctp_pcb *m)
673 {
674 /*
675 * A true implementation should use random selection process to
676 * get the initial stream sequence number, using RFC1750 as a
677 * good guideline
678 */
679 u_long x, *xp;
680 uint8_t *p;
681
682 if (m->initial_sequence_debug != 0) {
683 u_int32_t ret;
684 ret = m->initial_sequence_debug;
685 m->initial_sequence_debug++;
686 return (ret);
687 }
688 if ((m->store_at+sizeof(u_long)) > SCTP_SIGNATURE_SIZE) {
689 /* Refill the random store */
690 sctp_fill_random_store(m);
691 }
692 p = &m->random_store[(int)m->store_at];
693 xp = (u_long *)p;
694 x = *xp;
695 m->store_at += sizeof(u_long);
696 return (x);
697 }
698
699 u_int32_t
700 sctp_select_a_tag(struct sctp_inpcb *m)
701 {
702 u_long x, not_done;
703 struct timeval now;
704
705 SCTP_GETTIME_TIMEVAL(&now);
706 not_done = 1;
707 while (not_done) {
708 x = sctp_select_initial_TSN(&m->sctp_ep);
709 if (x == 0) {
710 /* we never use 0 */
711 continue;
712 }
713 if (sctp_is_vtag_good(m, x, &now)) {
714 not_done = 0;
715 }
716 }
717 return (x);
718 }
719
720
/*
 * Initialize a freshly allocated association.  Fields not set here are
 * assumed zero from the allocator's bzero.  Returns 0 on success or
 * ENOMEM if the stream-out array or mapping array cannot be allocated
 * (the stream-out array is freed again on the second failure).
 */
int
sctp_init_asoc(struct sctp_inpcb *m, struct sctp_association *asoc,
    int for_a_init, uint32_t override_tag )
{
	/*
	 * Anything set to zero is taken care of by the allocation
	 * routine's bzero
	 */

	/*
	 * Up front select what scoping to apply on addresses I tell my peer
	 * Not sure what to do with these right now, we will need to come up
	 * with a way to set them. We may need to pass them through from the
	 * caller in the sctp_aloc_assoc() function.
	 */
	int i;
	/* init all variables to a known value.*/
	asoc->state = SCTP_STATE_INUSE;
	asoc->max_burst = m->sctp_ep.max_burst;
	asoc->heart_beat_delay = m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT];
	asoc->cookie_life = m->sctp_ep.def_cookie_life;

	/* caller may force a specific vtag (e.g. cookie restart) */
	if (override_tag) {
		asoc->my_vtag = override_tag;
	} else {
		asoc->my_vtag = sctp_select_a_tag(m);
	}
	/* all four sequence spaces start from one random initial TSN */
	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
	    sctp_select_initial_TSN(&m->sctp_ep);
	asoc->t3timeout_highest_marked = asoc->asconf_seq_out;
	/* we are optimistic here: assume the peer supports the extensions */
	asoc->peer_supports_asconf = 1;
	asoc->peer_supports_asconf_setprim = 1;
	asoc->peer_supports_pktdrop = 1;

	asoc->sent_queue_retran_cnt = 0;
	/* This will need to be adjusted */
	asoc->last_cwr_tsn = asoc->init_seq_number - 1;
	asoc->last_acked_seq = asoc->init_seq_number - 1;
	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
	asoc->asconf_seq_in = asoc->last_acked_seq;

	/* here we are different, we hold the next one we expect */
	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;

	asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
	asoc->initial_rto = m->sctp_ep.initial_rto;

	asoc->max_init_times = m->sctp_ep.max_init_times;
	asoc->max_send_times = m->sctp_ep.max_send_times;
	asoc->def_net_failure = m->sctp_ep.def_net_failure;

	/* ECN Nonce initialization */
	asoc->ecn_nonce_allowed = 0;
	asoc->receiver_nonce_sum = 1;
	asoc->nonce_sum_expect_base = 1;
	asoc->nonce_sum_check = 1;
	asoc->nonce_resync_tsn = 0;
	asoc->nonce_wait_for_ecne = 0;
	asoc->nonce_wait_tsn = 0;

	if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		struct in6pcb *inp6;


		/* Its a V6 socket */
		inp6 = (struct in6pcb *)m;
		asoc->ipv6_addr_legal = 1;
		/* Now look at the binding flag to see if V4 will be legal */
		if (
#if defined(__OpenBSD__)
		(0) /* we always do dual bind */
#elif defined (__NetBSD__)
		(inp6->in6p_flags & IN6P_IPV6_V6ONLY)
#else
		(inp6->inp_flags & IN6P_IPV6_V6ONLY)
#endif
		    == 0) {
			asoc->ipv4_addr_legal = 1;
		} else {
			/* V4 addresses are NOT legal on the association */
			asoc->ipv4_addr_legal = 0;
		}
	} else {
		/* Its a V4 socket, no - V6 */
		asoc->ipv4_addr_legal = 1;
		asoc->ipv6_addr_legal = 0;
	}


	/* receive window starts at the socket buffer high-water mark */
	asoc->my_rwnd = max(m->sctp_socket->so_rcv.ssb_hiwat, SCTP_MINIMAL_RWND);
	asoc->peers_rwnd = m->sctp_socket->so_rcv.ssb_hiwat;

	asoc->smallest_mtu = m->sctp_frag_point;
	asoc->minrto = m->sctp_ep.sctp_minrto;
	asoc->maxrto = m->sctp_ep.sctp_maxrto;

	LIST_INIT(&asoc->sctp_local_addr_list);
	TAILQ_INIT(&asoc->nets);
	TAILQ_INIT(&asoc->pending_reply_queue);
	asoc->last_asconf_ack_sent = NULL;
	/* Setup to fill the hb random cache at first HB */
	asoc->hb_random_idx = 4;

	asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;

	/*
	 * Now the stream parameters, here we allocate space for all
	 * streams that we request by default.
	 */
	asoc->streamoutcnt = asoc->pre_open_streams =
	    m->sctp_ep.pre_open_stream_count;
	asoc->strmout = kmalloc(asoc->streamoutcnt * sizeof(struct sctp_stream_out),
			       M_PCB, M_NOWAIT);
	if (asoc->strmout == NULL) {
		/* big trouble no memory */
		return (ENOMEM);
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/*
		 * inbound side must be set to 0xffff,
		 * also NOTE when we get the INIT-ACK back (for INIT sender)
		 * we MUST reduce the count (streamoutcnt) but first check
		 * if we sent to any of the upper streams that were dropped
		 * (if some were). Those that were dropped must be notified
		 * to the upper layer as failed to send.
		 */
		asoc->strmout[i].next_sequence_sent = 0x0;
		TAILQ_INIT(&asoc->strmout[i].outqueue);
		asoc->strmout[i].stream_no = i;
		asoc->strmout[i].next_spoke.tqe_next = 0;
		asoc->strmout[i].next_spoke.tqe_prev = 0;
	}
	/* Now the mapping array */
	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
#ifdef __NetBSD__
	asoc->mapping_array = kmalloc(SCTP_INITIAL_MAPPING_ARRAY, M_PCB,
	    M_NOWAIT);
#else
	asoc->mapping_array = kmalloc(asoc->mapping_array_size, M_PCB,
	    M_NOWAIT);
#endif
	if (asoc->mapping_array == NULL) {
		/* unwind the stream-out allocation before failing */
		kfree(asoc->strmout, M_PCB);
		return (ENOMEM);
	}
	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
	/* Now the init of the other outqueues */
	TAILQ_INIT(&asoc->out_wheel);
	TAILQ_INIT(&asoc->control_send_queue);
	TAILQ_INIT(&asoc->send_queue);
	TAILQ_INIT(&asoc->sent_queue);
	TAILQ_INIT(&asoc->reasmqueue);
	TAILQ_INIT(&asoc->delivery_queue);
	asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;

	TAILQ_INIT(&asoc->asconf_queue);
	return (0);
}
880
881 int
882 sctp_expand_mapping_array(struct sctp_association *asoc)
883 {
884 /* mapping array needs to grow */
885 u_int8_t *new_array;
886 uint16_t new_size;
887
888 new_size = asoc->mapping_array_size + SCTP_MAPPING_ARRAY_INCR;
889 #ifdef __NetBSD__
890 new_array = kmalloc(asoc->mapping_array_size + SCTP_MAPPING_ARRAY_INCR,
891 M_PCB, M_NOWAIT);
892 #else
893 new_array = kmalloc(new_size, M_PCB, M_NOWAIT);
894 #endif
895 if (new_array == NULL) {
896 /* can't get more, forget it */
897 kprintf("No memory for expansion of SCTP mapping array %d\n",
898 new_size);
899 return (-1);
900 }
901 memset(new_array, 0, new_size);
902 memcpy(new_array, asoc->mapping_array, asoc->mapping_array_size);
903 kfree(asoc->mapping_array, M_PCB);
904 asoc->mapping_array = new_array;
905 asoc->mapping_array_size = new_size;
906 return (0);
907 }
908
909 static void
910 sctp_timeout_handler(void *t)
911 {
912 struct sctp_inpcb *inp;
913 struct sctp_tcb *stcb;
914 struct sctp_nets *net;
915 struct sctp_timer *tmr;
916 int did_output, typ;
917 #if defined(__APPLE__)
918 boolean_t funnel_state;
919
920 /* get BSD kernel funnel/mutex */
921 funnel_state = thread_funnel_set(network_flock, TRUE);
922 #endif
923
924 crit_enter();
925 tmr = (struct sctp_timer *)t;
926 inp = (struct sctp_inpcb *)tmr->ep;
927 stcb = (struct sctp_tcb *)tmr->tcb;
928 net = (struct sctp_nets *)tmr->net;
929 did_output = 1;
930
931
932 #ifdef SCTP_AUDITING_ENABLED
933 sctp_audit_log(0xF0, (u_int8_t)tmr->type);
934 sctp_auditing(3, inp, stcb, net);
935 #endif
936 sctp_pegs[SCTP_TIMERS_EXP]++;
937
938 if (inp == NULL) {
939 crit_exit();
940 return;
941 }
942
943 SCTP_INP_WLOCK(inp);
944 if (inp->sctp_socket == 0) {
945 crit_exit();
946 #if defined(__APPLE__)
947 /* release BSD kernel funnel/mutex */
948 thread_funnel_set(network_flock, FALSE);
949 #endif
950 SCTP_INP_WUNLOCK(inp);
951 return;
952 }
953 if (stcb) {
954 if (stcb->asoc.state == 0) {
955 crit_exit();
956 #if defined(__APPLE__)
957 /* release BSD kernel funnel/mutex */
958 thread_funnel_set(network_flock, FALSE);
959 #endif
960 SCTP_INP_WUNLOCK(inp);
961 return;
962 }
963 }
964 #ifdef SCTP_DEBUG
965 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
966 kprintf("Timer type %d goes off\n", tmr->type);
967 }
968 #endif /* SCTP_DEBUG */
969 #ifndef __NetBSD__
970 if (!callout_active(&tmr->timer)) {
971 crit_exit();
972 #if defined(__APPLE__)
973 /* release BSD kernel funnel/mutex */
974 thread_funnel_set(network_flock, FALSE);
975 #endif
976 SCTP_INP_WUNLOCK(inp);
977 return;
978 }
979 #endif
980 #if defined(__APPLE__)
981 /* clear the callout pending status here */
982 callout_stop(&tmr->timer);
983 #endif
984 if (stcb) {
985 SCTP_TCB_LOCK(stcb);
986 }
987 SCTP_INP_INCR_REF(inp);
988 SCTP_INP_WUNLOCK(inp);
989
990 typ = tmr->type;
991 switch (tmr->type) {
992 case SCTP_TIMER_TYPE_ITERATOR:
993 {
994 struct sctp_iterator *it;
995 it = (struct sctp_iterator *)inp;
996 sctp_iterator_timer(it);
997 }
998 break;
999 /* call the handler for the appropriate timer type */
1000 case SCTP_TIMER_TYPE_SEND:
1001 sctp_pegs[SCTP_TMIT_TIMER]++;
1002 stcb->asoc.num_send_timers_up--;
1003 if (stcb->asoc.num_send_timers_up < 0) {
1004 stcb->asoc.num_send_timers_up = 0;
1005 }
1006 if (sctp_t3rxt_timer(inp, stcb, net)) {
1007 /* no need to unlock on tcb its gone */
1008
1009 goto out_decr;
1010 }
1011 #ifdef SCTP_AUDITING_ENABLED
1012 sctp_auditing(4, inp, stcb, net);
1013 #endif
1014 sctp_chunk_output(inp, stcb, 1);
1015 if ((stcb->asoc.num_send_timers_up == 0) &&
1016 (stcb->asoc.sent_queue_cnt > 0)
1017 ) {
1018 struct sctp_tmit_chunk *chk;
1019 /*
1020 * safeguard. If there on some on the sent queue
1021 * somewhere but no timers running something is
1022 * wrong... so we start a timer on the first chunk
1023 * on the send queue on whatever net it is sent to.
1024 */
1025 sctp_pegs[SCTP_T3_SAFEGRD]++;
1026 chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
1027 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
1028 chk->whoTo);
1029 }
1030 break;
1031 case SCTP_TIMER_TYPE_INIT:
1032 if (sctp_t1init_timer(inp, stcb, net)) {
1033 /* no need to unlock on tcb its gone */
1034 goto out_decr;
1035 }
1036 /* We do output but not here */
1037 did_output = 0;
1038 break;
1039 case SCTP_TIMER_TYPE_RECV:
1040 sctp_pegs[SCTP_RECV_TIMER]++;
1041 sctp_send_sack(stcb);
1042 #ifdef SCTP_AUDITING_ENABLED
1043 sctp_auditing(4, inp, stcb, net);
1044 #endif
1045 sctp_chunk_output(inp, stcb, 4);
1046 break;
1047 case SCTP_TIMER_TYPE_SHUTDOWN:
1048 if (sctp_shutdown_timer(inp, stcb, net) ) {
1049 /* no need to unlock on tcb its gone */
1050 goto out_decr;
1051 }
1052 #ifdef SCTP_AUDITING_ENABLED
1053 sctp_auditing(4, inp, stcb, net);
1054 #endif
1055 sctp_chunk_output(inp, stcb, 5);
1056 break;
1057 case SCTP_TIMER_TYPE_HEARTBEAT:
1058 if (sctp_heartbeat_timer(inp, stcb, net)) {
1059 /* no need to unlock on tcb its gone */
1060 goto out_decr;
1061 }
1062 #ifdef SCTP_AUDITING_ENABLED
1063 sctp_auditing(4, inp, stcb, net);
1064 #endif
1065 sctp_chunk_output(inp, stcb, 6);
1066 break;
1067 case SCTP_TIMER_TYPE_COOKIE:
1068 if (sctp_cookie_timer(inp, stcb, net)) {
1069 /* no need to unlock on tcb its gone */
1070 goto out_decr;
1071 }
1072 #ifdef SCTP_AUDITING_ENABLED
1073 sctp_auditing(4, inp, stcb, net);
1074 #endif
1075 sctp_chunk_output(inp, stcb, 1);
1076 break;
1077 case SCTP_TIMER_TYPE_NEWCOOKIE:
1078 {
1079 struct timeval tv;
1080 int i, secret;
1081 SCTP_GETTIME_TIMEVAL(&tv);
1082 SCTP_INP_WLOCK(inp);
1083 inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1084 inp->sctp_ep.last_secret_number =
1085 inp->sctp_ep.current_secret_number;
1086 inp->sctp_ep.current_secret_number++;
1087 if (inp->sctp_ep.current_secret_number >=
1088 SCTP_HOW_MANY_SECRETS) {
1089 inp->sctp_ep.current_secret_number = 0;
1090 }
1091 secret = (int)inp->sctp_ep.current_secret_number;
1092 for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1093 inp->sctp_ep.secret_key[secret][i] =
1094 sctp_select_initial_TSN(&inp->sctp_ep);
1095 }
1096 SCTP_INP_WUNLOCK(inp);
1097 sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
1098 }
1099 did_output = 0;
1100 break;
1101 case SCTP_TIMER_TYPE_PATHMTURAISE:
1102 sctp_pathmtu_timer(inp, stcb, net);
1103 did_output = 0;
1104 break;
1105 case SCTP_TIMER_TYPE_SHUTDOWNACK:
1106 if (sctp_shutdownack_timer(inp, stcb, net)) {
1107 /* no need to unlock on tcb its gone */
1108 goto out_decr;
1109 }
1110 #ifdef SCTP_AUDITING_ENABLED
1111 sctp_auditing(4, inp, stcb, net);
1112 #endif
1113 sctp_chunk_output(inp, stcb, 7);
1114 break;
1115 case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1116 sctp_abort_an_association(inp, stcb,
1117 SCTP_SHUTDOWN_GUARD_EXPIRES, NULL);
1118 /* no need to unlock on tcb its gone */
1119 goto out_decr;
1120 break;
1121
1122 case SCTP_TIMER_TYPE_STRRESET:
1123 if (sctp_strreset_timer(inp, stcb, net)) {
1124 /* no need to unlock on tcb its gone */
1125 goto out_decr;
1126 }
1127 sctp_chunk_output(inp, stcb, 9);
1128 break;
1129
1130 case SCTP_TIMER_TYPE_ASCONF:
1131 if (sctp_asconf_timer(inp, stcb, net)) {
1132 /* no need to unlock on tcb its gone */
1133 goto out_decr;
1134 }
1135 #ifdef SCTP_AUDITING_ENABLED
1136 sctp_auditing(4, inp, stcb, net);
1137 #endif
1138 sctp_chunk_output(inp, stcb, 8);
1139 break;
1140
1141 case SCTP_TIMER_TYPE_AUTOCLOSE:
1142 sctp_autoclose_timer(inp, stcb, net);
1143 sctp_chunk_output(inp, stcb, 10);
1144 did_output = 0;
1145 break;
1146 case SCTP_TIMER_TYPE_INPKILL:
1147 /* special case, take away our
1148 * increment since WE are the killer
1149 */
1150 SCTP_INP_WLOCK(inp);
1151 SCTP_INP_DECR_REF(inp);
1152 SCTP_INP_WUNLOCK(inp);
1153 sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL);
1154 sctp_inpcb_free(inp, 1);
1155 goto out_no_decr;
1156 break;
1157 default:
1158 #ifdef SCTP_DEBUG
1159 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
1160 kprintf("sctp_timeout_handler:unknown timer %d\n",
1161 tmr->type);
1162 }
1163 #endif /* SCTP_DEBUG */
1164 break;
1165 };
1166 #ifdef SCTP_AUDITING_ENABLED
1167 sctp_audit_log(0xF1, (u_int8_t)tmr->type);
1168 sctp_auditing(5, inp, stcb, net);
1169 #endif
1170 if (did_output) {
1171 /*
1172 * Now we need to clean up the control chunk chain if an
1173 * ECNE is on it. It must be marked as UNSENT again so next
1174 * call will continue to send it until such time that we get
1175 * a CWR, to remove it. It is, however, less likely that we
1176 * will find a ecn echo on the chain though.
1177 */
1178 sctp_fix_ecn_echo(&stcb->asoc);
1179 }
1180 if (stcb) {
1181 SCTP_TCB_UNLOCK(stcb);
1182 }
1183 out_decr:
1184 SCTP_INP_WLOCK(inp);
1185 SCTP_INP_DECR_REF(inp);
1186 SCTP_INP_WUNLOCK(inp);
1187
1188 out_no_decr:
1189
1190 #ifdef SCTP_DEBUG
1191 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
1192 kprintf("Timer now complete (type %d)\n", typ);
1193 }
1194 #endif /* SCTP_DEBUG */
1195
1196 crit_exit();
1197 #if defined(__APPLE__)
1198 /* release BSD kernel funnel/mutex */
1199 thread_funnel_set(network_flock, FALSE);
1200 #endif
1201 }
1202
/*
 * Arm the timer of type 't_type' for the given endpoint (and, where the
 * type requires it, association 'stcb' and destination 'net').  The
 * timeout value is resolved per type: most retransmit-style timers use
 * the destination's RTO (falling back to the association's initial RTO
 * when the path has never been measured), while endpoint-level timers
 * read preconfigured tick counts from the inp.
 *
 * Returns 0 on success, EALREADY if the timer is already pending (the
 * running timer is left untouched), and EFAULT on a missing argument,
 * an unknown type, or when no timer/tick value could be resolved.
 */
int
sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int to_ticks;		/* timeout to arm, in clock ticks */
	struct sctp_timer *tmr;	/* timer slot selected for this type */

	if (inp == NULL)
		return (EFAULT);

	to_ticks = 0;

	tmr = NULL;
	switch (t_type) {
	case SCTP_TIMER_TYPE_ITERATOR:
	{
		/* for the iterator, 'inp' actually carries the iterator */
		struct sctp_iterator *it;
		it = (struct sctp_iterator *)inp;
		tmr = &it->tmr;
		to_ticks = SCTP_ITERATOR_TICKS;
	}
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* Here we use the RTO timer */
		{
			int rto_val;
			if ((stcb == NULL) || (net == NULL)) {
				return (EFAULT);
			}
			tmr = &net->rxt_timer;
			if (net->RTO == 0) {
				/* path never measured: use association default */
				rto_val = stcb->asoc.initial_rto;
			} else {
				rto_val = net->RTO;
			}
			to_ticks = MSEC_TO_TICKS(rto_val);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/*
		 * Here we use the INIT timer default
		 * usually about 1 minute.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return (EFAULT);
		}
		tmr = &net->rxt_timer;
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_RECV:
		/*
		 * Here we use the Delayed-Ack timer value from the inp,
		 * usually about 200ms.
		 */
		if (stcb == NULL) {
			return (EFAULT);
		}
		tmr = &stcb->asoc.dack_timer;
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV];
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		/* Here we use the RTO of the destination. */
		if ((stcb == NULL) || (net == NULL)) {
			return (EFAULT);
		}

		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		/*
		 * the net is used here so that we can add in the RTO.
		 * Even though we use a different timer. We also add the
		 * HB timer PLUS a random jitter.
		 */
		if (stcb == NULL) {
			return (EFAULT);
		}
		{
			uint32_t rndval;
			uint8_t this_random;
			int cnt_of_unconf=0;
			struct sctp_nets *lnet;

			/* count destinations still awaiting confirmation */
			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
				if (lnet->dest_state & SCTP_ADDR_UNCONFIRMED) {
					cnt_of_unconf++;
				}
			}
#ifdef SCTP_DEBUG
			if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
				kprintf("HB timer to start unconfirmed:%d hb_delay:%d\n",
				    cnt_of_unconf, stcb->asoc.heart_beat_delay);
			}
#endif
			/*
			 * Jitter bytes are drawn from a 4-byte random pool;
			 * refill it once every 4 draws.
			 */
			if (stcb->asoc.hb_random_idx > 3) {
				rndval = sctp_select_initial_TSN(&inp->sctp_ep);
				memcpy(stcb->asoc.hb_random_values, &rndval,
				    sizeof(stcb->asoc.hb_random_values));
				this_random = stcb->asoc.hb_random_values[0];
				stcb->asoc.hb_random_idx = 0;
				stcb->asoc.hb_ect_randombit = 0;
			} else {
				this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
				stcb->asoc.hb_random_idx++;
				stcb->asoc.hb_ect_randombit = 0;
			}
			/*
			 * this_random will be 0 - 255 ms (uint8_t);
			 * RTO is in ms.
			 */
			if ((stcb->asoc.heart_beat_delay == 0) &&
			    (cnt_of_unconf == 0)) {
				/* no HB on this inp after confirmations */
				return (0);
			}
			if (net) {
				/* NB: this 'lnet' shadows the outer one above */
				struct sctp_nets *lnet;
				int delay;
				delay = stcb->asoc.heart_beat_delay;
				/*
				 * drop the extra delay while any reachable,
				 * in-scope destination is still unconfirmed,
				 * so confirmation HBs go out promptly
				 */
				TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
					if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
					    ((lnet->dest_state & SCTP_ADDR_OUT_OF_SCOPE) == 0) &&
					    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
						delay = 0;
					}
				}
				if (net->RTO == 0) {
					/* Never been checked */
					to_ticks = this_random + stcb->asoc.initial_rto + delay;
				} else {
					/* set rto_val to the ms */
					to_ticks = delay + net->RTO + this_random;
				}
			} else {
				if (cnt_of_unconf) {
					to_ticks = this_random + stcb->asoc.initial_rto;
				} else {
					to_ticks = stcb->asoc.heart_beat_delay + this_random + stcb->asoc.initial_rto;
				}
			}
			/*
			 * Now we must convert the to_ticks that are now in
			 * ms to ticks.  (Other cases use MSEC_TO_TICKS;
			 * this case does the same conversion by hand.)
			 */
			to_ticks *= hz;
			to_ticks /= 1000;
#ifdef SCTP_DEBUG
			if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
				kprintf("Timer to expire in %d ticks\n", to_ticks);
			}
#endif
			tmr = &stcb->asoc.hb_timer;
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		/*
		 * Here we can use the RTO timer from the network since
		 * one RTT was complete. If a retran happened then we will
		 * be using the RTO initial value.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return (EFAULT);
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/*
		 * nothing needed but the endpoint here,
		 * usually about 60 minutes.
		 */
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
		break;
	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the
		 * signature_change timer since that has
		 * stopped and we are in the GONE state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = (SCTP_INP_KILL_TIMEOUT * hz) / 1000;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		/*
		 * Here we use the value found in the EP for PMTU,
		 * usually about 10 minutes.
		 */
		if (stcb == NULL) {
			return (EFAULT);
		}
		if (net == NULL) {
			return (EFAULT);
		}
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		/* Here we use the RTO of the destination */
		if ((stcb == NULL) || (net == NULL)) {
			return (EFAULT);
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/*
		 * Here we use the endpoints shutdown guard timer,
		 * usually about 3 minutes.
		 */
		if (stcb == NULL) {
			return (EFAULT);
		}
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		/*
		 * Here the timer comes from the stcb
		 * but its value is from the RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return (EFAULT);
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_ASCONF:
		/*
		 * Here the timer comes from the stcb
		 * but its value is from the RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return (EFAULT);
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return (EFAULT);
		}
		if (stcb->asoc.sctp_autoclose_ticks == 0) {
			/* Really an error since stcb is NOT set to autoclose */
			return (0);
		}
		to_ticks = stcb->asoc.sctp_autoclose_ticks;
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
			kprintf("sctp_timer_start:Unknown timer type %d\n",
			    t_type);
		}
#endif /* SCTP_DEBUG */
		return (EFAULT);
		break;
	};
	/* sanity: every case must have resolved a timer and a timeout */
	if ((to_ticks <= 0) || (tmr == NULL)) {
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
			kprintf("sctp_timer_start:%d:software error to_ticks:%d tmr:%p not set ??\n",
			    t_type, to_ticks, tmr);
		}
#endif /* SCTP_DEBUG */
		return (EFAULT);
	}
	if (callout_pending(&tmr->timer)) {
		/*
		 * we do NOT allow you to have it already running.
		 * if it is we leave the current one up unchanged
		 */
		return (EALREADY);
	}
	/* At this point we can proceed */
	if (t_type == SCTP_TIMER_TYPE_SEND) {
		/* track outstanding T3 timers (see safeguard in handler) */
		stcb->asoc.num_send_timers_up++;
	}
	/* record context for the timeout handler, then arm the callout */
	tmr->type = t_type;
	tmr->ep = (void *)inp;
	tmr->tcb = (void *)stcb;
	tmr->net = (void *)net;
	callout_reset(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
	return (0);
}
1515
/*
 * Stop the timer of type 't_type' on the given endpoint/association/net.
 * Locates the timer slot for the type (mirroring sctp_timer_start), then
 * cancels the callout.  If the slot is currently in use by a DIFFERENT
 * timer type (several types share one slot, e.g. COOKIE and SEND both
 * use net->rxt_timer), the request is silently ignored so we never kill
 * someone else's timer.
 *
 * Returns 0 on success or no-op, EFAULT on bad arguments.
 */
int
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_timer *tmr;

	if (inp == NULL)
		return (EFAULT);

	tmr = NULL;
	switch (t_type) {
	case SCTP_TIMER_TYPE_ITERATOR:
	{
		/* for the iterator, 'inp' actually carries the iterator */
		struct sctp_iterator *it;
		it = (struct sctp_iterator *)inp;
		tmr = &it->tmr;
	}
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return (EFAULT);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return (EFAULT);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return (EFAULT);
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return (EFAULT);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if (stcb == NULL) {
			return (EFAULT);
		}
		tmr = &stcb->asoc.hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return (EFAULT);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/* We re-use the newcookie timer for
		 * the INP kill timer. We must assure
		 * that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the
		 * signature_change timer since that has
		 * stopped and we are in the GONE state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if (stcb == NULL) {
			return (EFAULT);
		}
		if (net == NULL) {
			return (EFAULT);
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return (EFAULT);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return (EFAULT);
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return (EFAULT);
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return (EFAULT);
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return (EFAULT);
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
			kprintf("sctp_timer_stop:Unknown timer type %d\n",
			    t_type);
		}
#endif /* SCTP_DEBUG */
		break;
	};
	if (tmr == NULL)
		return (EFAULT);

	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped. So just
		 * return.
		 */
		return (0);
	}
	if (t_type == SCTP_TIMER_TYPE_SEND) {
		/* keep the outstanding-T3 count in sync (clamp at zero) */
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	callout_stop(&tmr->timer);
	return (0);
}
1655
1656 #ifdef SCTP_USE_ADLER32
1657 static uint32_t
1658 update_adler32(uint32_t adler, uint8_t *buf, int32_t len)
1659 {
1660 u_int32_t s1 = adler & 0xffff;
1661 u_int32_t s2 = (adler >> 16) & 0xffff;
1662 int n;
1663
1664 for (n = 0; n < len; n++, buf++) {
1665 /* s1 = (s1 + buf[n]) % BASE */
1666 /* first we add */
1667 s1 = (s1 + *buf);
1668 /*
1669 * now if we need to, we do a mod by subtracting. It seems
1670 * a bit faster since I really will only ever do one subtract
1671 * at the MOST, since buf[n] is a max of 255.
1672 */
1673 if (s1 >= SCTP_ADLER32_BASE) {
1674 s1 -= SCTP_ADLER32_BASE;
1675 }
1676 /* s2 = (s2 + s1) % BASE */
1677 /* first we add */
1678 s2 = (s2 + s1);
1679 /*
1680 * again, it is more efficent (it seems) to subtract since
1681 * the most s2 will ever be is (BASE-1 + BASE-1) in the worse
1682 * case. This would then be (2 * BASE) - 2, which will still
1683 * only do one subtract. On Intel this is much better to do
1684 * this way and avoid the divide. Have not -pg'd on sparc.
1685 */
1686 if (s2 >= SCTP_ADLER32_BASE) {
1687 s2 -= SCTP_ADLER32_BASE;
1688 }
1689 }
1690 /* Return the adler32 of the bytes buf[0..len-1] */
1691 return ((s2 << 16) + s1);
1692 }
1693
1694 #endif
1695
1696
1697 u_int32_t
1698 sctp_calculate_len(struct mbuf *m)
1699 {
1700 u_int32_t tlen=0;
1701 struct mbuf *at;
1702 at = m;
1703 while (at) {
1704 tlen += at->m_len;
1705 at = at->m_next;
1706 }
1707 return (tlen);
1708 }
1709
1710 #if defined(SCTP_WITH_NO_CSUM)
1711
1712 uint32_t
1713 sctp_calculate_sum(struct mbuf *m, int32_t *pktlen, uint32_t offset)
1714 {
1715 /*
1716 * given a mbuf chain with a packetheader offset by 'offset'
1717 * pointing at a sctphdr (with csum set to 0) go through
1718 * the chain of m_next's and calculate the SCTP checksum.
1719 * This is currently Adler32 but will change to CRC32x
1720 * soon. Also has a side bonus calculate the total length
1721 * of the mbuf chain.
1722 * Note: if offset is greater than the total mbuf length,
1723 * checksum=1, pktlen=0 is returned (ie. no real error code)
1724 */
1725 if (pktlen == NULL)
1726 return (0);
1727 *pktlen = sctp_calculate_len(m);
1728 return (0);
1729 }
1730
1731 #elif defined(SCTP_USE_INCHKSUM)
1732
1733 #include <machine/in_cksum.h>
1734
1735 uint32_t
1736 sctp_calculate_sum(struct mbuf *m, int32_t *pktlen, uint32_t offset)
1737 {
1738 /*
1739 * given a mbuf chain with a packetheader offset by 'offset'
1740 * pointing at a sctphdr (with csum set to 0) go through
1741 * the chain of m_next's and calculate the SCTP checksum.
1742 * This is currently Adler32 but will change to CRC32x
1743 * soon. Also has a side bonus calculate the total length
1744 * of the mbuf chain.
1745 * Note: if offset is greater than the total mbuf length,
1746 * checksum=1, pktlen=0 is returned (ie. no real error code)
1747 */
1748 int32_t tlen=0;
1749 struct mbuf *at;
1750 uint32_t the_sum, retsum;
1751
1752 at = m;
1753 while (at) {
1754 tlen += at->m_len;
1755 at = at->m_next;
1756 }
1757 the_sum = (uint32_t)(in_cksum_skip(m, tlen, offset));
1758 if (pktlen != NULL)
1759 *pktlen = (tlen-offset);
1760 retsum = htons(the_sum);
1761 return (the_sum);
1762 }
1763
1764 #else
1765
/*
 * Compute the SCTP checksum (Adler-32 or CRC-32c, per build option) of
 * the mbuf chain 'm', starting 'offset' bytes in; the caller points
 * 'offset' at the sctphdr, whose csum field must already be zero.
 * When 'pktlen' is non-NULL, the number of bytes covered by the
 * checksum is returned through it.  Returns the finalized checksum in
 * network byte order.
 */
uint32_t
sctp_calculate_sum(struct mbuf *m, int32_t *pktlen, uint32_t offset)
{
	/*
	 * given a mbuf chain with a packetheader offset by 'offset'
	 * pointing at a sctphdr (with csum set to 0) go through
	 * the chain of m_next's and calculate the SCTP checksum.
	 * Also has a side bonus: calculate the total length
	 * of the mbuf chain.
	 */
	int32_t tlen=0;		/* bytes actually folded into the sum */
#ifdef SCTP_USE_ADLER32
	uint32_t base = 1L;		/* Adler-32 initial state */
#else
	uint32_t base = 0xffffffff;	/* CRC-32c initial state */
#endif /* SCTP_USE_ADLER32 */
	struct mbuf *at;
	at = m;
	/* find the correct mbuf and offset into mbuf */
	while ((at != NULL) && (offset > (uint32_t)at->m_len)) {
		offset -= at->m_len;	/* update remaining offset left */
		at = at->m_next;
	}
	/*
	 * NB: the test above is '>' (not '>='), so an offset that lands
	 * exactly on an mbuf boundary stays in that mbuf; the first
	 * update below then covers zero bytes, which is harmless.
	 */

	while (at != NULL) {
#ifdef SCTP_USE_ADLER32
		base = update_adler32(base, at->m_data + offset,
		    at->m_len - offset);
#else
		base = update_crc32(base, at->m_data + offset,
		    at->m_len - offset);
#endif /* SCTP_USE_ADLER32 */
		tlen += at->m_len - offset;
		/* we only offset once into the first mbuf */
		if (offset) {
			offset = 0;
		}
		at = at->m_next;
	}
	if (pktlen != NULL) {
		*pktlen = tlen;
	}
#ifdef SCTP_USE_ADLER32
	/* Adler32: convert to network byte order */
	base = htonl(base);
#else
	/* CRC-32c: final inversion/byte-order handled by the helper */
	base = sctp_csum_finalize(base);
#endif
	return (base);
}
1820
1821
1822 #endif
1823
1824 void
1825 sctp_mtu_size_reset(struct sctp_inpcb *inp,
1826 struct sctp_association *asoc, u_long mtu)
1827 {
1828 /*
1829 * Reset the P-MTU size on this association, this involves changing
1830 * the asoc MTU, going through ANY chunk+overhead larger than mtu
1831 * to allow the DF flag to be cleared.
1832 */
1833 struct sctp_tmit_chunk *chk;
1834 struct sctp_stream_out *strm;
1835 unsigned int eff_mtu, ovh;
1836 asoc->smallest_mtu = mtu;
1837 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1838 ovh = SCTP_MIN_OVERHEAD;
1839 } else {
1840 ovh = SCTP_MIN_V4_OVERHEAD;
1841 }
1842 eff_mtu = mtu - ovh;
1843 /* Now mark any chunks that need to let IP fragment */
1844 TAILQ_FOREACH(strm, &asoc->out_wheel, next_spoke) {
1845 TAILQ_FOREACH(chk, &strm->outqueue, sctp_next) {
1846 if (chk->send_size > eff_mtu) {
1847 chk->flags &= SCTP_DONT_FRAGMENT;
1848 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
1849 }
1850 }
1851 }
1852 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
1853 if (chk->send_size > eff_mtu) {
1854 chk->flags &= SCTP_DONT_FRAGMENT;
1855 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
1856 }
1857 }
1858 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
1859 if (chk->send_size > eff_mtu) {
1860 chk->flags &= SCTP_DONT_FRAGMENT;
1861 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
1862 }
1863 }
1864 }
1865
1866
1867 /*
1868 * given an association and starting time of the current RTT period
1869 * return RTO in number of usecs
1870 * net should point to the current network
1871 */
/*
 * Compute a new RTO for 'net' from the RTT sample that started at time
 * '*old', using Van Jacobson's integer SRTT/RTTVAR algorithm (RFC 2960
 * section 5.3.1 semantics).  Updates net->lastsa / net->lastsv in
 * place and returns the new RTO bounded by the association's
 * minrto/maxrto.  All values are in milliseconds (calc_time is built
 * from tv_sec*1000 + tv_usec/1000).
 */
u_int32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *old)
{
	/*
	 * given an association and the starting time of the current RTT
	 * period (in value1/value2) return RTO in number of msecs.
	 */
	int calc_time = 0;		/* measured RTT, ms */
	unsigned int new_rto = 0;
	int first_measure = 0;		/* true on the first sample for this net */
	struct timeval now;

	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	SCTP_GETTIME_TIMEVAL(&now);
	/* compute the RTT value, splitting on how the seconds compare */
	if ((u_long)now.tv_sec > (u_long)old->tv_sec) {
		calc_time = ((u_long)now.tv_sec - (u_long)old->tv_sec) * 1000;
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			calc_time += (((u_long)now.tv_usec -
			    (u_long)old->tv_usec)/1000);
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* Borrow 1,000ms from current calculation */
			calc_time -= 1000;
			/* Add in the slop over */
			calc_time += ((int)now.tv_usec/1000);
			/* Add in the pre-second ms's */
			calc_time += (((int)1000000 - (int)old->tv_usec)/1000);
		}
	} else if ((u_long)now.tv_sec == (u_long)old->tv_sec) {
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			calc_time = ((u_long)now.tv_usec -
			    (u_long)old->tv_usec)/1000;
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* impossible .. garbage in nothing out */
			return (((net->lastsa >> 2) + net->lastsv) >> 1);
		} else {
			/* impossible .. garbage in nothing out */
			return (((net->lastsa >> 2) + net->lastsv) >> 1);
		}
	} else {
		/* Clock wrapped? keep the previous smoothed value */
		return (((net->lastsa >> 2) + net->lastsv) >> 1);
	}
	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
#if 0
	/* if (net->lastsv || net->lastsa) {*/
	/* per Section 5.3.1 C3 in SCTP */
	/* net->lastsv = (int) *//* RTTVAR */
	/*	(((double)(1.0 - 0.25) * (double)net->lastsv) +
	   (double)(0.25 * (double)abs(net->lastsa - calc_time)));
	   net->lastsa = (int) */ /* SRTT */
	/*(((double)(1.0 - 0.125) * (double)net->lastsa) +
	   (double)(0.125 * (double)calc_time));
	   } else {
	 *//* the first RTT calculation, per C2 Section 5.3.1 */
	/* net->lastsa = calc_time;	*//* SRTT */
	/* net->lastsv = calc_time / 2;	*//* RTTVAR */
	/* }*/
	/* if RTTVAR goes to 0 you set to clock grainularity */
	/* if (net->lastsv == 0) {
	   net->lastsv = SCTP_CLOCK_GRANULARITY;
	   }
	   new_rto = net->lastsa + 4 * net->lastsv;
	 */
#endif
	/* this is Van Jacobson's integer version */
	if (net->RTO) {
		/* lastsa holds 8*SRTT, lastsv holds 4*RTTVAR (scaled) */
		calc_time -= (net->lastsa >> 3);
		net->lastsa += calc_time;
		if (calc_time < 0) {
			calc_time = -calc_time;
		}
		calc_time -= (net->lastsv >> 2);
		net->lastsv += calc_time;
		if (net->lastsv == 0) {
			/* never let RTTVAR collapse below clock granularity */
			net->lastsv = SCTP_CLOCK_GRANULARITY;
		}
	} else {
		/* First RTO measurement: seed SRTT/RTTVAR per C2 */
		net->lastsa = calc_time;
		net->lastsv = calc_time >> 1;
		first_measure = 1;
	}
	new_rto = ((net->lastsa >> 2) + net->lastsv) >> 1;
	/* latch satellite-network mode once a large RTO is ever seen */
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTT Smoothed */
	return ((u_int32_t)new_rto);
}
1981
1982
1983 /*
1984 * return a pointer to a contiguous piece of data from the given
1985 * mbuf chain starting at 'off' for 'len' bytes. If the desired
1986 * piece spans more than one mbuf, a copy is made at 'ptr'.
1987 * caller must ensure that the buffer size is >= 'len'
1988 * returns NULL if there there isn't 'len' bytes in the chain.
1989 */
1990 caddr_t
1991 sctp_m_getptr(struct mbuf *m, int off, int len, u_int8_t *in_ptr)
1992 {
1993 uint32_t count;
1994 uint8_t *ptr;
1995 ptr = in_ptr;
1996 if ((off < 0) || (len <= 0))
1997 return (NULL);
1998
1999 /* find the desired start location */
2000 while ((m != NULL) && (off > 0)) {
2001 if (off < m->m_len)
2002 break;
2003 off -= m->m_len;
2004 m = m->m_next;
2005 }
2006 if (m == NULL)
2007 return (NULL);
2008
2009 /* is the current mbuf large enough (eg. contiguous)? */
2010 if ((m->m_len - off) >= len) {
2011 return (mtod(m, caddr_t) + off);
2012 } else {
2013 /* else, it spans more than one mbuf, so save a temp copy... */
2014 while ((m != NULL) && (len > 0)) {
2015 count = min(m->m_len - off, len);
2016 bcopy(mtod(m, caddr_t) + off, ptr, count);
2017 len -= count;
2018 ptr += count;
2019 off = 0;
2020 m = m->m_next;
2021 }
2022 if ((m == NULL) && (len > 0))
2023 return (NULL);
2024 else
2025 return ((caddr_t)in_ptr);
2026 }
2027 }
2028
2029
/*
 * Fetch the parameter header at 'offset' in mbuf chain 'm', copying up
 * to 'pull_limit' bytes into 'pull' if the span is not contiguous.
 * Returns NULL when the chain is too short (see sctp_m_getptr).
 */
struct sctp_paramhdr *
sctp_get_next_param(struct mbuf *m,
    int offset,
    struct sctp_paramhdr *pull,
    int pull_limit)
{
	/* This just provides a typed signature to Peter's Pull routine */
	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
	    (u_int8_t *)pull));
}
2040
2041
2042 int
2043 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2044 {
2045 /*
2046 * add padlen bytes of 0 filled padding to the end of the mbuf.
2047 * If padlen is > 3 this routine will fail.
2048 */
2049 u_int8_t *dp;
2050 int i;
2051 if (padlen > 3) {
2052 return (ENOBUFS);
2053 }
2054 if (M_TRAILINGSPACE(m)) {
2055 /*
2056 * The easy way.
2057 * We hope the majority of the time we hit here :)
2058 */
2059 dp = (u_int8_t *)(mtod(m, caddr_t) + m->m_len);
2060 m->m_len += padlen;
2061 } else {
2062 /* Hard way we must grow the mbuf */
2063 struct mbuf *tmp;
2064 MGET(tmp, MB_DONTWAIT, MT_DATA);
2065 if (tmp == NULL) {
2066 /* Out of space GAK! we are in big trouble. */
2067 return (ENOSPC);
2068 }
2069 /* setup and insert in middle */
2070 tmp->m_next = m->m_next;
2071 tmp->m_len = padlen;
2072 m->m_next = tmp;
2073 dp = mtod(tmp, u_int8_t *);
2074 }
2075 /* zero out the pad */
2076 for (i= 0; i < padlen; i++) {
2077 *dp = 0;
2078 dp++;
2079 }
2080 return (0);
2081 }
2082
2083 int
2084 sctp_pad_lastmbuf(struct mbuf *m, int padval)
2085 {
2086 /* find the last mbuf in chain and pad it */
2087 struct mbuf *m_at;
2088 m_at = m;
2089 while (m_at) {
2090 if (m_at->m_next == NULL) {
2091 return (sctp_add_pad_tombuf(m_at, padval));
2092 }
2093 m_at = m_at->m_next;
2094 }
2095 return (EFAULT);
2096 }
2097
/*
 * Deliver an SCTP_ASSOC_CHANGE notification ('event' with 'error') to
 * the association's socket receive queue.  On shutdown/comm-lost it
 * first flushes deliverable data, and for TCP-model sockets it reports
 * ECONNRESET and wakes sleepers.  If the application did not enable
 * association events, no notification mbuf is queued.
 * Called with the TCB lock held; temporarily drops and retakes it to
 * honor INP-before-TCB lock ordering.
 */
static void
sctp_notify_assoc_change(u_int32_t event, struct sctp_tcb *stcb,
    u_int32_t error)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sockaddr *to;
	struct sockaddr_in6 sin6, lsa6;

	/*
	 * First, if we are going down, dump everything we
	 * can to the socket rcv queue.
	 */
	if ((event == SCTP_SHUTDOWN_COMP) || (event == SCTP_COMM_LOST)) {
		sctp_deliver_data(stcb, &stcb->asoc, NULL, 0);
	}

	/*
	 * For TCP model AND UDP connected sockets we will send
	 * an error up when an ABORT comes in.
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    (event == SCTP_COMM_LOST)) {
		stcb->sctp_socket->so_error = ECONNRESET;
		/* Wake ANY sleepers */
		sowwakeup(stcb->sctp_socket);
		sorwakeup(stcb->sctp_socket);
	}
#if 0
	if ((event == SCTP_COMM_UP) &&
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
		soisconnected(stcb->sctp_socket);
	}
#endif
	if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		/* event not enabled */
		return;
	}
	MGETHDR(m_notify, MB_DONTWAIT, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	m_notify->m_len = 0;

	/* build the sctp_assoc_change notification in the fresh mbuf */
	sac = mtod(m_notify, struct sctp_assoc_change *);
	sac->sac_type = SCTP_ASSOC_CHANGE;
	sac->sac_flags = 0;
	sac->sac_length = sizeof(struct sctp_assoc_change);
	sac->sac_state = event;
	sac->sac_error = error;
	/* XXX verify these stream counts */
	sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
	sac->sac_inbound_streams = stcb->asoc.streamincnt;
	sac->sac_assoc_id = sctp_get_associd(stcb);

	m_notify->m_flags |= M_EOR | M_NOTIFICATION;
	m_notify->m_pkthdr.len = sizeof(struct sctp_assoc_change);
	m_notify->m_pkthdr.rcvif = 0;
	m_notify->m_len = sizeof(struct sctp_assoc_change);
	m_notify->m_next = NULL;

	/* append to socket, addressed from the primary destination */
	to = (struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr;
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
	    to->sa_family == AF_INET) {
		struct sockaddr_in *sin;

		/*
		 * NOTE(review): this builds a v4-mapped-style v6 address,
		 * but sizeof(sin6.sin6_addr.s6_addr16[3]) is only 2 bytes
		 * while sin_addr is 4, so only half the IPv4 address is
		 * copied, and the 0xffff marker is at s6_addr16[2] rather
		 * than the standard ::ffff: position (s6_addr16[5]).
		 * Verify intended layout against the consumers.
		 */
		sin = (struct sockaddr_in *)to;
		bzero(&sin6, sizeof(sin6));
		sin6.sin6_family = AF_INET6;
		sin6.sin6_len = sizeof(struct sockaddr_in6);
		sin6.sin6_addr.s6_addr16[2] = 0xffff;
		bcopy(&sin->sin_addr, &sin6.sin6_addr.s6_addr16[3],
		    sizeof(sin6.sin6_addr.s6_addr16[3]));
		sin6.sin6_port = sin->sin_port;
		to = (struct sockaddr *)&sin6;
	}
	/* check and strip embedded scope junk */
	to = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)to,
	    &lsa6);
	/*
	 * We need to always notify comm changes.
	 * if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
	 *	sctp_m_freem(m_notify);
	 *	return;
	 * }
	 */
	/* re-acquire in INP-before-TCB order, then take the sockbuf token */
	SCTP_TCB_UNLOCK(stcb);
	SCTP_INP_WLOCK(stcb->sctp_ep);
	SCTP_TCB_LOCK(stcb);
	lwkt_gettoken(&stcb->sctp_socket->so_rcv.ssb_token);
	if (!sctp_sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv,
	    to, m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
		/* not enough room */
		sctp_m_freem(m_notify);
		SCTP_INP_WUNLOCK(stcb->sctp_ep);
		lwkt_reltoken(&stcb->sctp_socket->so_rcv.ssb_token);
		return;
	}
	lwkt_reltoken(&stcb->sctp_socket->so_rcv.ssb_token);
	/* account the notification against the receive window */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
		if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
			stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
		}
	} else {
		stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
	}
	SCTP_INP_WUNLOCK(stcb->sctp_ep);
	/* Wake up any sleeper */
	sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
	sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
}
2213
2214 static void
2215 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2216 struct sockaddr *sa, uint32_t error)
2217 {
2218 struct mbuf *m_notify;
2219 struct sctp_paddr_change *spc;
2220 struct sockaddr *to;
2221 struct sockaddr_in6 sin6, lsa6;
2222
2223 if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVPADDREVNT))
2224 /* event not enabled */
2225 return;
2226
2227 MGETHDR(m_notify, MB_DONTWAIT, MT_DATA);
2228 if (m_notify == NULL)
2229 return;
2230 m_notify->m_len = 0;
2231
2232 MCLGET(m_notify, MB_DONTWAIT);
2233 if ((m_notify->m_flags & M_EXT) != M_EXT) {
2234 sctp_m_freem(m_notify);
2235 return;
2236 }
2237
2238 spc = mtod(m_notify, struct sctp_paddr_change *);
2239 spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2240 spc->spc_flags = 0;
2241 spc->spc_length = sizeof(struct sctp_paddr_change);
2242 if (sa->sa_family == AF_INET) {
2243 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2244 } else {
2245 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2246 }
2247 spc->spc_state = state;
2248 spc->spc_error = error;
2249 spc->spc_assoc_id = sctp_get_associd(stcb);
2250
2251 m_notify->m_flags |= M_EOR | M_NOTIFICATION;
2252 m_notify->m_pkthdr.len = sizeof(struct sctp_paddr_change);
2253 m_notify->m_pkthdr.rcvif = 0;
2254 m_notify->m_len = sizeof(struct sctp_paddr_change);
2255 m_notify->m_next = NULL;
2256
2257 to = (struct sockaddr *)
2258 &stcb->asoc.primary_destination->ro._l_addr;
2259 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
2260 to->sa_family == AF_INET) {
2261 struct sockaddr_in *sin;
2262
2263 sin = (struct sockaddr_in *)to;
2264 bzero(&sin6, sizeof(sin6));
2265 sin6.sin6_family = AF_INET6;
2266 sin6.sin6_len = sizeof(struct sockaddr_in6);
2267 sin6.sin6_addr.s6_addr16[2] = 0xffff;
2268 bcopy(&sin->sin_addr, &sin6.sin6_addr.s6_addr16[3],
2269 sizeof(sin6.sin6_addr.s6_addr16[3]));
2270 sin6.sin6_port = sin->sin_port;
2271 to = (struct sockaddr *)&sin6;
2272 }
2273 /* check and strip embedded scope junk */
2274 to = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)to,
2275 &lsa6);
2276
2277 if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
2278 sctp_m_freem(m_notify);
2279 return;
2280 }
2281 /* append to socket */
2282 SCTP_TCB_UNLOCK(stcb);
2283 SCTP_INP_WLOCK(stcb->sctp_ep);
2284 SCTP_TCB_LOCK(stcb);
2285 lwkt_gettoken(&stcb->sctp_socket->so_rcv.ssb_token);
2286 if (!sctp_sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to,
2287 m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
2288 /* not enough room */
2289 sctp_m_freem(m_notify);
2290 SCTP_INP_WUNLOCK(stcb->sctp_ep);
2291 lwkt_reltoken(&stcb->sctp_socket->so_rcv.ssb_token);
2292 return;
2293 }
2294 lwkt_reltoken(&stcb->sctp_socket->so_rcv.ssb_token);
2295 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
2296 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
2297 if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
2298 stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2299 }
2300 } else {
2301 stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2302 }
2303 SCTP_INP_WUNLOCK(stcb->sctp_ep);
2304 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2305 }
2306
2307
2308 static void
2309 sctp_notify_send_failed(struct sctp_tcb *stcb, u_int32_t error,
2310 struct sctp_tmit_chunk *chk)
2311 {
2312 struct mbuf *m_notify;
2313 struct sctp_send_failed *ssf;
2314 struct sockaddr_in6 sin6, lsa6;
2315 struct sockaddr *to;
2316 int length;
2317
2318 if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVSENDFAILEVNT))
2319 /* event not enabled */
2320 return;
2321
2322 length = sizeof(struct sctp_send_failed) + chk->send_size;
2323 MGETHDR(m_notify, MB_DONTWAIT, MT_DATA);
2324 if (m_notify == NULL)
2325 /* no space left */
2326 return;
2327 m_notify->m_len = 0;
2328 ssf = mtod(m_notify, struct sctp_send_failed *);
2329 ssf->ssf_type = SCTP_SEND_FAILED;
2330 if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
2331 ssf->ssf_flags = SCTP_DATA_UNSENT;
2332 else
2333 ssf->ssf_flags = SCTP_DATA_SENT;
2334 ssf->ssf_length = length;
2335 ssf->ssf_error = error;
2336 /* not exactly what the user sent in, but should be close :) */
2337 ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
2338 ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
2339 ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
2340 ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
2341 ssf->ssf_info.sinfo_context = chk->rec.data.context;
2342 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
2343 ssf->ssf_assoc_id = sctp_get_associd(stcb);
2344 m_notify->m_next = chk->data;
2345 if (m_notify->m_next == NULL)
2346 m_notify->m_flags |= M_EOR | M_NOTIFICATION;
2347 else {
2348 struct mbuf *m;
2349 m_notify->m_flags |= M_NOTIFICATION;
2350 m = m_notify;
2351 while (m->m_next != NULL)
2352 m = m->m_next;
2353 m->m_flags |= M_EOR;
2354 }
2355 m_notify->m_pkthdr.len = length;
2356 m_notify->m_pkthdr.rcvif = 0;
2357 m_notify->m_len = sizeof(struct sctp_send_failed);
2358
2359 /* Steal off the mbuf */
2360 chk->data = NULL;
2361 to = (struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr;
2362 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
2363 to->sa_family == AF_INET) {
2364 struct sockaddr_in *sin;
2365
2366 sin = (struct sockaddr_in *)to;
2367 bzero(&sin6, sizeof(sin6));
2368 sin6.sin6_family = AF_INET6;
2369 sin6.sin6_len = sizeof(struct sockaddr_in6);
2370 sin6.sin6_addr.s6_addr16[2] = 0xffff;
2371 bcopy(&sin->sin_addr, &sin6.sin6_addr.s6_addr16[3],
2372 sizeof(sin6.sin6_addr.s6_addr16[3]));
2373 sin6.sin6_port = sin->sin_port;
2374 to = (struct sockaddr *)&sin6;
2375 }
2376 /* check and strip embedded scope junk */
2377 to = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)to,
2378 &lsa6);
2379
2380 if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
2381 sctp_m_freem(m_notify);
2382 return;
2383 }
2384
2385 /* append to socket */
2386 SCTP_TCB_UNLOCK(stcb);
2387 SCTP_INP_WLOCK(stcb->sctp_ep);
2388 SCTP_TCB_LOCK(stcb);
2389 lwkt_gettoken(&stcb->sctp_socket->so_rcv.ssb_token);
2390 if (!sctp_sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to,
2391 m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
2392 /* not enough room */
2393 sctp_m_freem(m_notify);
2394 lwkt_reltoken(&stcb->sctp_socket->so_rcv.ssb_token);
2395 SCTP_INP_WUNLOCK(stcb->sctp_ep);
2396 return;
2397 }
2398 lwkt_reltoken(&stcb->sctp_socket->so_rcv.ssb_token);
2399 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
2400 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
2401 if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
2402 stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2403 }
2404 } else {
2405 stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2406 }
2407 SCTP_INP_WUNLOCK(stcb->sctp_ep);
2408 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2409 }
2410
2411 static void
2412 sctp_notify_adaption_layer(struct sctp_tcb *stcb,
2413 u_int32_t error)
2414 {
2415 struct mbuf *m_notify;
2416 struct sctp_adaption_event *sai;
2417 struct sockaddr_in6 sin6, lsa6;
2418 struct sockaddr *to;
2419
2420 if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_ADAPTIONEVNT))
2421 /* event not enabled */
2422 return;
2423
2424 MGETHDR(m_notify, MB_DONTWAIT, MT_DATA);
2425 if (m_notify == NULL)
2426 /* no space left */
2427 return;
2428 m_notify->m_len = 0;
2429 sai = mtod(m_notify, struct sctp_adaption_event *);
2430 sai->sai_type = SCTP_ADAPTION_INDICATION;
2431 sai->sai_flags = 0;
2432 sai->sai_length = sizeof(struct sctp_adaption_event);
2433 sai->sai_adaption_ind = error;
2434 sai->sai_assoc_id = sctp_get_associd(stcb);
2435
2436 m_notify->m_flags |= M_EOR | M_NOTIFICATION;
2437 m_notify->m_pkthdr.len = sizeof(struct sctp_adaption_event);
2438 m_notify->m_pkthdr.rcvif = 0;
2439 m_notify->m_len = sizeof(struct sctp_adaption_event);
2440 m_notify->m_next = NULL;
2441
2442 to = (struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr;
2443 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
2444 (to->sa_family == AF_INET)) {
2445 struct sockaddr_in *sin;
2446
2447 sin = (struct sockaddr_in *)to;
2448 bzero(&sin6, sizeof(sin6));
2449 sin6.sin6_family = AF_INET6;
2450 sin6.sin6_len = sizeof(struct sockaddr_in6);
2451 sin6.sin6_addr.s6_addr16[2] = 0xffff;
2452 bcopy(&sin->sin_addr, &sin6.sin6_addr.s6_addr16[3],
2453 sizeof(sin6.sin6_addr.s6_addr16[3]));
2454 sin6.sin6_port = sin->sin_port;
2455 to = (struct sockaddr *)&sin6;
2456 }
2457 /* check and strip embedded scope junk */
2458 to = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)to,
2459 &lsa6);
2460 if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
2461 sctp_m_freem(m_notify);
2462 return;
2463 }
2464 /* append to socket */
2465 SCTP_TCB_UNLOCK(stcb);
2466 SCTP_INP_WLOCK(stcb->sctp_ep);
2467 SCTP_TCB_LOCK(stcb);
2468 lwkt_gettoken(&stcb->sctp_socket->so_rcv.ssb_token);
2469 if (!sctp_sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to,
2470 m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
2471 /* not enough room */
2472 sctp_m_freem(m_notify);
2473 lwkt_reltoken(&stcb->sctp_socket->so_rcv.ssb_token);
2474 SCTP_INP_WUNLOCK(stcb->sctp_ep);
2475 return;
2476 }
2477 lwkt_reltoken(&stcb->sctp_socket->so_rcv.ssb_token);
2478 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
2479 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
2480 if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
2481 stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2482 }
2483 } else {
2484 stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2485 }
2486 SCTP_INP_WUNLOCK(stcb->sctp_ep);
2487 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2488 }
2489
2490 static void
2491 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb,
2492 u_int32_t error)
2493 {
2494 struct mbuf *m_notify;
2495 struct sctp_pdapi_event *pdapi;
2496 struct sockaddr_in6 sin6, lsa6;
2497 struct sockaddr *to;
2498
2499 if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_PDAPIEVNT))
2500 /* event not enabled */
2501 return;
2502
2503 MGETHDR(m_notify, MB_DONTWAIT, MT_DATA);
2504 if (m_notify == NULL)
2505 /* no space left */
2506 return;
2507 m_notify->m_len = 0;
2508 pdapi = mtod(m_notify, struct sctp_pdapi_event *);
2509 pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
2510 pdapi->pdapi_flags = 0;
2511 pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
2512 pdapi->pdapi_indication = error;
2513 pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
2514
2515 m_notify->m_flags |= M_EOR | M_NOTIFICATION;
2516 m_notify->m_pkthdr.len = sizeof(struct sctp_pdapi_event);
2517 m_notify->m_pkthdr.rcvif = 0;
2518 m_notify->m_len = sizeof(struct sctp_pdapi_event);
2519 m_notify->m_next = NULL;
2520
2521 to = (struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr;
2522 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
2523 (to->sa_family == AF_INET)) {
2524 struct sockaddr_in *sin;
2525
2526 sin = (struct sockaddr_in *)to;
2527 bzero(&sin6, sizeof(sin6));
2528 sin6.sin6_family = AF_INET6;
2529 sin6.sin6_len = sizeof(struct sockaddr_in6);
2530 sin6.sin6_addr.s6_addr16[2] = 0xffff;
2531 bcopy(&sin->sin_addr, &sin6.sin6_addr.s6_addr16[3],
2532 sizeof(sin6.sin6_addr.s6_addr16[3]));
2533 sin6.sin6_port = sin->sin_port;
2534 to = (struct sockaddr *)&sin6;
2535 }
2536 /* check and strip embedded scope junk */
2537 to = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)to,
2538 &lsa6);
2539 if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
2540 sctp_m_freem(m_notify);
2541 return;
2542 }
2543 /* append to socket */
2544 SCTP_TCB_UNLOCK(stcb);
2545 SCTP_INP_WLOCK(stcb->sctp_ep);
2546 SCTP_TCB_LOCK(stcb);
2547 lwkt_gettoken(&stcb->sctp_socket->so_rcv.ssb_token);
2548 if (!sctp_sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to,
2549 m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
2550 /* not enough room */
2551 sctp_m_freem(m_notify);
2552 lwkt_reltoken(&stcb->sctp_socket->so_rcv.ssb_token);
2553 SCTP_INP_WUNLOCK(stcb->sctp_ep);
2554 return;
2555 }
2556 lwkt_reltoken(&stcb->sctp_socket->so_rcv.ssb_token);
2557 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
2558 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
2559 if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
2560 stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2561 }
2562 } else {
2563 stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2564 }
2565 SCTP_INP_WUNLOCK(stcb->sctp_ep);
2566 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2567 }
2568
2569 static void
2570 sctp_notify_shutdown_event(struct sctp_tcb *stcb)
2571 {
2572 struct mbuf *m_notify;
2573 struct sctp_shutdown_event *sse;
2574 struct sockaddr_in6 sin6, lsa6;
2575 struct sockaddr *to;
2576
2577 /*
2578 * For TCP model AND UDP connected sockets we will send
2579 * an error up when an SHUTDOWN completes
2580 */
2581 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2582 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
2583 /* mark socket closed for read/write and wakeup! */
2584 socantrcvmore(stcb->sctp_socket);
2585 socantsendmore(stcb->sctp_socket);
2586 }
2587
2588 if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT))
2589 /* event not enabled */
2590 return;
2591
2592 MGETHDR(m_notify, MB_DONTWAIT, MT_DATA);
2593 if (m_notify == NULL)
2594 /* no space left */
2595 return;
2596 m_notify->m_len = 0;
2597 sse = mtod(m_notify, struct sctp_shutdown_event *);
2598 sse->sse_type = SCTP_SHUTDOWN_EVENT;
2599 sse->sse_flags = 0;
2600 sse->sse_length = sizeof(struct sctp_shutdown_event);
2601 sse->sse_assoc_id = sctp_get_associd(stcb);
2602
2603 m_notify->m_flags |= M_EOR | M_NOTIFICATION;
2604 m_notify->m_pkthdr.len = sizeof(struct sctp_shutdown_event);
2605 m_notify->m_pkthdr.rcvif = 0;
2606 m_notify->m_len = sizeof(struct sctp_shutdown_event);
2607 m_notify->m_next = NULL;
2608
2609 to = (struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr;
2610 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
2611 to->sa_family == AF_INET) {
2612 struct sockaddr_in *sin;
2613
2614 sin = (struct sockaddr_in *)to;
2615 bzero(&sin6, sizeof(sin6));
2616 sin6.sin6_family = AF_INET6;
2617 sin6.sin6_len = sizeof(struct sockaddr_in6);
2618 sin6.sin6_addr.s6_addr16[2] = 0xffff;
2619 bcopy(&sin->sin_addr, &sin6.sin6_addr.s6_addr16[3],
2620 sizeof(sin6.sin6_addr.s6_addr16[3]));
2621 sin6.sin6_port = sin->sin_port;
2622 to = (struct sockaddr *)&sin6;
2623 }
2624 /* check and strip embedded scope junk */
2625 to = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)to,
2626 &lsa6);
2627 if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
2628 sctp_m_freem(m_notify);
2629 return;
2630 }
2631 /* append to socket */
2632 SCTP_TCB_UNLOCK(stcb);
2633 SCTP_INP_WLOCK(stcb->sctp_ep);
2634 SCTP_TCB_LOCK(stcb);
2635 lwkt_gettoken(&stcb->sctp_socket->so_rcv.ssb_token);
2636 if (!sctp_sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to,
2637 m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
2638 /* not enough room */
2639 sctp_m_freem(m_notify);
2640 lwkt_reltoken(&stcb->sctp_socket->so_rcv.ssb_token);
2641 SCTP_INP_WUNLOCK(stcb->sctp_ep);
2642 return;
2643 }
2644 lwkt_reltoken(&stcb->sctp_socket->so_rcv.ssb_token);
2645 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
2646 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
2647 if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
2648 stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2649 }
2650 } else {
2651 stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2652 }
2653 SCTP_INP_WUNLOCK(stcb->sctp_ep);
2654 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2655 }
2656
2657 static void
2658 sctp_notify_stream_reset(struct sctp_tcb *stcb,
2659 int number_entries, uint16_t *list, int flag)
2660 {
2661 struct mbuf *m_notify;
2662 struct sctp_stream_reset_event *strreset;
2663 struct sockaddr_in6 sin6, lsa6;
2664 struct sockaddr *to;
2665 int len;
2666
2667 if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_STREAM_RESETEVNT))
2668 /* event not enabled */
2669 return;
2670
2671 MGETHDR(m_notify, MB_DONTWAIT, MT_DATA);
2672 if (m_notify == NULL)
2673 /* no space left */
2674 return;
2675 m_notify->m_len = 0;
2676 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
2677 if (len > M_TRAILINGSPACE(m_notify)) {
2678 MCLGET(m_notify, MB_WAIT);
2679 }
2680 if (m_notify == NULL)
2681 /* no clusters */
2682 return;
2683
2684 if (len > M_TRAILINGSPACE(m_notify)) {
2685 /* never enough room */
2686 m_freem(m_notify);
2687 return;
2688 }
2689 strreset = mtod(m_notify, struct sctp_stream_reset_event *);
2690 strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
2691 if (number_entries == 0) {
2692 strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS;
2693 } else {
2694 strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST;
2695 }
2696 strreset->strreset_length = len;
2697 strreset->strreset_assoc_id = sctp_get_associd(stcb);
2698 if (number_entries) {
2699 int i;
2700 for (i=0; i<number_entries; i++) {
2701 strreset->strreset_list[i] = list[i];
2702 }
2703 }
2704 m_notify->m_flags |= M_EOR | M_NOTIFICATION;
2705 m_notify->m_pkthdr.len = len;
2706 m_notify->m_pkthdr.rcvif = 0;
2707 m_notify->m_len = len;
2708 m_notify->m_next = NULL;
2709 if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
2710 /* no space */
2711 sctp_m_freem(m_notify);
2712 return;
2713 }
2714 to = (struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr;
2715 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
2716 to->sa_family == AF_INET) {
2717 struct sockaddr_in *sin;
2718
2719 sin = (struct sockaddr_in *)to;
2720 bzero(&sin6, sizeof(sin6));
2721 sin6.sin6_family = AF_INET6;
2722 sin6.sin6_len = sizeof(struct sockaddr_in6);
2723 sin6.sin6_addr.s6_addr16[2] = 0xffff;
2724 bcopy(&sin->sin_addr, &sin6.sin6_addr.s6_addr16[3],
2725 sizeof(sin6.sin6_addr.s6_addr16[3]));
2726 sin6.sin6_port = sin->sin_port;
2727 to = (struct sockaddr *)&sin6;
2728 }
2729 /* check and strip embedded scope junk */
2730 to = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)to,
2731 &lsa6);
2732 /* append to socket */
2733 SCTP_TCB_UNLOCK(stcb);
2734 SCTP_INP_WLOCK(stcb->sctp_ep);
2735 SCTP_TCB_LOCK(stcb);
2736 lwkt_gettoken(&stcb->sctp_socket->so_rcv.ssb_token);
2737 if (!sctp_sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to,
2738 m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
2739 /* not enough room */
2740 sctp_m_freem(m_notify);
2741 lwkt_reltoken(&stcb->sctp_socket->so_rcv.ssb_token);
2742 SCTP_INP_WUNLOCK(stcb->sctp_ep);
2743 return;
2744 }
2745 lwkt_reltoken(&stcb->sctp_socket->so_rcv.ssb_token);
2746 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
2747 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
2748 if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
2749 stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2750 }
2751 } else {
2752 stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2753 }
2754 SCTP_INP_WUNLOCK(stcb->sctp_ep);
2755 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2756 }
2757
2758
2759 void
2760 sctp_ulp_notify(u_int32_t notification, struct sctp_tcb *stcb,
2761 u_int32_t error, void *data)
2762 {
2763 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2764 /* No notifications up when we are in a no socket state */
2765 return;
2766 }
2767 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2768 /* Can't send up to a closed socket any notifications */
2769 return;
2770 }
2771 switch (notification) {
2772 case SCTP_NOTIFY_ASSOC_UP:
2773 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error);
2774 break;
2775 case SCTP_NOTIFY_ASSOC_DOWN:
2776 sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error);
2777 break;
2778 case SCTP_NOTIFY_INTERFACE_DOWN:
2779 {
2780 struct sctp_nets *net;
2781 net = (struct sctp_nets *)data;
2782 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
2783 (struct sockaddr *)&net->ro._l_addr, error);
2784 break;
2785 }
2786 case SCTP_NOTIFY_INTERFACE_UP:
2787 {
2788 struct sctp_nets *net;
2789 net = (struct sctp_nets *)data;
2790 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
2791 (struct sockaddr *)&net->ro._l_addr, error);
2792 break;
2793 }
2794 case SCTP_NOTIFY_INTERFACE_CONFIRMED:
2795 {
2796 struct sctp_nets *net;
2797 net = (struct sctp_nets *)data;
2798 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
2799 (struct sockaddr *)&net->ro._l_addr, error);
2800 break;
2801 }
2802 case SCTP_NOTIFY_DG_FAIL:
2803 sctp_notify_send_failed(stcb, error,
2804 (struct sctp_tmit_chunk *)data);
2805 break;
2806 case SCTP_NOTIFY_ADAPTION_INDICATION:
2807 /* Here the error is the adaption indication */
2808 sctp_notify_adaption_layer(stcb, error);
2809 break;
2810 case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
2811 sctp_notify_partial_delivery_indication(stcb, error);
2812 break;
2813 case SCTP_NOTIFY_STRDATA_ERR:
2814 break;
2815 case SCTP_NOTIFY_ASSOC_ABORTED:
2816 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error);
2817 break;
2818 case SCTP_NOTIFY_PEER_OPENED_STREAM:
2819 break;
2820 case SCTP_NOTIFY_STREAM_OPENED_OK:
2821 break;
2822 case SCTP_NOTIFY_ASSOC_RESTART:
2823 sctp_notify_assoc_change(SCTP_RESTART, stcb, error);
2824 break;
2825 case SCTP_NOTIFY_HB_RESP:
2826 break;
2827 case SCTP_NOTIFY_STR_RESET_SEND:
2828 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STRRESET_OUTBOUND_STR);
2829 break;
2830 case SCTP_NOTIFY_STR_RESET_RECV:
2831 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STRRESET_INBOUND_STR);
2832 break;
2833 case SCTP_NOTIFY_ASCONF_ADD_IP:
2834 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
2835 error);
2836 break;
2837 case SCTP_NOTIFY_ASCONF_DELETE_IP:
2838 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
2839 error);
2840 break;
2841 case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
2842 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
2843 error);
2844 break;
2845 case SCTP_NOTIFY_ASCONF_SUCCESS:
2846 break;
2847 case SCTP_NOTIFY_ASCONF_FAILED:
2848 break;
2849 case SCTP_NOTIFY_PEER_SHUTDOWN:
2850 sctp_notify_shutdown_event(stcb);
2851 break;
2852 default:
2853 #ifdef SCTP_DEBUG
2854 if (sctp_debug_on & SCTP_DEBUG_UTIL1) {
2855 kprintf("NOTIFY: unknown notification %xh (%u)\n",
2856 notification, notification);
2857 }
2858 #endif /* SCTP_DEBUG */
2859 break;
2860 } /* end switch */
2861 }
2862
2863 void
2864 sctp_report_all_outbound(struct sctp_tcb *stcb)
2865 {
2866 struct sctp_association *asoc;
2867 struct sctp_stream_out *outs;
2868 struct sctp_tmit_chunk *chk;
2869
2870 asoc = &stcb->asoc;
2871
2872 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2873 return;
2874 }
2875 /* now through all the gunk freeing chunks */
2876 TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) {
2877 /* now clean up any chunks here */
2878 chk = TAILQ_FIRST(&outs->outqueue);
2879 while (chk) {
2880 stcb->asoc.stream_queue_cnt--;
2881 TAILQ_REMOVE(&outs->outqueue, chk, sctp_next);
2882 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
2883 SCTP_NOTIFY_DATAGRAM_UNSENT, chk);
2884 if (chk->data) {
2885 sctp_m_freem(chk->data);
2886 chk->data = NULL;
2887 }
2888 if (chk->whoTo)
2889 sctp_free_remote_addr(chk->whoTo);
2890 chk->whoTo = NULL;
2891 chk->asoc = NULL;
2892 /* Free the chunk */
2893 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
2894 sctppcbinfo.ipi_count_chunk--;
2895 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
2896 panic("Chunk count is negative");
2897 }
2898 sctppcbinfo.ipi_gencnt_chunk++;
2899 chk = TAILQ_FIRST(&outs->outqueue);
2900 }
2901 }
2902 /* pending send queue SHOULD be empty */
2903 if (!TAILQ_EMPTY(&asoc->send_queue)) {
2904 chk = TAILQ_FIRST(&asoc->send_queue);
2905 while (chk) {
2906 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
2907 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, SCTP_NOTIFY_DATAGRAM_UNSENT, chk);
2908 if (chk->data) {
2909 sctp_m_freem(chk->data);
2910 chk->data = NULL;
2911 }
2912 if (chk->whoTo)
2913 sctp_free_remote_addr(chk->whoTo);
2914 chk->whoTo = NULL;
2915 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
2916 sctppcbinfo.ipi_count_chunk--;
2917 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
2918 panic("Chunk count is negative");
2919 }
2920 sctppcbinfo.ipi_gencnt_chunk++;
2921 chk = TAILQ_FIRST(&asoc->send_queue);
2922 }
2923 }
2924 /* sent queue SHOULD be empty */
2925 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
2926 chk = TAILQ_FIRST(&asoc->sent_queue);
2927 while (chk) {
2928 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
2929 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
2930 SCTP_NOTIFY_DATAGRAM_SENT, chk);
2931 if (chk->data) {
2932 sctp_m_freem(chk->data);
2933 chk->data = NULL;
2934 }
2935 if (chk->whoTo)
2936 sctp_free_remote_addr(chk->whoTo);
2937 chk->whoTo = NULL;
2938 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
2939 sctppcbinfo.ipi_count_chunk--;
2940 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
2941 panic("Chunk count is negative");
2942 }
2943 sctppcbinfo.ipi_gencnt_chunk++;
2944 chk = TAILQ_FIRST(&asoc->sent_queue);
2945 }
2946 }
2947 }
2948
2949 void
2950 sctp_abort_notification(struct sctp_tcb *stcb, int error)
2951 {
2952
2953 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2954 return;
2955 }
2956 /* Tell them we lost the asoc */
2957 sctp_report_all_outbound(stcb);
2958 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL);
2959 }
2960
2961 void
2962 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2963 struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err)
2964 {
2965 u_int32_t vtag;
2966
2967 vtag = 0;
2968 if (stcb != NULL) {
2969 /* We have a TCB to abort, send notification too */
2970 vtag = stcb->asoc.peer_vtag;
2971 sctp_abort_notification(stcb, 0);
2972 }
2973 sctp_send_abort(m, iphlen, sh, vtag, op_err);
2974 if (stcb != NULL) {
2975 /* Ok, now lets free it */
2976 sctp_free_assoc(inp, stcb);
2977 } else {
2978 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2979 if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
2980 sctp_inpcb_free(inp, 1);
2981 }
2982 }
2983 }
2984 }
2985
2986 void
2987 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2988 int error, struct mbuf *op_err)
2989 {
2990 if (stcb == NULL) {
2991 /* Got to have a TCB */
2992 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2993 if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
2994 sctp_inpcb_free(inp, 1);
2995 }
2996 }
2997 return;
2998 }
2999 /* notify the ulp */
3000 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
3001 sctp_abort_notification(stcb, error);
3002 /* notify the peer */
3003 sctp_send_abort_tcb(stcb, op_err);
3004 /* now free the asoc */
3005 sctp_free_assoc(inp, stcb);
3006 }
3007
3008 void
3009 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
3010 struct sctp_inpcb *inp, struct mbuf *op_err)
3011 {
3012 struct sctp_chunkhdr *ch, chunk_buf;
3013 unsigned int chk_length;
3014
3015 /* Generate a TO address for future reference */
3016 if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
3017 if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
3018 sctp_inpcb_free(inp, 1);
3019 }
3020 }
3021 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3022 sizeof(*ch), (u_int8_t *)&chunk_buf);
3023 while (ch != NULL) {
3024 chk_length = ntohs(ch->chunk_length);
3025 if (chk_length < sizeof(*ch)) {
3026 /* break to abort land */
3027 break;
3028 }
3029 switch (ch->chunk_type) {
3030 case SCTP_PACKET_DROPPED:
3031 /* we don't respond to pkt-dropped */
3032 return;
3033 case SCTP_ABORT_ASSOCIATION:
3034 /* we don't respond with an ABORT to an ABORT */
3035 return;
3036 case SCTP_SHUTDOWN_COMPLETE:
3037 /*
3038 * we ignore it since we are not waiting for it
3039 * and peer is gone
3040 */
3041 return;
3042 case SCTP_SHUTDOWN_ACK:
3043 sctp_send_shutdown_complete2(m, iphlen, sh);
3044 return;
3045 default:
3046 break;
3047 }
3048 offset += SCTP_SIZE32(chk_length);
3049 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3050 sizeof(*ch), (u_int8_t *)&chunk_buf);
3051 }
3052 sctp_send_abort(m, iphlen, sh, 0, op_err);
3053 }
3054
3055 /*
3056 * check the inbound datagram to make sure there is not an abort
3057 * inside it, if there is return 1, else return 0.
3058 */
3059 int
3060 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, int *vtagfill)
3061 {
3062 struct sctp_chunkhdr *ch;
3063 struct sctp_init_chunk *init_chk, chunk_buf;
3064 int offset;
3065 unsigned int chk_length;
3066
3067 offset = iphlen + sizeof(struct sctphdr);
3068 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
3069 (u_int8_t *)&chunk_buf);
3070 while (ch != NULL) {
3071 chk_length = ntohs(ch->chunk_length);
3072 if (chk_length < sizeof(*ch)) {
3073 /* packet is probably corrupt */
3074 break;
3075 }
3076 /* we seem to be ok, is it an abort? */
3077 if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
3078 /* yep, tell them */
3079 return (1);
3080 }
3081 if (ch->chunk_type == SCTP_INITIATION) {
3082 /* need to update the Vtag */
3083 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
3084 offset, sizeof(*init_chk), (u_int8_t *)&chunk_buf);
3085 if (init_chk != NULL) {
3086 *vtagfill = ntohl(init_chk->init.initiate_tag);
3087 }
3088 }
3089 /* Nope, move to the next chunk */
3090 offset += SCTP_SIZE32(chk_length);
3091 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3092 sizeof(*ch), (u_int8_t *)&chunk_buf);
3093 }
3094 return (0);
3095 }
3096
3097 /*
3098 * currently (2/02), ifa_addr embeds scope_id's and don't
3099 * have sin6_scope_id set (i.e. it's 0)
3100 * so, create this function to compare link local scopes
3101 */
3102 uint32_t
3103 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
3104 {
3105 struct sockaddr_in6 a, b;
3106
3107 /* save copies */
3108 a = *addr1;
3109 b = *addr2;
3110
3111 if (a.sin6_scope_id == 0)
3112 if (in6_recoverscope(&a, &a.sin6_addr, NULL)) {
3113 /* can't get scope, so can't match */
3114 return (0);
3115 }
3116 if (b.sin6_scope_id == 0)
3117 if (in6_recoverscope(&b, &b.sin6_addr, NULL)) {
3118 /* can't get scope, so can't match */
3119 return (0);
3120 }
3121 if (a.sin6_scope_id != b.sin6_scope_id)
3122 return (0);
3123
3124 return (1);
3125 }
3126
3127 /*
3128 * returns a sockaddr_in6 with embedded scope recovered and removed
3129 */
3130 struct sockaddr_in6 *
3131 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
3132 {
3133
3134 /* check and strip embedded scope junk */
3135 if (addr->sin6_family == AF_INET6) {
3136 if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
3137 if (addr->sin6_scope_id == 0) {
3138 *store = *addr;
3139 if (!in6_recoverscope(store, &store->sin6_addr,
3140 NULL)) {
3141 /* use the recovered scope */
3142 addr = store;
3143 }
3144 /* else, return the original "to" addr */
3145 }
3146 }
3147 }
3148 return (addr);
3149 }
3150
3151 /*
3152 * are the two addresses the same? currently a "scopeless" check
3153 * returns: 1 if same, 0 if not
3154 */
3155 int
3156 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
3157 {
3158
3159 /* must be valid */
3160 if (sa1 == NULL || sa2 == NULL)
3161 return (0);
3162
3163 /* must be the same family */
3164 if (sa1->sa_family != sa2->sa_family)
3165 return (0);
3166
3167 if (sa1->sa_family == AF_INET6) {
3168 /* IPv6 addresses */
3169 struct sockaddr_in6 *sin6_1, *sin6_2;
3170
3171 sin6_1 = (struct sockaddr_in6 *)sa1;
3172 sin6_2 = (struct sockaddr_in6 *)sa2;
3173 return (SCTP6_ARE_ADDR_EQUAL(&sin6_1->sin6_addr,
3174 &sin6_2->sin6_addr));
3175 } else if (sa1->sa_family == AF_INET) {
3176 /* IPv4 addresses */
3177 struct sockaddr_in *sin_1, *sin_2;
3178
3179 sin_1 = (struct sockaddr_in *)sa1;
3180 sin_2 = (struct sockaddr_in *)sa2;
3181 return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
3182 } else {
3183 /* we don't do these... */
3184 return (0);
3185 }
3186 }
3187
3188 void
3189 sctp_print_address(struct sockaddr *sa)
3190 {
3191
3192 if (sa->sa_family == AF_INET6) {
3193 struct sockaddr_in6 *sin6;
3194 sin6 = (struct sockaddr_in6 *)sa;
3195 kprintf("IPv6 address: %s:%d scope:%u\n",
3196 ip6_sprintf(&sin6->sin6_addr), ntohs(sin6->sin6_port),
3197 sin6->sin6_scope_id);
3198 } else if (sa->sa_family == AF_INET) {
3199 struct sockaddr_in *sin;
3200 sin = (struct sockaddr_in *)sa;
3201 kprintf("IPv4 address: %s:%d\n", inet_ntoa(sin->sin_addr),
3202 ntohs(sin->sin_port));
3203 } else {
3204 kprintf("?\n");
3205 }
3206 }
3207
3208 void
3209 sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
3210 {
3211 if (iph->ip_v == IPVERSION) {
3212 struct sockaddr_in lsa, fsa;
3213
3214 bzero(&lsa, sizeof(lsa));
3215 lsa.sin_len = sizeof(lsa);
3216 lsa.sin_family = AF_INET;
3217 lsa.sin_addr = iph->ip_src;
3218 lsa.sin_port = sh->src_port;
3219 bzero(&fsa, sizeof(fsa));
3220 fsa.sin_len = sizeof(fsa);
3221 fsa.sin_family = AF_INET;
3222 fsa.sin_addr = iph->ip_dst;
3223 fsa.sin_port = sh->dest_port;
3224 kprintf("src: ");
3225 sctp_print_address((struct sockaddr *)&lsa);
3226 kprintf("dest: ");
3227 sctp_print_address((struct sockaddr *)&fsa);
3228 } else if (iph->ip_v == (IPV6_VERSION >> 4)) {
3229 struct ip6_hdr *ip6;
3230 struct sockaddr_in6 lsa6, fsa6;
3231
3232 ip6 = (struct ip6_hdr *)iph;
3233 bzero(&lsa6, sizeof(lsa6));
3234 lsa6.sin6_len = sizeof(lsa6);
3235 lsa6.sin6_family = AF_INET6;
3236 lsa6.sin6_addr = ip6->ip6_src;
3237 lsa6.sin6_port = sh->src_port;
3238 bzero(&fsa6, sizeof(fsa6));
3239 fsa6.sin6_len = sizeof(fsa6);
3240 fsa6.sin6_family = AF_INET6;
3241 fsa6.sin6_addr = ip6->ip6_dst;
3242 fsa6.sin6_port = sh->dest_port;
3243 kprintf("src: ");
3244 sctp_print_address((struct sockaddr *)&lsa6);
3245 kprintf("dest: ");
3246 sctp_print_address((struct sockaddr *)&fsa6);
3247 }
3248 }
3249
#if defined(__FreeBSD__) || defined(__APPLE__)

/* cloned from uipc_socket.c */

/*
 * Link record m0 onto the tail of sockbuf sb's record list.  sb_lastrecord
 * caches the tail record so the append is O(1); the else branch covers an
 * empty buffer, where m0 also becomes the head (sb_mb).
 */
#define SCTP_SBLINKRECORD(sb, m0) do { \
	if ((sb)->sb_lastrecord != NULL) \
		(sb)->sb_lastrecord->m_nextpkt = (m0); \
	else \
		(sb)->sb_mb = (m0); \
	(sb)->sb_lastrecord = (m0); \
} while (/*CONSTCOND*/0)
#endif
3262
3263
/*
 * Append a record built from a source address (asa), an optional control
 * chain and the data chain m0 to signal sockbuf ssb, bypassing the normal
 * sbappendaddr() space checks.  The association verification tag is stashed
 * in the record's m_pkthdr.csum_data so sctp_should_be_moved() and
 * sctp_get_first_vtag_from_sb() can later identify which association owns
 * the record.  Returns 1 on success, 0 on failure (oversized address or
 * mbuf allocation failure).
 */
int
sctp_sbappendaddr_nocheck(struct signalsockbuf *ssb, struct sockaddr *asa, struct mbuf *m0,
			  struct mbuf *control, u_int32_t tag,
			  struct sctp_inpcb *inp)
{
	struct mbuf *m, *n;
	int cnt=0;

	/* the data chain, if present, must begin with a packet header */
	if (m0 && (m0->m_flags & M_PKTHDR) == 0)
		panic("sctp_sbappendaddr_nocheck");

	for (n = control; n; n = n->m_next) {
		if (n->m_next == 0)	/* get pointer to last control buf */
			break;
	}
	/* non-TCP-model sockets get the source address prepended as its own mbuf */
	if (((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) == 0) ||
	    ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)== 0)) {
		uint32_t len = asa->sa_len; /* workaround GCC stupidity */
		if (len > MHLEN)
			return (0);
try_again:
		MGETHDR(m, MB_DONTWAIT, MT_SONAME);
		if (m == NULL)
			return (0);
		m->m_len = 0;
		/* safety: guard against the allocator handing back m0 itself */
		if (m == m0) {
			kprintf("Duplicate mbuf allocated %p in and mget returned %p?\n",
			    m0, m);
			if (cnt) {
				panic("more than once");
			}
			cnt++;
			goto try_again;
		}
		m->m_len = asa->sa_len;
		bcopy((caddr_t)asa, mtod(m, caddr_t), asa->sa_len);
	}
	else {
		m = NULL;
	}
	/* splice: address mbuf -> control chain -> data chain */
	if (n)
		n->m_next = m0;		/* concatenate data to control */
	else
		control = m0;
	if (m)
		m->m_next = control;
	else
		m = control;
	/*
	 * kludge: stash the association vtag in the pkthdr checksum field.
	 * NOTE(review): if both control and m0 are NULL on the TCP-model
	 * path, m is NULL here -- presumably callers never do that; confirm.
	 */
	m->m_pkthdr.csum_data = (int)tag;

	SOCKBUF_LOCK(ssb);
	/* charge every mbuf of the new record to the sockbuf accounting */
	for (n = m; n; n = n->m_next)
		sballoc(&ssb->sb, n);
	if (ssb->ssb_mb == NULL) {
		inp->sctp_vtag_first = tag;
	}
	/* append at the record tail, using sb_last_mpkt as a cached tail */
	if ((n = ssb->ssb_mb) != NULL) {
		if ((n->m_nextpkt != inp->sb_last_mpkt) && (n->m_nextpkt == NULL)) {
			/* cached tail looks stale; force the rescan below */
			inp->sb_last_mpkt = NULL;
		}
		if (inp->sb_last_mpkt)
			inp->sb_last_mpkt->m_nextpkt = m;
		else {
			/* no valid cache: walk to the real tail */
			while (n->m_nextpkt) {
				n = n->m_nextpkt;
			}
			n->m_nextpkt = m;
		}
		inp->sb_last_mpkt = m;
	} else {
		/* buffer was empty: new record is both head and tail */
		inp->sb_last_mpkt = ssb->ssb_mb = m;
		inp->sctp_vtag_first = tag;
	}
	SOCKBUF_UNLOCK(ssb);
	return (1);
}
3341
3342 /*************HOLD THIS COMMENT FOR PATCH FILE OF
3343 *************ALTERNATE ROUTING CODE
3344 */
3345
3346 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
3347 *************ALTERNATE ROUTING CODE
3348 */
3349
3350 struct mbuf *
3351 sctp_generate_invmanparam(int err)
3352 {
3353 /* Return a MBUF with a invalid mandatory parameter */
3354 struct mbuf *m;
3355
3356 MGET(m, MB_DONTWAIT, MT_DATA);
3357 if (m) {
3358 struct sctp_paramhdr *ph;
3359 m->m_len = sizeof(struct sctp_paramhdr);
3360 ph = mtod(m, struct sctp_paramhdr *);
3361 ph->param_length = htons(sizeof(struct sctp_paramhdr));
3362 ph->param_type = htons(err);
3363 }
3364 return (m);
3365 }
3366
3367 static int
3368 sctp_should_be_moved(struct mbuf *this, struct sctp_association *asoc)
3369 {
3370 struct mbuf *m;
3371 /*
3372 * given a mbuf chain, look through it finding
3373 * the M_PKTHDR and return 1 if it belongs to
3374 * the association given. We tell this by
3375 * a kludge where we stuff the my_vtag of the asoc
3376 * into the m->m_pkthdr.csum_data/csum field.
3377 */
3378 m = this;
3379 while (m) {
3380 if (m->m_flags & M_PKTHDR) {
3381 /* check it */
3382 #if defined(__OpenBSD__)
3383 if ((u_int32_t)m->m_pkthdr.csum == asoc->my_vtag)
3384 #else
3385 if ((u_int32_t)m->m_pkthdr.csum_data == asoc->my_vtag)
3386 #endif
3387 {
3388 /* Yep */
3389 return (1);
3390 }
3391 }
3392 m = m->m_next;
3393 }
3394 return (0);
3395 }
3396
3397 u_int32_t
3398 sctp_get_first_vtag_from_sb(struct socket *so)
3399 {
3400 struct mbuf *this, *at;
3401 u_int32_t retval;
3402
3403 retval = 0;
3404 lwkt_gettoken(&so->so_rcv.ssb_token);
3405 if (so->so_rcv.ssb_mb) {
3406 /* grubbing time */
3407 this = so->so_rcv.ssb_mb;
3408 while (this) {
3409 at = this;
3410 /* get to the m_pkthdr */
3411 while (at) {
3412 if (at->m_flags & M_PKTHDR)
3413 break;
3414 else {
3415 at = at->m_next;
3416 }
3417 }
3418 /* now do we have a m_pkthdr */
3419 if (at && (at->m_flags & M_PKTHDR)) {
3420 /* check it */
3421 #if defined(__OpenBSD__)
3422 if ((u_int32_t)at->m_pkthdr.csum != 0)
3423 #else
3424 if ((u_int32_t)at->m_pkthdr.csum_data != 0)
3425 #endif
3426 {
3427 /* its the one */
3428 #if defined(__OpenBSD__)
3429 retval = (u_int32_t)at->m_pkthdr.csum;
3430 #else
3431 retval =
3432 (u_int32_t)at->m_pkthdr.csum_data;
3433 #endif
3434 break;
3435 }
3436 }
3437 this = this->m_nextpkt;
3438 }
3439
3440 }
3441 lwkt_reltoken(&so->so_rcv.ssb_token);
3442 return (retval);
3443
3444 }
/*
 * Move every receive-buffer record belonging to association stcb (matched
 * by the vtag stashed in each record's pkthdr; see sctp_should_be_moved())
 * from socket "old" to socket "new", preserving record order and fixing
 * the space accounting of both socket buffers.  Used when an association
 * is peeled off onto its own socket.
 */
void
sctp_grub_through_socket_buffer(struct sctp_inpcb *inp, struct socket *old,
				struct socket *new, struct sctp_tcb *stcb)
{
	struct mbuf **put, **take, *next, *this;
	struct signalsockbuf *old_sb, *new_sb;
	struct sctp_association *asoc;
	int moved_top = 0;

	asoc = &stcb->asoc;
	old_sb = &old->so_rcv;
	new_sb = &new->so_rcv;
	if (old_sb->ssb_mb == NULL) {
		/* Nothing to move */
		return;
	}
	SOCKBUF_LOCK(old_sb);
	SOCKBUF_LOCK(new_sb);
	lwkt_gettoken(&old_sb->ssb_token);
	lwkt_gettoken(&new_sb->ssb_token);

	if (inp->sctp_vtag_first == asoc->my_vtag) {
		/* First one must be moved */
		struct mbuf *mm;
		for (mm = old_sb->ssb_mb; mm; mm = mm->m_next) {
			/*
			 * Go down the chain and fix
			 * the space allocation of the
			 * two sockets.
			 */
			sbfree(&old_sb->sb, mm);
			sballoc(&new_sb->sb, mm);
		}
		/* unlink old's head record and make it new's head */
		new_sb->ssb_mb = old_sb->ssb_mb;
		old_sb->ssb_mb = new_sb->ssb_mb->m_nextpkt;
		new_sb->ssb_mb->m_nextpkt = NULL;
		put = &new_sb->ssb_mb->m_nextpkt;
		moved_top = 1;
	} else {
		put = &new_sb->ssb_mb;
	}

	/*
	 * Walk the remaining records.  "take" tracks the link to patch when
	 * a record is removed from old; "put" tracks the tail link of new.
	 */
	take = &old_sb->ssb_mb;
	next = old_sb->ssb_mb;
	while (next) {
		this = next;
		/* position for next one */
		next = this->m_nextpkt;
		/* check the tag of this packet */
		if (sctp_should_be_moved(this, asoc)) {
			/* yes this needs to be moved */
			struct mbuf *mm;
			*take = this->m_nextpkt;
			this->m_nextpkt = NULL;
			*put = this;
			for (mm = this; mm; mm = mm->m_next) {
				/*
				 * Go down the chain and fix
				 * the space allocation of the
				 * two sockets.
				 */
				sbfree(&old_sb->sb, mm);
				sballoc(&new_sb->sb, mm);
			}
			put = &this->m_nextpkt;

		} else {
			/* no, advance our take point. */
			take = &this->m_nextpkt;
		}
	}
	if (moved_top) {
		/*
		 * Ok so now we must re-position vtag_first to
		 * match the new first one since we moved the
		 * mbuf at the top.
		 */
		inp->sctp_vtag_first = sctp_get_first_vtag_from_sb(old);
	}
	lwkt_reltoken(&new_sb->ssb_token);
	lwkt_reltoken(&old_sb->ssb_token);
	SOCKBUF_UNLOCK(old_sb);
	SOCKBUF_UNLOCK(new_sb);
}
3529
3530 void
3531 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
3532 struct sctp_tmit_chunk *tp1)
3533 {
3534 if (tp1->data == NULL) {
3535 return;
3536 }
3537 #ifdef SCTP_MBCNT_LOGGING
3538 sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
3539 asoc->total_output_queue_size,
3540 tp1->book_size,
3541 asoc->total_output_mbuf_queue_size,
3542 tp1->mbcnt);
3543 #endif
3544 if (asoc->total_output_queue_size >= tp1->book_size) {
3545 asoc->total_output_queue_size -= tp1->book_size;
3546 } else {
3547 asoc->total_output_queue_size = 0;
3548 }
3549
3550 /* Now free the mbuf */
3551 if (asoc->total_output_mbuf_queue_size >= tp1->mbcnt) {
3552 asoc->total_output_mbuf_queue_size -= tp1->mbcnt;
3553 } else {
3554 asoc->total_output_mbuf_queue_size = 0;
3555 }
3556 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3557 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3558 if (stcb->sctp_socket->so_snd.ssb_cc >= tp1->book_size) {
3559 stcb->sctp_socket->so_snd.ssb_cc -= tp1->book_size;
3560 } else {
3561 stcb->sctp_socket->so_snd.ssb_cc = 0;
3562
3563 }
3564 if (stcb->sctp_socket->so_snd.ssb_mbcnt >= tp1->mbcnt) {
3565 stcb->sctp_socket->so_snd.ssb_mbcnt -= tp1->mbcnt;
3566 } else {
3567 stcb->sctp_socket->so_snd.ssb_mbcnt = 0;
3568 }
3569 }
3570 }
3571
/*
 * Abandon a (possibly fragmented) PR-SCTP message starting at chunk tp1 on
 * the given queue: release buffer space, notify the ULP of the datagram
 * failure, free the data, and mark each fragment SCTP_FORWARD_TSN_SKIP so
 * a FORWARD-TSN can cover it.  Chunks still on the send queue are moved to
 * the sent queue.  If the fragments straddle the sent and send queues
 * (no EOM seen), recurses once into the send queue from its head.
 * Returns the total book_size of everything released.
 */
int
sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
    int reason, struct sctpchunk_listhead *queue)
{
	int ret_sz = 0;
	int notdone;
	uint8_t foundeom = 0;	/* set once the last fragment is seen */

	do {
		ret_sz += tp1->book_size;
		tp1->sent = SCTP_FORWARD_TSN_SKIP;
		if (tp1->data) {
			/* release accounting, tell the ULP, free the data */
			sctp_free_bufspace(stcb, &stcb->asoc, tp1);
			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1);
			sctp_m_freem(tp1->data);
			tp1->data = NULL;
			sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
		}
		if (tp1->flags & SCTP_PR_SCTP_BUFFER) {
			stcb->asoc.sent_queue_cnt_removeable--;
		}
		if (queue == &stcb->asoc.send_queue) {
			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
			/* on to the sent queue */
			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
			    sctp_next);
			stcb->asoc.sent_queue_cnt++;
		}
		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
		    SCTP_DATA_NOT_FRAG) {
			/* not frag'ed we are done */
			notdone = 0;
			foundeom = 1;
		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			/* end of frag, we are done */
			notdone = 0;
			foundeom = 1;
		} else {
			/* Its a begin or middle piece, we must mark all of it */
			notdone = 1;
			tp1 = TAILQ_NEXT(tp1, sctp_next);
		}
	} while (tp1 && notdone);
	if ((foundeom == 0) && (queue == &stcb->asoc.sent_queue)) {
		/*
		 * The multi-part message was scattered
		 * across the send and sent queue.
		 */
		tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
		/*
		 * recurse through the send_queue too, starting at the
		 * beginning.
		 */
		if (tp1) {
			ret_sz += sctp_release_pr_sctp_chunk(stcb, tp1, reason,
			    &stcb->asoc.send_queue);
		} else {
			kprintf("hmm, nothing on the send queue and no EOM?\n");
		}
	}
	return (ret_sz);
}
3634
3635 /*
3636 * checks to see if the given address, sa, is one that is currently
3637 * known by the kernel
3638 * note: can't distinguish the same address on multiple interfaces and
3639 * doesn't handle multiple addresses with different zone/scope id's
3640 * note: ifa_ifwithaddr() compares the entire sockaddr struct
3641 */
3642 struct ifaddr *
3643 sctp_find_ifa_by_addr(struct sockaddr *sa)
3644 {
3645 struct ifnet *ifn;
3646
3647 /* go through all our known interfaces */
3648 TAILQ_FOREACH(ifn, &ifnet, if_list) {
3649 struct ifaddr_container *ifac;
3650
3651 /* go through each interface addresses */
3652 TAILQ_FOREACH(ifac, &ifn->if_addrheads[mycpuid], ifa_link) {
3653 struct ifaddr *ifa = ifac->ifa;
3654
3655 /* correct family? */
3656 if (ifa->ifa_addr->sa_family != sa->sa_family)
3657 continue;
3658
3659 #ifdef INET6
3660 if (ifa->ifa_addr->sa_family == AF_INET6) {
3661 /* IPv6 address */
3662 struct sockaddr_in6 *sin1, *sin2, sin6_tmp;
3663 sin1 = (struct sockaddr_in6 *)ifa->ifa_addr;
3664 if (IN6_IS_SCOPE_LINKLOCAL(&sin1->sin6_addr)) {
3665 /* create a copy and clear scope */
3666 memcpy(&sin6_tmp, sin1,
3667 sizeof(struct sockaddr_in6));
3668 sin1 = &sin6_tmp;
3669 in6_clearscope(&sin1->sin6_addr);
3670 }
3671 sin2 = (struct sockaddr_in6 *)sa;
3672 if (memcmp(&sin1->sin6_addr, &sin2->sin6_addr,
3673 sizeof(struct in6_addr)) == 0) {
3674 /* found it */
3675 return (ifa);
3676 }
3677 } else
3678 #endif
3679 if (ifa->ifa_addr->sa_family == AF_INET) {
3680 /* IPv4 address */
3681 struct sockaddr_in *sin1, *sin2;
3682 sin1 = (struct sockaddr_in *)ifa->ifa_addr;
3683 sin2 = (struct sockaddr_in *)sa;
3684 if (sin1->sin_addr.s_addr ==
3685 sin2->sin_addr.s_addr) {
3686 /* found it */
3687 return (ifa);
3688 }
3689 }
3690 /* else, not AF_INET or AF_INET6, so skip */
3691 } /* end foreach ifa */
3692 } /* end foreach ifn */
3693 /* not found! */
3694 return (NULL);
3695 }
3696
3697
3698 #ifdef __APPLE__
3699 /*
3700 * here we hack in a fix for Apple's m_copym for the case where the first mbuf
3701 * in the chain is a M_PKTHDR and the length is zero
3702 */
3703 static void
3704 sctp_pkthdr_fix(struct mbuf *m)
3705 {
3706 struct mbuf *m_nxt;
3707
3708 if ((m->m_flags & M_PKTHDR) == 0) {
3709 /* not a PKTHDR */
3710 return;
3711 }
3712
3713 if (m->m_len != 0) {
3714 /* not a zero length PKTHDR mbuf */
3715 return;
3716 }
3717
3718 /* let's move in a word into the first mbuf... yes, ugly! */
3719 m_nxt = m->m_next;
3720 if (m_nxt == NULL) {
3721 /* umm... not a very useful mbuf chain... */
3722 return;
3723 }
3724 if ((size_t)m_nxt->m_len > sizeof(long)) {
3725 /* move over a long */
3726 bcopy(mtod(m_nxt, caddr_t), mtod(m, caddr_t), sizeof(long));
3727 /* update mbuf data pointers and lengths */
3728 m->m_len += sizeof(long);
3729 m_nxt->m_data += sizeof(long);
3730 m_nxt->m_len -= sizeof(long);
3731 }
3732 }
3733
/*
 * Wrapper around m_copym() that first normalizes a zero-length pkthdr
 * head mbuf (see sctp_pkthdr_fix above).
 */
inline struct mbuf *
sctp_m_copym(struct mbuf *m, int off, int len, int wait)
{
	sctp_pkthdr_fix(m);
	return m_copym(m, off, len, wait);
}
3740 #endif /* __APPLE__ */
/* Cache object: b0a50d738fac76b7f5ef9d8e42aeed7b */