1 /*-
2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * a) Redistributions of source code must retain the above copyright notice,
8 * this list of conditions and the following disclaimer.
9 *
10 * b) Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the distribution.
13 *
14 * c) Neither the name of Cisco Systems, Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived
16 * from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31 /* $KAME: sctp_indata.c,v 1.36 2005/03/06 16:04:17 itojun Exp $ */
32
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD: releng/8.2/sys/netinet/sctp_indata.c 216613 2010-12-21 10:43:51Z tuexen $");
35
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_var.h>
38 #include <netinet/sctp_sysctl.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctputil.h>
42 #include <netinet/sctp_output.h>
43 #include <netinet/sctp_input.h>
44 #include <netinet/sctp_indata.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
47
48
49 /*
50 * NOTES: On the outbound side of things I need to check the sack timer to
51 * see if I should generate a sack into the chunk queue (if I have data to
52 * send that is and will be sending it .. for bundling.
53 *
54 * The callback in sctp_usrreq.c will get called when the socket is read from.
55 * This will cause sctp_service_queues() to get called on the top entry in
56 * the list.
57 */
58
/*
 * Recompute and store the association's advertised receive window.
 * Thin wrapper: delegates the actual computation to sctp_calc_rwnd().
 */
void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}
64
65 /* Calculate what the rwnd would be */
/*
 * Compute the receive window (rwnd) to advertise to the peer:
 * free socket-buffer space, minus data still held on the reassembly
 * and stream queues (plus per-chunk mbuf overhead), minus control
 * overhead.  Returns 0 when no tcb socket exists or we are out of
 * space.
 */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a 1-2-m socket. Since
	 * the sb_cc is the count that everyone has put up. When we re-write
	 * sctp_soreceive then we will fix this so that ONLY this
	 * associations data is taken into account.
	 */
	if (stcb->sctp_socket == NULL)
		return (calc);

	/* Nothing buffered anywhere: grant the full window. */
	if (stcb->asoc.sb_cc == 0 &&
	    asoc->size_on_reasm_queue == 0 &&
	    asoc->size_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);

	/*
	 * take out what has NOT been put on socket queue and we yet hold
	 * for putting up.
	 */
	/* MSIZE per chunk accounts for mbuf header overhead. */
	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
	    asoc->cnt_on_reasm_queue * MSIZE));
	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
	    asoc->cnt_on_all_streams * MSIZE));

	if (calc == 0) {
		/* out of space */
		return (calc);
	}
	/* what is the overhead of all these rwnd's */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
	 * even it is 0. SWS engaged
	 */
	/*
	 * NOTE(review): the comparison is against my_rwnd_control_len even
	 * though that amount was already subtracted above -- this clamps a
	 * "small" (not just zero) window to 1 to engage silly-window-
	 * syndrome avoidance; confirm against the sender-side SWS logic.
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}
114
115
116
117 /*
118 * Build out our readq entry based on the incoming packet.
119 */
120 struct sctp_queued_to_read *
121 sctp_build_readq_entry(struct sctp_tcb *stcb,
122 struct sctp_nets *net,
123 uint32_t tsn, uint32_t ppid,
124 uint32_t context, uint16_t stream_no,
125 uint16_t stream_seq, uint8_t flags,
126 struct mbuf *dm)
127 {
128 struct sctp_queued_to_read *read_queue_e = NULL;
129
130 sctp_alloc_a_readq(stcb, read_queue_e);
131 if (read_queue_e == NULL) {
132 goto failed_build;
133 }
134 read_queue_e->sinfo_stream = stream_no;
135 read_queue_e->sinfo_ssn = stream_seq;
136 read_queue_e->sinfo_flags = (flags << 8);
137 read_queue_e->sinfo_ppid = ppid;
138 read_queue_e->sinfo_context = stcb->asoc.context;
139 read_queue_e->sinfo_timetolive = 0;
140 read_queue_e->sinfo_tsn = tsn;
141 read_queue_e->sinfo_cumtsn = tsn;
142 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
143 read_queue_e->whoFrom = net;
144 read_queue_e->length = 0;
145 atomic_add_int(&net->ref_count, 1);
146 read_queue_e->data = dm;
147 read_queue_e->spec_flags = 0;
148 read_queue_e->tail_mbuf = NULL;
149 read_queue_e->aux_data = NULL;
150 read_queue_e->stcb = stcb;
151 read_queue_e->port_from = stcb->rport;
152 read_queue_e->do_not_ref_stcb = 0;
153 read_queue_e->end_added = 0;
154 read_queue_e->some_taken = 0;
155 read_queue_e->pdapi_aborted = 0;
156 failed_build:
157 return (read_queue_e);
158 }
159
160
161 /*
162 * Build out our readq entry based on the incoming packet.
163 */
164 static struct sctp_queued_to_read *
165 sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
166 struct sctp_tmit_chunk *chk)
167 {
168 struct sctp_queued_to_read *read_queue_e = NULL;
169
170 sctp_alloc_a_readq(stcb, read_queue_e);
171 if (read_queue_e == NULL) {
172 goto failed_build;
173 }
174 read_queue_e->sinfo_stream = chk->rec.data.stream_number;
175 read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
176 read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
177 read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
178 read_queue_e->sinfo_context = stcb->asoc.context;
179 read_queue_e->sinfo_timetolive = 0;
180 read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
181 read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
182 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
183 read_queue_e->whoFrom = chk->whoTo;
184 read_queue_e->aux_data = NULL;
185 read_queue_e->length = 0;
186 atomic_add_int(&chk->whoTo->ref_count, 1);
187 read_queue_e->data = chk->data;
188 read_queue_e->tail_mbuf = NULL;
189 read_queue_e->stcb = stcb;
190 read_queue_e->port_from = stcb->rport;
191 read_queue_e->spec_flags = 0;
192 read_queue_e->do_not_ref_stcb = 0;
193 read_queue_e->end_added = 0;
194 read_queue_e->some_taken = 0;
195 read_queue_e->pdapi_aborted = 0;
196 failed_build:
197 return (read_queue_e);
198 }
199
200
201 struct mbuf *
202 sctp_build_ctl_nchunk(struct sctp_inpcb *inp,
203 struct sctp_sndrcvinfo *sinfo)
204 {
205 struct sctp_sndrcvinfo *outinfo;
206 struct cmsghdr *cmh;
207 struct mbuf *ret;
208 int len;
209 int use_extended = 0;
210
211 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
212 /* user does not want the sndrcv ctl */
213 return (NULL);
214 }
215 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
216 use_extended = 1;
217 len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
218 } else {
219 len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
220 }
221
222
223 ret = sctp_get_mbuf_for_msg(len,
224 0, M_DONTWAIT, 1, MT_DATA);
225
226 if (ret == NULL) {
227 /* No space */
228 return (ret);
229 }
230 /* We need a CMSG header followed by the struct */
231 cmh = mtod(ret, struct cmsghdr *);
232 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
233 cmh->cmsg_level = IPPROTO_SCTP;
234 if (use_extended) {
235 cmh->cmsg_type = SCTP_EXTRCV;
236 cmh->cmsg_len = len;
237 memcpy(outinfo, sinfo, len);
238 } else {
239 cmh->cmsg_type = SCTP_SNDRCV;
240 cmh->cmsg_len = len;
241 *outinfo = *sinfo;
242 }
243 SCTP_BUF_LEN(ret) = cmh->cmsg_len;
244 return (ret);
245 }
246
247
248 char *
249 sctp_build_ctl_cchunk(struct sctp_inpcb *inp,
250 int *control_len,
251 struct sctp_sndrcvinfo *sinfo)
252 {
253 struct sctp_sndrcvinfo *outinfo;
254 struct cmsghdr *cmh;
255 char *buf;
256 int len;
257 int use_extended = 0;
258
259 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
260 /* user does not want the sndrcv ctl */
261 return (NULL);
262 }
263 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
264 use_extended = 1;
265 len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
266 } else {
267 len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
268 }
269 SCTP_MALLOC(buf, char *, len, SCTP_M_CMSG);
270 if (buf == NULL) {
271 /* No space */
272 return (buf);
273 }
274 /* We need a CMSG header followed by the struct */
275 cmh = (struct cmsghdr *)buf;
276 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
277 cmh->cmsg_level = IPPROTO_SCTP;
278 if (use_extended) {
279 cmh->cmsg_type = SCTP_EXTRCV;
280 cmh->cmsg_len = len;
281 memcpy(outinfo, sinfo, len);
282 } else {
283 cmh->cmsg_type = SCTP_SNDRCV;
284 cmh->cmsg_len = len;
285 *outinfo = *sinfo;
286 }
287 *control_len = len;
288 return (buf);
289 }
290
291 static void
292 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
293 {
294 uint32_t gap, i, cumackp1;
295 int fnd = 0;
296
297 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
298 return;
299 }
300 cumackp1 = asoc->cumulative_tsn + 1;
301 if (compare_with_wrap(cumackp1, tsn, MAX_TSN)) {
302 /*
303 * this tsn is behind the cum ack and thus we don't need to
304 * worry about it being moved from one to the other.
305 */
306 return;
307 }
308 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
309 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
310 printf("gap:%x tsn:%x\n", gap, tsn);
311 sctp_print_mapping_array(asoc);
312 #ifdef INVARIANTS
313 panic("Things are really messed up now!!");
314 #endif
315 }
316 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
317 SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
318 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
319 asoc->highest_tsn_inside_nr_map = tsn;
320 }
321 if (tsn == asoc->highest_tsn_inside_map) {
322 /* We must back down to see what the new highest is */
323 for (i = tsn - 1; (compare_with_wrap(i, asoc->mapping_array_base_tsn, MAX_TSN) ||
324 (i == asoc->mapping_array_base_tsn)); i--) {
325 SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
326 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
327 asoc->highest_tsn_inside_map = i;
328 fnd = 1;
329 break;
330 }
331 }
332 if (!fnd) {
333 asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
334 }
335 }
336 }
337
338
339 /*
340 * We are delivering currently from the reassembly queue. We must continue to
341 * deliver until we either: 1) run out of space. 2) run out of sequential
342 * TSN's 3) hit the SCTP_DATA_LAST_FRAG flag.
343 */
static void
sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	/*
	 * Deliver as many chunks as possible from the head of the
	 * reassembly queue to the socket buffer, in TSN order, stopping
	 * when the TSN sequence breaks, the stream sequence is not next
	 * (for ordered data), or a LAST_FRAG completes the message.
	 * If the socket is gone, the whole queue is drained and freed.
	 */
	struct sctp_tmit_chunk *chk;
	uint16_t nxt_todel;
	uint16_t stream_no;
	int end = 0;
	int cntDel;

	struct sctp_queued_to_read *control, *ctl, *ctlat;

	if (stcb == NULL)
		return;

	cntDel = stream_no = 0;
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* socket above is long gone or going.. */
abandon:
		/* Drain and free the entire reassembly queue. */
		asoc->fragmented_delivery_inprogress = 0;
		chk = TAILQ_FIRST(&asoc->reasmqueue);
		while (chk) {
			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
			asoc->size_on_reasm_queue -= chk->send_size;
			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
			/*
			 * Lose the data pointer, since its in the socket
			 * buffer
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			/* Now free the address and data */
			sctp_free_a_chunk(stcb, chk);
			/* sa_ignore FREED_MEMORY */
			chk = TAILQ_FIRST(&asoc->reasmqueue);
		}
		return;
	}
	SCTP_TCB_LOCK_ASSERT(stcb);
	do {
		chk = TAILQ_FIRST(&asoc->reasmqueue);
		if (chk == NULL) {
			return;
		}
		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
			/* Can't deliver more :< */
			return;
		}
		stream_no = chk->rec.data.stream_number;
		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
		if (nxt_todel != chk->rec.data.stream_seq &&
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
			/*
			 * Not the next sequence to deliver in its stream OR
			 * unordered
			 */
			return;
		}
		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			/* First fragment: start a new readq entry. */
			control = sctp_build_readq_entry_chk(stcb, chk);
			if (control == NULL) {
				/* out of memory? */
				return;
			}
			/* save it off for our future deliveries */
			stcb->asoc.control_pdapi = control;
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
			sctp_add_to_readq(stcb->sctp_ep,
			    stcb, control, &stcb->sctp_socket->so_rcv, end,
			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
			cntDel++;
		} else {
			/* Middle/last fragment: append to the in-progress entry. */
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
			if (sctp_append_to_readq(stcb->sctp_ep, stcb,
			    stcb->asoc.control_pdapi,
			    chk->data, end, chk->rec.data.TSN_seq,
			    &stcb->sctp_socket->so_rcv)) {
				/*
				 * something is very wrong, either
				 * control_pdapi is NULL, or the tail_mbuf
				 * is corrupt, or there is a EOM already on
				 * the mbuf chain.
				 */
				if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
					goto abandon;
				} else {
#ifdef INVARIANTS
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						panic("This should not happen control_pdapi NULL?");
					}
					/* if we did not panic, it was a EOM */
					panic("Bad chunking ??");
#else
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
					}
					SCTP_PRINTF("Bad chunking ??\n");
					SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");

#endif
					goto abandon;
				}
			}
			cntDel++;
		}
		/* pull it we did it */
		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			/* Message complete: leave PD-API mode. */
			asoc->fragmented_delivery_inprogress = 0;
			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
				asoc->strmin[stream_no].last_sequence_delivered++;
			}
			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
			}
		} else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			/*
			 * turn the flag back on since we just delivered
			 * yet another one.
			 */
			asoc->fragmented_delivery_inprogress = 1;
		}
		/* Record what was last handed to the PD-API consumer. */
		asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
		asoc->last_flags_delivered = chk->rec.data.rcv_flags;
		asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
		asoc->last_strm_no_delivered = chk->rec.data.stream_number;

		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
		asoc->size_on_reasm_queue -= chk->send_size;
		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
		/* free up the chk */
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk);

		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * Now lets see if we can deliver the next one on
			 * the stream
			 */
			struct sctp_stream_in *strm;

			strm = &asoc->strmin[stream_no];
			nxt_todel = strm->last_sequence_delivered + 1;
			ctl = TAILQ_FIRST(&strm->inqueue);
			if (ctl && (nxt_todel == ctl->sinfo_ssn)) {
				while (ctl != NULL) {
					/* Deliver more if we can. */
					if (nxt_todel == ctl->sinfo_ssn) {
						ctlat = TAILQ_NEXT(ctl, next);
						TAILQ_REMOVE(&strm->inqueue, ctl, next);
						asoc->size_on_all_streams -= ctl->length;
						sctp_ucount_decr(asoc->cnt_on_all_streams);
						strm->last_sequence_delivered++;
						sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
						sctp_add_to_readq(stcb->sctp_ep, stcb,
						    ctl,
						    &stcb->sctp_socket->so_rcv, 1,
						    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
						ctl = ctlat;
					} else {
						break;
					}
					nxt_todel = strm->last_sequence_delivered + 1;
				}
			}
			/* Fragmented delivery finished: stop the main loop. */
			break;
		}
		/* sa_ignore FREED_MEMORY */
		chk = TAILQ_FIRST(&asoc->reasmqueue);
	} while (chk);
}
527
528 /*
529 * Queue the chunk either right into the socket buffer if it is the next one
530 * to go OR put it in the correct place in the delivery queue. If we do
531 * append to the so_buf, keep doing so until we are out of order. One big
532 * question still remains, what to do when the socket buffer is FULL??
533 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag)
{
	/*
	 * Deliver 'control' directly to the socket buffer if its SSN is
	 * the next to deliver on its stream (then drain any now-in-order
	 * successors), otherwise insert it SSN-sorted into the stream's
	 * inqueue.  A duplicate or behind-the-cumack SSN/TSN aborts the
	 * association (sets *abort_flag).
	 */
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream useage that could occur. Send a
	 * single msg to stream 0, send 4Billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about
	 * or should we just change our queue sort at the bottom to be by
	 * TSN.
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided an will leave the sort by
	 * SSN alone. Maybe a hybred approach is the answer
	 *
	 */
	struct sctp_stream_in *strm;
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint16_t nxt_todel;
	struct mbuf *oper;

	queue_needed = 1;
	/* account for it on the stream queues up front */
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	strm = &asoc->strmin[control->sinfo_stream];
	nxt_todel = strm->last_sequence_delivered + 1;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	SCTPDBG(SCTP_DEBUG_INDATA1,
	    "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
	    (uint32_t) control->sinfo_stream,
	    (uint32_t) strm->last_sequence_delivered,
	    (uint32_t) nxt_todel);
	if (compare_with_wrap(strm->last_sequence_delivered,
	    control->sinfo_ssn, MAX_SEQ) ||
	    (strm->last_sequence_delivered == control->sinfo_ssn)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
		    control->sinfo_ssn, strm->last_sequence_delivered);
protocol_error:
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		/* build a PROTOCOL_VIOLATION error cause for the ABORT */
		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (oper) {
			struct sctp_paramhdr *ph;
			uint32_t *ippp;

			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
			    (sizeof(uint32_t) * 3);
			ph = mtod(oper, struct sctp_paramhdr *);
			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
			ph->param_length = htons(SCTP_BUF_LEN(oper));
			ippp = (uint32_t *) (ph + 1);
			/* diagnostic payload: location, TSN, stream/SSN */
			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
			ippp++;
			*ippp = control->sinfo_tsn;
			ippp++;
			*ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
		}
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
		sctp_abort_an_association(stcb->sctp_ep, stcb,
		    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

		*abort_flag = 1;
		return;

	}
	if (nxt_todel == control->sinfo_ssn) {
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* EY it wont be queued if it could be delivered directly */
		queue_needed = 0;
		asoc->size_on_all_streams -= control->length;
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_sequence_delivered++;

		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
		/* drain any queued entries that are now in order */
		control = TAILQ_FIRST(&strm->inqueue);
		while (control != NULL) {
			/* all delivered */
			nxt_todel = strm->last_sequence_delivered + 1;
			if (nxt_todel == control->sinfo_ssn) {
				at = TAILQ_NEXT(control, next);
				TAILQ_REMOVE(&strm->inqueue, control, next);
				asoc->size_on_all_streams -= control->length;
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				strm->last_sequence_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1,
				    SCTP_READ_LOCK_NOT_HELD,
				    SCTP_SO_NOT_LOCKED);
				control = at;
				continue;
			}
			break;
		}
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		/* a TSN at or behind the cumack here is a protocol error */
		if ((compare_with_wrap(asoc->cumulative_tsn,
		    control->sinfo_tsn, MAX_TSN)) ||
		    (control->sinfo_tsn == asoc->cumulative_tsn)) {
			goto protocol_error;
		}
		if (TAILQ_EMPTY(&strm->inqueue)) {
			/* Empty queue */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
				sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
			}
			TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		} else {
			/* walk the queue to find the SSN-sorted position */
			TAILQ_FOREACH(at, &strm->inqueue, next) {
				if (compare_with_wrap(at->sinfo_ssn,
				    control->sinfo_ssn, MAX_SEQ)) {
					/*
					 * one in queue is bigger than the
					 * new one, insert before this one
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_MD);
					}
					TAILQ_INSERT_BEFORE(at, control, next);
					break;
				} else if (at->sinfo_ssn == control->sinfo_ssn) {
					/*
					 * Gak, He sent me a duplicate str
					 * seq number
					 */
					/*
					 * foo bar, I guess I will just free
					 * this new guy, should we abort
					 * too? FIX ME MAYBE? Or it COULD be
					 * that the SSN's have wrapped.
					 * Maybe I should compare to TSN
					 * somehow... sigh for now just blow
					 * away the chunk!
					 */

					if (control->data)
						sctp_m_freem(control->data);
					control->data = NULL;
					asoc->size_on_all_streams -= control->length;
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					if (control->whoFrom) {
						sctp_free_remote_addr(control->whoFrom);
						control->whoFrom = NULL;
					}
					sctp_free_a_readq(stcb, control);
					return;
				} else {
					if (TAILQ_NEXT(at, next) == NULL) {
						/*
						 * We are at the end, insert
						 * it after this one
						 */
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
							sctp_log_strm_del(control, at,
							    SCTP_STR_LOG_FROM_INSERT_TL);
						}
						TAILQ_INSERT_AFTER(&strm->inqueue,
						    at, control, next);
						break;
					}
				}
			}
		}
	}
}
736
737 /*
738 * Returns two things: You get the total size of the deliverable parts of the
739 * first fragmented message on the reassembly queue. And you get a 1 back if
740 * all of the message is ready or a 0 back if the message is still incomplete
741 */
742 static int
743 sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
744 {
745 struct sctp_tmit_chunk *chk;
746 uint32_t tsn;
747
748 *t_size = 0;
749 chk = TAILQ_FIRST(&asoc->reasmqueue);
750 if (chk == NULL) {
751 /* nothing on the queue */
752 return (0);
753 }
754 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
755 /* Not a first on the queue */
756 return (0);
757 }
758 tsn = chk->rec.data.TSN_seq;
759 while (chk) {
760 if (tsn != chk->rec.data.TSN_seq) {
761 return (0);
762 }
763 *t_size += chk->send_size;
764 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
765 return (1);
766 }
767 tsn++;
768 chk = TAILQ_NEXT(chk, sctp_next);
769 }
770 return (0);
771 }
772
static void
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	/*
	 * Decide whether to start (or continue) delivering from the
	 * reassembly queue.  A new fragmented delivery is started only if
	 * the head chunk is a deliverable FIRST fragment and either the
	 * whole message is present or it already exceeds the partial-
	 * delivery point.  When a delivery is already in progress, service
	 * it and loop back (doit_again) if it completed.
	 */
	struct sctp_tmit_chunk *chk;
	uint16_t nxt_todel;
	uint32_t tsize, pd_point;

doit_again:
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* Huh? */
		/* queue empty: force the accounting back to zero */
		asoc->size_on_reasm_queue = 0;
		asoc->cnt_on_reasm_queue = 0;
		return;
	}
	if (asoc->fragmented_delivery_inprogress == 0) {
		nxt_todel =
		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
		    (nxt_todel == chk->rec.data.stream_seq ||
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
			/*
			 * Yep the first one is here and its ok to deliver
			 * but should we?
			 */
			/* partial-delivery point, capped by the rcv buffer */
			if (stcb->sctp_socket) {
				pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
				    stcb->sctp_ep->partial_delivery_point);
			} else {
				pd_point = stcb->sctp_ep->partial_delivery_point;
			}
			if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {

				/*
				 * Yes, we setup to start reception, by
				 * backing down the TSN just in case we
				 * can't deliver. If we
				 */
				asoc->fragmented_delivery_inprogress = 1;
				asoc->tsn_last_delivered =
				    chk->rec.data.TSN_seq - 1;
				asoc->str_of_pdapi =
				    chk->rec.data.stream_number;
				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
				asoc->pdapi_ppid = chk->rec.data.payloadtype;
				asoc->fragment_flags = chk->rec.data.rcv_flags;
				sctp_service_reassembly(stcb, asoc);
			}
		}
	} else {
		/*
		 * Service re-assembly will deliver stream data queued at
		 * the end of fragmented delivery.. but it wont know to go
		 * back and call itself again... we do that here with the
		 * got doit_again
		 */
		sctp_service_reassembly(stcb, asoc);
		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * finished our Fragmented delivery, could be more
			 * waiting?
			 */
			goto doit_again;
		}
	}
}
839
840 /*
841 * Dump onto the re-assembly queue, in its proper place. After dumping on the
842 * queue, see if anthing can be delivered. If so pull it off (or as much as
843 * we can. If we run out of space then we must dump what we can and set the
844 * appropriate flag to say we queued what we could.
845 */
846 static void
847 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
848 struct sctp_tmit_chunk *chk, int *abort_flag)
849 {
850 struct mbuf *oper;
851 uint32_t cum_ackp1, last_tsn, prev_tsn, post_tsn;
852 u_char last_flags;
853 struct sctp_tmit_chunk *at, *prev, *next;
854
855 prev = next = NULL;
856 cum_ackp1 = asoc->tsn_last_delivered + 1;
857 if (TAILQ_EMPTY(&asoc->reasmqueue)) {
858 /* This is the first one on the queue */
859 TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
860 /*
861 * we do not check for delivery of anything when only one
862 * fragment is here
863 */
864 asoc->size_on_reasm_queue = chk->send_size;
865 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
866 if (chk->rec.data.TSN_seq == cum_ackp1) {
867 if (asoc->fragmented_delivery_inprogress == 0 &&
868 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
869 SCTP_DATA_FIRST_FRAG) {
870 /*
871 * An empty queue, no delivery inprogress,
872 * we hit the next one and it does NOT have
873 * a FIRST fragment mark.
874 */
875 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
876 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
877 0, M_DONTWAIT, 1, MT_DATA);
878
879 if (oper) {
880 struct sctp_paramhdr *ph;
881 uint32_t *ippp;
882
883 SCTP_BUF_LEN(oper) =
884 sizeof(struct sctp_paramhdr) +
885 (sizeof(uint32_t) * 3);
886 ph = mtod(oper, struct sctp_paramhdr *);
887 ph->param_type =
888 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
889 ph->param_length = htons(SCTP_BUF_LEN(oper));
890 ippp = (uint32_t *) (ph + 1);
891 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2);
892 ippp++;
893 *ippp = chk->rec.data.TSN_seq;
894 ippp++;
895 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
896
897 }
898 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
899 sctp_abort_an_association(stcb->sctp_ep, stcb,
900 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
901 *abort_flag = 1;
902 } else if (asoc->fragmented_delivery_inprogress &&
903 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
904 /*
905 * We are doing a partial delivery and the
906 * NEXT chunk MUST be either the LAST or
907 * MIDDLE fragment NOT a FIRST
908 */
909 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
910 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
911 0, M_DONTWAIT, 1, MT_DATA);
912 if (oper) {
913 struct sctp_paramhdr *ph;
914 uint32_t *ippp;
915
916 SCTP_BUF_LEN(oper) =
917 sizeof(struct sctp_paramhdr) +
918 (3 * sizeof(uint32_t));
919 ph = mtod(oper, struct sctp_paramhdr *);
920 ph->param_type =
921 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
922 ph->param_length = htons(SCTP_BUF_LEN(oper));
923 ippp = (uint32_t *) (ph + 1);
924 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);
925 ippp++;
926 *ippp = chk->rec.data.TSN_seq;
927 ippp++;
928 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
929 }
930 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
931 sctp_abort_an_association(stcb->sctp_ep, stcb,
932 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
933 *abort_flag = 1;
934 } else if (asoc->fragmented_delivery_inprogress) {
935 /*
936 * Here we are ok with a MIDDLE or LAST
937 * piece
938 */
939 if (chk->rec.data.stream_number !=
940 asoc->str_of_pdapi) {
941 /* Got to be the right STR No */
942 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
943 chk->rec.data.stream_number,
944 asoc->str_of_pdapi);
945 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
946 0, M_DONTWAIT, 1, MT_DATA);
947 if (oper) {
948 struct sctp_paramhdr *ph;
949 uint32_t *ippp;
950
951 SCTP_BUF_LEN(oper) =
952 sizeof(struct sctp_paramhdr) +
953 (sizeof(uint32_t) * 3);
954 ph = mtod(oper,
955 struct sctp_paramhdr *);
956 ph->param_type =
957 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
958 ph->param_length =
959 htons(SCTP_BUF_LEN(oper));
960 ippp = (uint32_t *) (ph + 1);
961 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
962 ippp++;
963 *ippp = chk->rec.data.TSN_seq;
964 ippp++;
965 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
966 }
967 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
968 sctp_abort_an_association(stcb->sctp_ep,
969 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
970 *abort_flag = 1;
971 } else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
972 SCTP_DATA_UNORDERED &&
973 chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
974 /* Got to be the right STR Seq */
975 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
976 chk->rec.data.stream_seq,
977 asoc->ssn_of_pdapi);
978 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
979 0, M_DONTWAIT, 1, MT_DATA);
980 if (oper) {
981 struct sctp_paramhdr *ph;
982 uint32_t *ippp;
983
984 SCTP_BUF_LEN(oper) =
985 sizeof(struct sctp_paramhdr) +
986 (3 * sizeof(uint32_t));
987 ph = mtod(oper,
988 struct sctp_paramhdr *);
989 ph->param_type =
990 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
991 ph->param_length =
992 htons(SCTP_BUF_LEN(oper));
993 ippp = (uint32_t *) (ph + 1);
994 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
995 ippp++;
996 *ippp = chk->rec.data.TSN_seq;
997 ippp++;
998 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
999
1000 }
1001 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
1002 sctp_abort_an_association(stcb->sctp_ep,
1003 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1004 *abort_flag = 1;
1005 }
1006 }
1007 }
1008 return;
1009 }
1010 /* Find its place */
1011 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1012 if (compare_with_wrap(at->rec.data.TSN_seq,
1013 chk->rec.data.TSN_seq, MAX_TSN)) {
1014 /*
1015 * one in queue is bigger than the new one, insert
1016 * before this one
1017 */
1018 /* A check */
1019 asoc->size_on_reasm_queue += chk->send_size;
1020 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1021 next = at;
1022 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1023 break;
1024 } else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
1025 /* Gak, He sent me a duplicate str seq number */
1026 /*
1027 * foo bar, I guess I will just free this new guy,
1028 * should we abort too? FIX ME MAYBE? Or it COULD be
1029 * that the SSN's have wrapped. Maybe I should
1030 * compare to TSN somehow... sigh for now just blow
1031 * away the chunk!
1032 */
1033 if (chk->data) {
1034 sctp_m_freem(chk->data);
1035 chk->data = NULL;
1036 }
1037 sctp_free_a_chunk(stcb, chk);
1038 return;
1039 } else {
1040 last_flags = at->rec.data.rcv_flags;
1041 last_tsn = at->rec.data.TSN_seq;
1042 prev = at;
1043 if (TAILQ_NEXT(at, sctp_next) == NULL) {
1044 /*
1045 * We are at the end, insert it after this
1046 * one
1047 */
1048 /* check it first */
1049 asoc->size_on_reasm_queue += chk->send_size;
1050 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1051 TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
1052 break;
1053 }
1054 }
1055 }
1056 /* Now the audits */
1057 if (prev) {
1058 prev_tsn = chk->rec.data.TSN_seq - 1;
1059 if (prev_tsn == prev->rec.data.TSN_seq) {
1060 /*
1061 * Ok the one I am dropping onto the end is the
1062 * NEXT. A bit of valdiation here.
1063 */
1064 if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1065 SCTP_DATA_FIRST_FRAG ||
1066 (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1067 SCTP_DATA_MIDDLE_FRAG) {
1068 /*
1069 * Insert chk MUST be a MIDDLE or LAST
1070 * fragment
1071 */
1072 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1073 SCTP_DATA_FIRST_FRAG) {
1074 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a midlle or last but not a first\n");
1075 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
1076 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1077 0, M_DONTWAIT, 1, MT_DATA);
1078 if (oper) {
1079 struct sctp_paramhdr *ph;
1080 uint32_t *ippp;
1081
1082 SCTP_BUF_LEN(oper) =
1083 sizeof(struct sctp_paramhdr) +
1084 (3 * sizeof(uint32_t));
1085 ph = mtod(oper,
1086 struct sctp_paramhdr *);
1087 ph->param_type =
1088 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1089 ph->param_length =
1090 htons(SCTP_BUF_LEN(oper));
1091 ippp = (uint32_t *) (ph + 1);
1092 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1093 ippp++;
1094 *ippp = chk->rec.data.TSN_seq;
1095 ippp++;
1096 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1097
1098 }
1099 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
1100 sctp_abort_an_association(stcb->sctp_ep,
1101 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1102 *abort_flag = 1;
1103 return;
1104 }
1105 if (chk->rec.data.stream_number !=
1106 prev->rec.data.stream_number) {
1107 /*
1108 * Huh, need the correct STR here,
1109 * they must be the same.
1110 */
1111 SCTP_PRINTF("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1112 chk->rec.data.stream_number,
1113 prev->rec.data.stream_number);
1114 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1115 0, M_DONTWAIT, 1, MT_DATA);
1116 if (oper) {
1117 struct sctp_paramhdr *ph;
1118 uint32_t *ippp;
1119
1120 SCTP_BUF_LEN(oper) =
1121 sizeof(struct sctp_paramhdr) +
1122 (3 * sizeof(uint32_t));
1123 ph = mtod(oper,
1124 struct sctp_paramhdr *);
1125 ph->param_type =
1126 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1127 ph->param_length =
1128 htons(SCTP_BUF_LEN(oper));
1129 ippp = (uint32_t *) (ph + 1);
1130 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1131 ippp++;
1132 *ippp = chk->rec.data.TSN_seq;
1133 ippp++;
1134 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1135 }
1136 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
1137 sctp_abort_an_association(stcb->sctp_ep,
1138 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1139
1140 *abort_flag = 1;
1141 return;
1142 }
1143 if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1144 chk->rec.data.stream_seq !=
1145 prev->rec.data.stream_seq) {
1146 /*
1147 * Huh, need the correct STR here,
1148 * they must be the same.
1149 */
1150 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1151 chk->rec.data.stream_seq,
1152 prev->rec.data.stream_seq);
1153 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1154 0, M_DONTWAIT, 1, MT_DATA);
1155 if (oper) {
1156 struct sctp_paramhdr *ph;
1157 uint32_t *ippp;
1158
1159 SCTP_BUF_LEN(oper) =
1160 sizeof(struct sctp_paramhdr) +
1161 (3 * sizeof(uint32_t));
1162 ph = mtod(oper,
1163 struct sctp_paramhdr *);
1164 ph->param_type =
1165 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1166 ph->param_length =
1167 htons(SCTP_BUF_LEN(oper));
1168 ippp = (uint32_t *) (ph + 1);
1169 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1170 ippp++;
1171 *ippp = chk->rec.data.TSN_seq;
1172 ippp++;
1173 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1174 }
1175 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
1176 sctp_abort_an_association(stcb->sctp_ep,
1177 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1178
1179 *abort_flag = 1;
1180 return;
1181 }
1182 } else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1183 SCTP_DATA_LAST_FRAG) {
1184 /* Insert chk MUST be a FIRST */
1185 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1186 SCTP_DATA_FIRST_FRAG) {
1187 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
1188 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1189 0, M_DONTWAIT, 1, MT_DATA);
1190 if (oper) {
1191 struct sctp_paramhdr *ph;
1192 uint32_t *ippp;
1193
1194 SCTP_BUF_LEN(oper) =
1195 sizeof(struct sctp_paramhdr) +
1196 (3 * sizeof(uint32_t));
1197 ph = mtod(oper,
1198 struct sctp_paramhdr *);
1199 ph->param_type =
1200 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1201 ph->param_length =
1202 htons(SCTP_BUF_LEN(oper));
1203 ippp = (uint32_t *) (ph + 1);
1204 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1205 ippp++;
1206 *ippp = chk->rec.data.TSN_seq;
1207 ippp++;
1208 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1209
1210 }
1211 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
1212 sctp_abort_an_association(stcb->sctp_ep,
1213 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1214
1215 *abort_flag = 1;
1216 return;
1217 }
1218 }
1219 }
1220 }
1221 if (next) {
1222 post_tsn = chk->rec.data.TSN_seq + 1;
1223 if (post_tsn == next->rec.data.TSN_seq) {
1224 /*
1225 * Ok the one I am inserting ahead of is my NEXT
1226 * one. A bit of valdiation here.
1227 */
1228 if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1229 /* Insert chk MUST be a last fragment */
1230 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
1231 != SCTP_DATA_LAST_FRAG) {
1232 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
1233 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
1234 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1235 0, M_DONTWAIT, 1, MT_DATA);
1236 if (oper) {
1237 struct sctp_paramhdr *ph;
1238 uint32_t *ippp;
1239
1240 SCTP_BUF_LEN(oper) =
1241 sizeof(struct sctp_paramhdr) +
1242 (3 * sizeof(uint32_t));
1243 ph = mtod(oper,
1244 struct sctp_paramhdr *);
1245 ph->param_type =
1246 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1247 ph->param_length =
1248 htons(SCTP_BUF_LEN(oper));
1249 ippp = (uint32_t *) (ph + 1);
1250 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1251 ippp++;
1252 *ippp = chk->rec.data.TSN_seq;
1253 ippp++;
1254 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1255 }
1256 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
1257 sctp_abort_an_association(stcb->sctp_ep,
1258 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1259
1260 *abort_flag = 1;
1261 return;
1262 }
1263 } else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1264 SCTP_DATA_MIDDLE_FRAG ||
1265 (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1266 SCTP_DATA_LAST_FRAG) {
1267 /*
1268 * Insert chk CAN be MIDDLE or FIRST NOT
1269 * LAST
1270 */
1271 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1272 SCTP_DATA_LAST_FRAG) {
1273 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
1274 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
1275 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1276 0, M_DONTWAIT, 1, MT_DATA);
1277 if (oper) {
1278 struct sctp_paramhdr *ph;
1279 uint32_t *ippp;
1280
1281 SCTP_BUF_LEN(oper) =
1282 sizeof(struct sctp_paramhdr) +
1283 (3 * sizeof(uint32_t));
1284 ph = mtod(oper,
1285 struct sctp_paramhdr *);
1286 ph->param_type =
1287 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1288 ph->param_length =
1289 htons(SCTP_BUF_LEN(oper));
1290 ippp = (uint32_t *) (ph + 1);
1291 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1292 ippp++;
1293 *ippp = chk->rec.data.TSN_seq;
1294 ippp++;
1295 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1296
1297 }
1298 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
1299 sctp_abort_an_association(stcb->sctp_ep,
1300 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1301
1302 *abort_flag = 1;
1303 return;
1304 }
1305 if (chk->rec.data.stream_number !=
1306 next->rec.data.stream_number) {
1307 /*
1308 * Huh, need the correct STR here,
1309 * they must be the same.
1310 */
1311 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1312 chk->rec.data.stream_number,
1313 next->rec.data.stream_number);
1314 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1315 0, M_DONTWAIT, 1, MT_DATA);
1316 if (oper) {
1317 struct sctp_paramhdr *ph;
1318 uint32_t *ippp;
1319
1320 SCTP_BUF_LEN(oper) =
1321 sizeof(struct sctp_paramhdr) +
1322 (3 * sizeof(uint32_t));
1323 ph = mtod(oper,
1324 struct sctp_paramhdr *);
1325 ph->param_type =
1326 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1327 ph->param_length =
1328 htons(SCTP_BUF_LEN(oper));
1329 ippp = (uint32_t *) (ph + 1);
1330 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1331 ippp++;
1332 *ippp = chk->rec.data.TSN_seq;
1333 ippp++;
1334 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1335
1336 }
1337 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
1338 sctp_abort_an_association(stcb->sctp_ep,
1339 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1340
1341 *abort_flag = 1;
1342 return;
1343 }
1344 if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1345 chk->rec.data.stream_seq !=
1346 next->rec.data.stream_seq) {
1347 /*
1348 * Huh, need the correct STR here,
1349 * they must be the same.
1350 */
1351 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1352 chk->rec.data.stream_seq,
1353 next->rec.data.stream_seq);
1354 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1355 0, M_DONTWAIT, 1, MT_DATA);
1356 if (oper) {
1357 struct sctp_paramhdr *ph;
1358 uint32_t *ippp;
1359
1360 SCTP_BUF_LEN(oper) =
1361 sizeof(struct sctp_paramhdr) +
1362 (3 * sizeof(uint32_t));
1363 ph = mtod(oper,
1364 struct sctp_paramhdr *);
1365 ph->param_type =
1366 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1367 ph->param_length =
1368 htons(SCTP_BUF_LEN(oper));
1369 ippp = (uint32_t *) (ph + 1);
1370 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1371 ippp++;
1372 *ippp = chk->rec.data.TSN_seq;
1373 ippp++;
1374 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1375 }
1376 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
1377 sctp_abort_an_association(stcb->sctp_ep,
1378 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1379
1380 *abort_flag = 1;
1381 return;
1382 }
1383 }
1384 }
1385 }
1386 /* Do we need to do some delivery? check */
1387 sctp_deliver_reasm_check(stcb, asoc);
1388 }
1389
1390 /*
1391 * This is an unfortunate routine. It checks to make sure a evil guy is not
1392 * stuffing us full of bad packet fragments. A broken peer could also do this
1393 * but this is doubtful. It is to bad I must worry about evil crackers sigh
1394 * :< more cycles.
1395 */
1396 static int
1397 sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
1398 uint32_t TSN_seq)
1399 {
1400 struct sctp_tmit_chunk *at;
1401 uint32_t tsn_est;
1402
1403 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1404 if (compare_with_wrap(TSN_seq,
1405 at->rec.data.TSN_seq, MAX_TSN)) {
1406 /* is it one bigger? */
1407 tsn_est = at->rec.data.TSN_seq + 1;
1408 if (tsn_est == TSN_seq) {
1409 /* yep. It better be a last then */
1410 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1411 SCTP_DATA_LAST_FRAG) {
1412 /*
1413 * Ok this guy belongs next to a guy
1414 * that is NOT last, it should be a
1415 * middle/last, not a complete
1416 * chunk.
1417 */
1418 return (1);
1419 } else {
1420 /*
1421 * This guy is ok since its a LAST
1422 * and the new chunk is a fully
1423 * self- contained one.
1424 */
1425 return (0);
1426 }
1427 }
1428 } else if (TSN_seq == at->rec.data.TSN_seq) {
1429 /* Software error since I have a dup? */
1430 return (1);
1431 } else {
1432 /*
1433 * Ok, 'at' is larger than new chunk but does it
1434 * need to be right before it.
1435 */
1436 tsn_est = TSN_seq + 1;
1437 if (tsn_est == at->rec.data.TSN_seq) {
1438 /* Yep, It better be a first */
1439 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1440 SCTP_DATA_FIRST_FRAG) {
1441 return (1);
1442 } else {
1443 return (0);
1444 }
1445 }
1446 }
1447 }
1448 return (0);
1449 }
1450
1451
1452 static int
1453 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1454 struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
1455 struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1456 int *break_flag, int last_chunk)
1457 {
1458 /* Process a data chunk */
1459 /* struct sctp_tmit_chunk *chk; */
1460 struct sctp_tmit_chunk *chk;
1461 uint32_t tsn, gap;
1462 struct mbuf *dmbuf;
1463 int indx, the_len;
1464 int need_reasm_check = 0;
1465 uint16_t strmno, strmseq;
1466 struct mbuf *oper;
1467 struct sctp_queued_to_read *control;
1468 int ordered;
1469 uint32_t protocol_id;
1470 uint8_t chunk_flags;
1471 struct sctp_stream_reset_list *liste;
1472
1473 chk = NULL;
1474 tsn = ntohl(ch->dp.tsn);
1475 chunk_flags = ch->ch.chunk_flags;
1476 if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1477 asoc->send_sack = 1;
1478 }
1479 protocol_id = ch->dp.protocol_id;
1480 ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
1481 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1482 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1483 }
1484 if (stcb == NULL) {
1485 return (0);
1486 }
1487 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
1488 if (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN) ||
1489 asoc->cumulative_tsn == tsn) {
1490 /* It is a duplicate */
1491 SCTP_STAT_INCR(sctps_recvdupdata);
1492 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1493 /* Record a dup for the next outbound sack */
1494 asoc->dup_tsns[asoc->numduptsns] = tsn;
1495 asoc->numduptsns++;
1496 }
1497 asoc->send_sack = 1;
1498 return (0);
1499 }
1500 /* Calculate the number of TSN's between the base and this TSN */
1501 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1502 if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1503 /* Can't hold the bit in the mapping at max array, toss it */
1504 return (0);
1505 }
1506 if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1507 SCTP_TCB_LOCK_ASSERT(stcb);
1508 if (sctp_expand_mapping_array(asoc, gap)) {
1509 /* Can't expand, drop it */
1510 return (0);
1511 }
1512 }
1513 if (compare_with_wrap(tsn, *high_tsn, MAX_TSN)) {
1514 *high_tsn = tsn;
1515 }
1516 /* See if we have received this one already */
1517 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1518 SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1519 SCTP_STAT_INCR(sctps_recvdupdata);
1520 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1521 /* Record a dup for the next outbound sack */
1522 asoc->dup_tsns[asoc->numduptsns] = tsn;
1523 asoc->numduptsns++;
1524 }
1525 asoc->send_sack = 1;
1526 return (0);
1527 }
1528 /*
1529 * Check to see about the GONE flag, duplicates would cause a sack
1530 * to be sent up above
1531 */
1532 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1533 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1534 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
1535 ) {
1536 /*
1537 * wait a minute, this guy is gone, there is no longer a
1538 * receiver. Send peer an ABORT!
1539 */
1540 struct mbuf *op_err;
1541
1542 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1543 sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err, SCTP_SO_NOT_LOCKED);
1544 *abort_flag = 1;
1545 return (0);
1546 }
1547 /*
1548 * Now before going further we see if there is room. If NOT then we
1549 * MAY let one through only IF this TSN is the one we are waiting
1550 * for on a partial delivery API.
1551 */
1552
1553 /* now do the tests */
1554 if (((asoc->cnt_on_all_streams +
1555 asoc->cnt_on_reasm_queue +
1556 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1557 (((int)asoc->my_rwnd) <= 0)) {
1558 /*
1559 * When we have NO room in the rwnd we check to make sure
1560 * the reader is doing its job...
1561 */
1562 if (stcb->sctp_socket->so_rcv.sb_cc) {
1563 /* some to read, wake-up */
1564 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1565 struct socket *so;
1566
1567 so = SCTP_INP_SO(stcb->sctp_ep);
1568 atomic_add_int(&stcb->asoc.refcnt, 1);
1569 SCTP_TCB_UNLOCK(stcb);
1570 SCTP_SOCKET_LOCK(so, 1);
1571 SCTP_TCB_LOCK(stcb);
1572 atomic_subtract_int(&stcb->asoc.refcnt, 1);
1573 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1574 /* assoc was freed while we were unlocked */
1575 SCTP_SOCKET_UNLOCK(so, 1);
1576 return (0);
1577 }
1578 #endif
1579 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1580 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1581 SCTP_SOCKET_UNLOCK(so, 1);
1582 #endif
1583 }
1584 /* now is it in the mapping array of what we have accepted? */
1585 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN) &&
1586 compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
1587 /* Nope not in the valid range dump it */
1588 sctp_set_rwnd(stcb, asoc);
1589 if ((asoc->cnt_on_all_streams +
1590 asoc->cnt_on_reasm_queue +
1591 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1592 SCTP_STAT_INCR(sctps_datadropchklmt);
1593 } else {
1594 SCTP_STAT_INCR(sctps_datadroprwnd);
1595 }
1596 indx = *break_flag;
1597 *break_flag = 1;
1598 return (0);
1599 }
1600 }
1601 strmno = ntohs(ch->dp.stream_id);
1602 if (strmno >= asoc->streamincnt) {
1603 struct sctp_paramhdr *phdr;
1604 struct mbuf *mb;
1605
1606 mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
1607 0, M_DONTWAIT, 1, MT_DATA);
1608 if (mb != NULL) {
1609 /* add some space up front so prepend will work well */
1610 SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
1611 phdr = mtod(mb, struct sctp_paramhdr *);
1612 /*
1613 * Error causes are just param's and this one has
1614 * two back to back phdr, one with the error type
1615 * and size, the other with the streamid and a rsvd
1616 */
1617 SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
1618 phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
1619 phdr->param_length =
1620 htons(sizeof(struct sctp_paramhdr) * 2);
1621 phdr++;
1622 /* We insert the stream in the type field */
1623 phdr->param_type = ch->dp.stream_id;
1624 /* And set the length to 0 for the rsvd field */
1625 phdr->param_length = 0;
1626 sctp_queue_op_err(stcb, mb);
1627 }
1628 SCTP_STAT_INCR(sctps_badsid);
1629 SCTP_TCB_LOCK_ASSERT(stcb);
1630 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1631 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
1632 asoc->highest_tsn_inside_nr_map = tsn;
1633 }
1634 if (tsn == (asoc->cumulative_tsn + 1)) {
1635 /* Update cum-ack */
1636 asoc->cumulative_tsn = tsn;
1637 }
1638 return (0);
1639 }
1640 /*
1641 * Before we continue lets validate that we are not being fooled by
1642 * an evil attacker. We can only have 4k chunks based on our TSN
1643 * spread allowed by the mapping array 512 * 8 bits, so there is no
1644 * way our stream sequence numbers could have wrapped. We of course
1645 * only validate the FIRST fragment so the bit must be set.
1646 */
1647 strmseq = ntohs(ch->dp.stream_sequence);
1648 #ifdef SCTP_ASOCLOG_OF_TSNS
1649 SCTP_TCB_LOCK_ASSERT(stcb);
1650 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1651 asoc->tsn_in_at = 0;
1652 asoc->tsn_in_wrapped = 1;
1653 }
1654 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1655 asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1656 asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1657 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1658 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1659 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1660 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1661 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1662 asoc->tsn_in_at++;
1663 #endif
1664 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1665 (TAILQ_EMPTY(&asoc->resetHead)) &&
1666 (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1667 (compare_with_wrap(asoc->strmin[strmno].last_sequence_delivered,
1668 strmseq, MAX_SEQ) ||
1669 asoc->strmin[strmno].last_sequence_delivered == strmseq)) {
1670 /* The incoming sseq is behind where we last delivered? */
1671 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1672 strmseq, asoc->strmin[strmno].last_sequence_delivered);
1673 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1674 0, M_DONTWAIT, 1, MT_DATA);
1675 if (oper) {
1676 struct sctp_paramhdr *ph;
1677 uint32_t *ippp;
1678
1679 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
1680 (3 * sizeof(uint32_t));
1681 ph = mtod(oper, struct sctp_paramhdr *);
1682 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1683 ph->param_length = htons(SCTP_BUF_LEN(oper));
1684 ippp = (uint32_t *) (ph + 1);
1685 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1686 ippp++;
1687 *ippp = tsn;
1688 ippp++;
1689 *ippp = ((strmno << 16) | strmseq);
1690
1691 }
1692 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1693 sctp_abort_an_association(stcb->sctp_ep, stcb,
1694 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1695 *abort_flag = 1;
1696 return (0);
1697 }
1698 /************************************
1699 * From here down we may find ch-> invalid
1700 * so its a good idea NOT to use it.
1701 *************************************/
1702
1703 the_len = (chk_length - sizeof(struct sctp_data_chunk));
1704 if (last_chunk == 0) {
1705 dmbuf = SCTP_M_COPYM(*m,
1706 (offset + sizeof(struct sctp_data_chunk)),
1707 the_len, M_DONTWAIT);
1708 #ifdef SCTP_MBUF_LOGGING
1709 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1710 struct mbuf *mat;
1711
1712 mat = dmbuf;
1713 while (mat) {
1714 if (SCTP_BUF_IS_EXTENDED(mat)) {
1715 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
1716 }
1717 mat = SCTP_BUF_NEXT(mat);
1718 }
1719 }
1720 #endif
1721 } else {
1722 /* We can steal the last chunk */
1723 int l_len;
1724
1725 dmbuf = *m;
1726 /* lop off the top part */
1727 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1728 if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1729 l_len = SCTP_BUF_LEN(dmbuf);
1730 } else {
1731 /*
1732 * need to count up the size hopefully does not hit
1733 * this to often :-0
1734 */
1735 struct mbuf *lat;
1736
1737 l_len = 0;
1738 lat = dmbuf;
1739 while (lat) {
1740 l_len += SCTP_BUF_LEN(lat);
1741 lat = SCTP_BUF_NEXT(lat);
1742 }
1743 }
1744 if (l_len > the_len) {
1745 /* Trim the end round bytes off too */
1746 m_adj(dmbuf, -(l_len - the_len));
1747 }
1748 }
1749 if (dmbuf == NULL) {
1750 SCTP_STAT_INCR(sctps_nomem);
1751 return (0);
1752 }
1753 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1754 asoc->fragmented_delivery_inprogress == 0 &&
1755 TAILQ_EMPTY(&asoc->resetHead) &&
1756 ((ordered == 0) ||
1757 ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1758 TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1759 /* Candidate for express delivery */
1760 /*
1761 * Its not fragmented, No PD-API is up, Nothing in the
1762 * delivery queue, Its un-ordered OR ordered and the next to
1763 * deliver AND nothing else is stuck on the stream queue,
1764 * And there is room for it in the socket buffer. Lets just
1765 * stuff it up the buffer....
1766 */
1767
1768 /* It would be nice to avoid this copy if we could :< */
1769 sctp_alloc_a_readq(stcb, control);
1770 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1771 protocol_id,
1772 stcb->asoc.context,
1773 strmno, strmseq,
1774 chunk_flags,
1775 dmbuf);
1776 if (control == NULL) {
1777 goto failed_express_del;
1778 }
1779 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1780 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
1781 asoc->highest_tsn_inside_nr_map = tsn;
1782 }
1783 sctp_add_to_readq(stcb->sctp_ep, stcb,
1784 control, &stcb->sctp_socket->so_rcv,
1785 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1786
1787 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1788 /* for ordered, bump what we delivered */
1789 asoc->strmin[strmno].last_sequence_delivered++;
1790 }
1791 SCTP_STAT_INCR(sctps_recvexpress);
1792 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1793 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1794 SCTP_STR_LOG_FROM_EXPRS_DEL);
1795 }
1796 control = NULL;
1797
1798 goto finish_express_del;
1799 }
1800 failed_express_del:
1801 /* If we reach here this is a new chunk */
1802 chk = NULL;
1803 control = NULL;
1804 /* Express for fragmented delivery? */
1805 if ((asoc->fragmented_delivery_inprogress) &&
1806 (stcb->asoc.control_pdapi) &&
1807 (asoc->str_of_pdapi == strmno) &&
1808 (asoc->ssn_of_pdapi == strmseq)
1809 ) {
1810 control = stcb->asoc.control_pdapi;
1811 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1812 /* Can't be another first? */
1813 goto failed_pdapi_express_del;
1814 }
1815 if (tsn == (control->sinfo_tsn + 1)) {
1816 /* Yep, we can add it on */
1817 int end = 0;
1818 uint32_t cumack;
1819
1820 if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1821 end = 1;
1822 }
1823 cumack = asoc->cumulative_tsn;
1824 if ((cumack + 1) == tsn)
1825 cumack = tsn;
1826
1827 if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1828 tsn,
1829 &stcb->sctp_socket->so_rcv)) {
1830 SCTP_PRINTF("Append fails end:%d\n", end);
1831 goto failed_pdapi_express_del;
1832 }
1833 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1834 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
1835 asoc->highest_tsn_inside_nr_map = tsn;
1836 }
1837 SCTP_STAT_INCR(sctps_recvexpressm);
1838 control->sinfo_tsn = tsn;
1839 asoc->tsn_last_delivered = tsn;
1840 asoc->fragment_flags = chunk_flags;
1841 asoc->tsn_of_pdapi_last_delivered = tsn;
1842 asoc->last_flags_delivered = chunk_flags;
1843 asoc->last_strm_seq_delivered = strmseq;
1844 asoc->last_strm_no_delivered = strmno;
1845 if (end) {
1846 /* clean up the flags and such */
1847 asoc->fragmented_delivery_inprogress = 0;
1848 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1849 asoc->strmin[strmno].last_sequence_delivered++;
1850 }
1851 stcb->asoc.control_pdapi = NULL;
1852 if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
1853 /*
1854 * There could be another message
1855 * ready
1856 */
1857 need_reasm_check = 1;
1858 }
1859 }
1860 control = NULL;
1861 goto finish_express_del;
1862 }
1863 }
1864 failed_pdapi_express_del:
1865 control = NULL;
1866 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
1867 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1868 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
1869 asoc->highest_tsn_inside_nr_map = tsn;
1870 }
1871 } else {
1872 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1873 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
1874 asoc->highest_tsn_inside_map = tsn;
1875 }
1876 }
1877 if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1878 sctp_alloc_a_chunk(stcb, chk);
1879 if (chk == NULL) {
1880 /* No memory so we drop the chunk */
1881 SCTP_STAT_INCR(sctps_nomem);
1882 if (last_chunk == 0) {
1883 /* we copied it, free the copy */
1884 sctp_m_freem(dmbuf);
1885 }
1886 return (0);
1887 }
1888 chk->rec.data.TSN_seq = tsn;
1889 chk->no_fr_allowed = 0;
1890 chk->rec.data.stream_seq = strmseq;
1891 chk->rec.data.stream_number = strmno;
1892 chk->rec.data.payloadtype = protocol_id;
1893 chk->rec.data.context = stcb->asoc.context;
1894 chk->rec.data.doing_fast_retransmit = 0;
1895 chk->rec.data.rcv_flags = chunk_flags;
1896 chk->asoc = asoc;
1897 chk->send_size = the_len;
1898 chk->whoTo = net;
1899 atomic_add_int(&net->ref_count, 1);
1900 chk->data = dmbuf;
1901 } else {
1902 sctp_alloc_a_readq(stcb, control);
1903 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1904 protocol_id,
1905 stcb->asoc.context,
1906 strmno, strmseq,
1907 chunk_flags,
1908 dmbuf);
1909 if (control == NULL) {
1910 /* No memory so we drop the chunk */
1911 SCTP_STAT_INCR(sctps_nomem);
1912 if (last_chunk == 0) {
1913 /* we copied it, free the copy */
1914 sctp_m_freem(dmbuf);
1915 }
1916 return (0);
1917 }
1918 control->length = the_len;
1919 }
1920
1921 /* Mark it as received */
1922 /* Now queue it where it belongs */
1923 if (control != NULL) {
1924 /* First a sanity check */
1925 if (asoc->fragmented_delivery_inprogress) {
1926 /*
1927 * Ok, we have a fragmented delivery in progress if
1928 * this chunk is next to deliver OR belongs in our
1929 * view to the reassembly, the peer is evil or
1930 * broken.
1931 */
1932 uint32_t estimate_tsn;
1933
1934 estimate_tsn = asoc->tsn_last_delivered + 1;
1935 if (TAILQ_EMPTY(&asoc->reasmqueue) &&
1936 (estimate_tsn == control->sinfo_tsn)) {
1937 /* Evil/Broke peer */
1938 sctp_m_freem(control->data);
1939 control->data = NULL;
1940 if (control->whoFrom) {
1941 sctp_free_remote_addr(control->whoFrom);
1942 control->whoFrom = NULL;
1943 }
1944 sctp_free_a_readq(stcb, control);
1945 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1946 0, M_DONTWAIT, 1, MT_DATA);
1947 if (oper) {
1948 struct sctp_paramhdr *ph;
1949 uint32_t *ippp;
1950
1951 SCTP_BUF_LEN(oper) =
1952 sizeof(struct sctp_paramhdr) +
1953 (3 * sizeof(uint32_t));
1954 ph = mtod(oper, struct sctp_paramhdr *);
1955 ph->param_type =
1956 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1957 ph->param_length = htons(SCTP_BUF_LEN(oper));
1958 ippp = (uint32_t *) (ph + 1);
1959 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
1960 ippp++;
1961 *ippp = tsn;
1962 ippp++;
1963 *ippp = ((strmno << 16) | strmseq);
1964 }
1965 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1966 sctp_abort_an_association(stcb->sctp_ep, stcb,
1967 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1968
1969 *abort_flag = 1;
1970 return (0);
1971 } else {
1972 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1973 sctp_m_freem(control->data);
1974 control->data = NULL;
1975 if (control->whoFrom) {
1976 sctp_free_remote_addr(control->whoFrom);
1977 control->whoFrom = NULL;
1978 }
1979 sctp_free_a_readq(stcb, control);
1980
1981 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1982 0, M_DONTWAIT, 1, MT_DATA);
1983 if (oper) {
1984 struct sctp_paramhdr *ph;
1985 uint32_t *ippp;
1986
1987 SCTP_BUF_LEN(oper) =
1988 sizeof(struct sctp_paramhdr) +
1989 (3 * sizeof(uint32_t));
1990 ph = mtod(oper,
1991 struct sctp_paramhdr *);
1992 ph->param_type =
1993 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1994 ph->param_length =
1995 htons(SCTP_BUF_LEN(oper));
1996 ippp = (uint32_t *) (ph + 1);
1997 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16);
1998 ippp++;
1999 *ippp = tsn;
2000 ippp++;
2001 *ippp = ((strmno << 16) | strmseq);
2002 }
2003 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
2004 sctp_abort_an_association(stcb->sctp_ep,
2005 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
2006
2007 *abort_flag = 1;
2008 return (0);
2009 }
2010 }
2011 } else {
2012 /* No PDAPI running */
2013 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
2014 /*
2015 * Reassembly queue is NOT empty validate
2016 * that this tsn does not need to be in
2017 * reasembly queue. If it does then our peer
2018 * is broken or evil.
2019 */
2020 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
2021 sctp_m_freem(control->data);
2022 control->data = NULL;
2023 if (control->whoFrom) {
2024 sctp_free_remote_addr(control->whoFrom);
2025 control->whoFrom = NULL;
2026 }
2027 sctp_free_a_readq(stcb, control);
2028 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
2029 0, M_DONTWAIT, 1, MT_DATA);
2030 if (oper) {
2031 struct sctp_paramhdr *ph;
2032 uint32_t *ippp;
2033
2034 SCTP_BUF_LEN(oper) =
2035 sizeof(struct sctp_paramhdr) +
2036 (3 * sizeof(uint32_t));
2037 ph = mtod(oper,
2038 struct sctp_paramhdr *);
2039 ph->param_type =
2040 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2041 ph->param_length =
2042 htons(SCTP_BUF_LEN(oper));
2043 ippp = (uint32_t *) (ph + 1);
2044 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2045 ippp++;
2046 *ippp = tsn;
2047 ippp++;
2048 *ippp = ((strmno << 16) | strmseq);
2049 }
2050 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
2051 sctp_abort_an_association(stcb->sctp_ep,
2052 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
2053
2054 *abort_flag = 1;
2055 return (0);
2056 }
2057 }
2058 }
2059 /* ok, if we reach here we have passed the sanity checks */
2060 if (chunk_flags & SCTP_DATA_UNORDERED) {
2061 /* queue directly into socket buffer */
2062 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2063 sctp_add_to_readq(stcb->sctp_ep, stcb,
2064 control,
2065 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2066 } else {
2067 /*
2068 * Special check for when streams are resetting. We
2069 * could be more smart about this and check the
2070 * actual stream to see if it is not being reset..
2071 * that way we would not create a HOLB when amongst
2072 * streams being reset and those not being reset.
2073 *
2074 * We take complete messages that have a stream reset
2075 * intervening (aka the TSN is after where our
2076 * cum-ack needs to be) off and put them on a
2077 * pending_reply_queue. The reassembly ones we do
2078 * not have to worry about since they are all sorted
2079 * and proceessed by TSN order. It is only the
2080 * singletons I must worry about.
2081 */
2082 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2083 ((compare_with_wrap(tsn, liste->tsn, MAX_TSN)))
2084 ) {
2085 /*
2086 * yep its past where we need to reset... go
2087 * ahead and queue it.
2088 */
2089 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2090 /* first one on */
2091 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2092 } else {
2093 struct sctp_queued_to_read *ctlOn;
2094 unsigned char inserted = 0;
2095
2096 ctlOn = TAILQ_FIRST(&asoc->pending_reply_queue);
2097 while (ctlOn) {
2098 if (compare_with_wrap(control->sinfo_tsn,
2099 ctlOn->sinfo_tsn, MAX_TSN)) {
2100 ctlOn = TAILQ_NEXT(ctlOn, next);
2101 } else {
2102 /* found it */
2103 TAILQ_INSERT_BEFORE(ctlOn, control, next);
2104 inserted = 1;
2105 break;
2106 }
2107 }
2108 if (inserted == 0) {
2109 /*
2110 * must be put at end, use
2111 * prevP (all setup from
2112 * loop) to setup nextP.
2113 */
2114 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2115 }
2116 }
2117 } else {
2118 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
2119 if (*abort_flag) {
2120 return (0);
2121 }
2122 }
2123 }
2124 } else {
2125 /* Into the re-assembly queue */
2126 sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
2127 if (*abort_flag) {
2128 /*
2129 * the assoc is now gone and chk was put onto the
2130 * reasm queue, which has all been freed.
2131 */
2132 *m = NULL;
2133 return (0);
2134 }
2135 }
2136 finish_express_del:
2137 if (tsn == (asoc->cumulative_tsn + 1)) {
2138 /* Update cum-ack */
2139 asoc->cumulative_tsn = tsn;
2140 }
2141 if (last_chunk) {
2142 *m = NULL;
2143 }
2144 if (ordered) {
2145 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2146 } else {
2147 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2148 }
2149 SCTP_STAT_INCR(sctps_recvdata);
2150 /* Set it present please */
2151 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2152 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2153 }
2154 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2155 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2156 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2157 }
2158 /* check the special flag for stream resets */
2159 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2160 ((compare_with_wrap(asoc->cumulative_tsn, liste->tsn, MAX_TSN)) ||
2161 (asoc->cumulative_tsn == liste->tsn))
2162 ) {
2163 /*
2164 * we have finished working through the backlogged TSN's now
2165 * time to reset streams. 1: call reset function. 2: free
2166 * pending_reply space 3: distribute any chunks in
2167 * pending_reply_queue.
2168 */
2169 struct sctp_queued_to_read *ctl;
2170
2171 sctp_reset_in_stream(stcb, liste->number_entries, liste->req.list_of_streams);
2172 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2173 SCTP_FREE(liste, SCTP_M_STRESET);
2174 /* sa_ignore FREED_MEMORY */
2175 liste = TAILQ_FIRST(&asoc->resetHead);
2176 ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2177 if (ctl && (liste == NULL)) {
2178 /* All can be removed */
2179 while (ctl) {
2180 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2181 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2182 if (*abort_flag) {
2183 return (0);
2184 }
2185 ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2186 }
2187 } else if (ctl) {
2188 /* more than one in queue */
2189 while (!compare_with_wrap(ctl->sinfo_tsn, liste->tsn, MAX_TSN)) {
2190 /*
2191 * if ctl->sinfo_tsn is <= liste->tsn we can
2192 * process it which is the NOT of
2193 * ctl->sinfo_tsn > liste->tsn
2194 */
2195 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2196 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2197 if (*abort_flag) {
2198 return (0);
2199 }
2200 ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2201 }
2202 }
2203 /*
2204 * Now service re-assembly to pick up anything that has been
2205 * held on reassembly queue?
2206 */
2207 sctp_deliver_reasm_check(stcb, asoc);
2208 need_reasm_check = 0;
2209 }
2210 if (need_reasm_check) {
2211 /* Another one waits ? */
2212 sctp_deliver_reasm_check(stcb, asoc);
2213 }
2214 return (1);
2215 }
2216
/*
 * Lookup table for sliding the TSN mapping array: for a byte value x,
 * sctp_map_lookup_tab[x] is the number of consecutive 1-bits starting at
 * the least significant bit of x (e.g. tab[0x01] = 1, tab[0x07] = 3,
 * tab[0x02] = 0, tab[0xff] = 8).  sctp_slide_mapping_arrays() consults it
 * only for bytes != 0xff (fully-set bytes are counted as 8 directly), so
 * the value is the bit offset of the first missing TSN within that byte.
 */
int8_t sctp_map_lookup_tab[256] = {
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 5,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 6,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 5,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 7,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 5,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 6,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 5,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 8
};
2251
2252
/*
 * Recompute the cumulative TSN from the received-TSN bitmaps and, when
 * possible, reclaim space by either clearing the mapping arrays outright
 * (everything up to the highest TSN has arrived) or sliding both arrays
 * down so that mapping_array_base_tsn advances.  Called with the TCB lock
 * held after inbound DATA processing.
 */
void
sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
{
	/*
	 * Now we also need to check the mapping array in a couple of ways.
	 * 1) Did we move the cum-ack point?
	 *
	 * When you first glance at this you might think that all entries that
	 * make up the postion of the cum-ack would be in the nr-mapping
	 * array only.. i.e. things up to the cum-ack are always
	 * deliverable. Thats true with one exception, when its a fragmented
	 * message we may not deliver the data until some threshold (or all
	 * of it) is in place. So we must OR the nr_mapping_array and
	 * mapping_array to get a true picture of the cum-ack.
	 */
	struct sctp_association *asoc;
	int at;
	uint8_t val;
	int slide_from, slide_end, lgap, distance;
	uint32_t old_cumack, old_base, old_highest, highest_tsn;

	asoc = &stcb->asoc;
	at = 0;

	/* Snapshot the pre-slide state for map logging below. */
	old_cumack = asoc->cumulative_tsn;
	old_base = asoc->mapping_array_base_tsn;
	old_highest = asoc->highest_tsn_inside_map;
	/*
	 * We could probably improve this a small bit by calculating the
	 * offset of the current cum-ack as the starting point.
	 */
	at = 0;
	/*
	 * Count consecutive received TSNs from the base of the map.  "at"
	 * ends up as the number of in-order bits set (ordered OR unordered),
	 * and slide_from as the index of the first byte with a gap.
	 */
	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
		if (val == 0xff) {
			at += 8;
		} else {
			/* there is a 0 bit */
			at += sctp_map_lookup_tab[val];
			break;
		}
	}
	/* New cum-ack is the last TSN of the run (base - 1 when at == 0). */
	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);

	if (compare_with_wrap(asoc->cumulative_tsn, asoc->highest_tsn_inside_map, MAX_TSN) &&
	    compare_with_wrap(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
		/*
		 * Invariant violated: the cum-ack may never exceed both
		 * highest-TSN trackers.  Panic under INVARIANTS, otherwise
		 * log and force the trackers back to the cum-ack.
		 */
#ifdef INVARIANTS
		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
#else
		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
		sctp_print_mapping_array(asoc);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
		}
		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
#endif
	}
	/* highest_tsn = max of the two trackers (wrap-aware). */
	if (compare_with_wrap(asoc->highest_tsn_inside_nr_map,
	    asoc->highest_tsn_inside_map,
	    MAX_TSN)) {
		highest_tsn = asoc->highest_tsn_inside_nr_map;
	} else {
		highest_tsn = asoc->highest_tsn_inside_map;
	}
	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
		/* The complete array was completed by a single FR */
		/* highest becomes the cum-ack */
		int clr;

#ifdef INVARIANTS
		unsigned int i;

#endif

		/* clear the array */
		clr = ((at + 7) >> 3);
		if (clr > asoc->mapping_array_size) {
			clr = asoc->mapping_array_size;
		}
		memset(asoc->mapping_array, 0, clr);
		memset(asoc->nr_mapping_array, 0, clr);
#ifdef INVARIANTS
		for (i = 0; i < asoc->mapping_array_size; i++) {
			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
				printf("Error Mapping array's not clean at clear\n");
				sctp_print_mapping_array(asoc);
			}
		}
#endif
		/* Restart the map immediately after the cum-ack. */
		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
	} else if (at >= 8) {
		/* we can slide the mapping array down */
		/* slide_from holds where we hit the first NON 0xff byte */

		/*
		 * now calculate the ceiling of the move using our highest
		 * TSN value
		 */
		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
		slide_end = (lgap >> 3);
		if (slide_end < slide_from) {
			/* Should be impossible: highest TSN before the gap. */
			sctp_print_mapping_array(asoc);
#ifdef INVARIANTS
			panic("impossible slide");
#else
			printf("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n",
			    lgap, slide_end, slide_from, at);
			return;
#endif
		}
		if (slide_end > asoc->mapping_array_size) {
#ifdef INVARIANTS
			panic("would overrun buffer");
#else
			printf("Gak, would have overrun map end:%d slide_end:%d\n",
			    asoc->mapping_array_size, slide_end);
			slide_end = asoc->mapping_array_size;
#endif
		}
		distance = (slide_end - slide_from) + 1;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
			sctp_log_map(old_base, old_cumack, old_highest,
			    SCTP_MAP_PREPARE_SLIDE);
			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
			    (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
		}
		if (distance + slide_from > asoc->mapping_array_size ||
		    distance < 0) {
			/*
			 * Here we do NOT slide forward the array so that
			 * hopefully when more data comes in to fill it up
			 * we will be able to slide it forward. Really I
			 * don't think this should happen :-0
			 */

			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
				    (uint32_t) asoc->mapping_array_size,
				    SCTP_MAP_SLIDE_NONE);
			}
		} else {
			int ii;

			/* Copy the live bytes down, then zero the tail. */
			for (ii = 0; ii < distance; ii++) {
				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];

			}
			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
				asoc->mapping_array[ii] = 0;
				asoc->nr_mapping_array[ii] = 0;
			}
			/*
			 * If a tracker sat exactly one before the old base
			 * (i.e. an empty map), keep it one before the new
			 * base as well.
			 */
			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
				asoc->highest_tsn_inside_map += (slide_from << 3);
			}
			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
			}
			asoc->mapping_array_base_tsn += (slide_from << 3);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
				sctp_log_map(asoc->mapping_array_base_tsn,
				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
				    SCTP_MAP_SLIDE_RESULT);
			}
		}
	}
}
2424
2425
/*
 * Decide whether to emit a SACK right now or (re)arm the delayed-ack
 * timer.  In SHUTDOWN-SENT state the delayed-ack timer is stopped and a
 * SHUTDOWN plus SACK are sent immediately; otherwise the decision is
 * based on gap state, duplicate TSNs, the delayed-ack setting and the
 * packets-seen/sack-frequency counters.  abort_flag is accepted for
 * interface symmetry but is not written here.
 */
void
sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap, int *abort_flag)
{
	struct sctp_association *asoc;
	uint32_t highest_tsn;

	asoc = &stcb->asoc;
	/* highest_tsn = max(nr_map, map) highest tracker, wrap-aware. */
	if (compare_with_wrap(asoc->highest_tsn_inside_nr_map,
	    asoc->highest_tsn_inside_map,
	    MAX_TSN)) {
		highest_tsn = asoc->highest_tsn_inside_nr_map;
	} else {
		highest_tsn = asoc->highest_tsn_inside_map;
	}

	/*
	 * Now we need to see if we need to queue a sack or just start the
	 * timer (if allowed).
	 */
	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
		/*
		 * Ok special case, in SHUTDOWN-SENT case. here we maker
		 * sure SACK timer is off and instead send a SHUTDOWN and a
		 * SACK
		 */
		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
			    stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
		}
		sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
		sctp_send_sack(stcb);
	} else {
		int is_a_gap;

		/* is there a gap now ? */
		is_a_gap = compare_with_wrap(highest_tsn, stcb->asoc.cumulative_tsn, MAX_TSN);

		/*
		 * CMT DAC algorithm: increase number of packets received
		 * since last ack
		 */
		stcb->asoc.cmt_dac_pkts_rcvd++;

		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
							 * SACK */
		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
							 * longer is one */
		    (stcb->asoc.numduptsns) ||	/* we have dup's */
		    (is_a_gap) ||	/* is still a gap */
		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
		    ) {

			/*
			 * Even though a SACK is warranted, CMT with DAC
			 * enabled may still choose to delay it when the
			 * only trigger is a gap (no explicit request, no
			 * dups, delayed acks enabled, no timer pending).
			 */
			if ((stcb->asoc.sctp_cmt_on_off == 1) &&
			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
			    (stcb->asoc.send_sack == 0) &&
			    (stcb->asoc.numduptsns == 0) &&
			    (stcb->asoc.delayed_ack) &&
			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {

				/*
				 * CMT DAC algorithm: With CMT, delay acks
				 * even in the face of
				 *
				 * reordering. Therefore, if acks that do not
				 * have to be sent because of the above
				 * reasons, will be delayed. That is, acks
				 * that would have been sent due to gap
				 * reports will be delayed with DAC. Start
				 * the delayed ack timer.
				 */
				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
				    stcb->sctp_ep, stcb, NULL);
			} else {
				/*
				 * Ok we must build a SACK since the timer
				 * is pending, we got our first packet OR
				 * there are gaps or duplicates.
				 */
				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
				sctp_send_sack(stcb);
			}
		} else {
			/* Nothing urgent: just make sure a timer is running. */
			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
				    stcb->sctp_ep, stcb, NULL);
			}
		}
	}
}
2516
/*
 * Service the reassembly queue: finish any partial-delivery (PD-API)
 * already in progress, then repeatedly start delivery of the next
 * fragmented message whose first fragment is deliverable and which
 * either is complete on the queue or has accumulated enough data to
 * reach the partial-delivery point.
 */
void
sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint32_t tsize, pd_point;
	uint16_t nxt_todel;

	if (asoc->fragmented_delivery_inprogress) {
		sctp_service_reassembly(stcb, asoc);
	}
	/* Can we proceed further, i.e. the PD-API is complete */
	if (asoc->fragmented_delivery_inprogress) {
		/* no */
		return;
	}
	/*
	 * Now is there some other chunk I can deliver from the reassembly
	 * queue.
	 */
doit_again:
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* Empty queue: make the accounting reflect that. */
		asoc->size_on_reasm_queue = 0;
		asoc->cnt_on_reasm_queue = 0;
		return;
	}
	nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
	    ((nxt_todel == chk->rec.data.stream_seq) ||
	    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
		/*
		 * Yep the first one is here. We setup to start reception,
		 * by backing down the TSN just in case we can't deliver.
		 */

		/*
		 * Before we start though either all of the message should
		 * be here or the socket buffer max or nothing on the
		 * delivery queue and something can be delivered.
		 */
		if (stcb->sctp_socket) {
			pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
			    stcb->sctp_ep->partial_delivery_point);
		} else {
			pd_point = stcb->sctp_ep->partial_delivery_point;
		}
		if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
			/* Begin the PD-API for this message. */
			asoc->fragmented_delivery_inprogress = 1;
			asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
			asoc->str_of_pdapi = chk->rec.data.stream_number;
			asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
			asoc->pdapi_ppid = chk->rec.data.payloadtype;
			asoc->fragment_flags = chk->rec.data.rcv_flags;
			sctp_service_reassembly(stcb, asoc);
			if (asoc->fragmented_delivery_inprogress == 0) {
				/* Message fully delivered; try the next one. */
				goto doit_again;
			}
		}
	}
}
2577
/*
 * Walk all chunks in the DATA region of an inbound packet, handing each
 * DATA chunk to sctp_process_a_data_chunk() and applying the bit rules
 * (0x40 = report error, 0x80 = skip vs. stop) to non-DATA chunk types
 * found there.  On exit, services the reassembly queue and runs the
 * SACK decision.  Returns 0 on success, 1 on a mutilated first chunk,
 * and 2 when the association was aborted.  Called with the TCB locked.
 */
int
sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
    struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t * high_tsn)
{
	struct sctp_data_chunk *ch, chunk_buf;
	struct sctp_association *asoc;
	int num_chunks = 0;	/* number of control chunks processed */
	int stop_proc = 0;
	int chk_length, break_flag, last_chunk;
	int abort_flag = 0, was_a_gap;
	struct mbuf *m;
	uint32_t highest_tsn;

	/* set the rwnd */
	sctp_set_rwnd(stcb, &stcb->asoc);

	m = *mm;
	SCTP_TCB_LOCK_ASSERT(stcb);
	asoc = &stcb->asoc;
	/* Record whether a gap existed BEFORE this packet is processed. */
	if (compare_with_wrap(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map, MAX_TSN)) {
		highest_tsn = asoc->highest_tsn_inside_nr_map;
	} else {
		highest_tsn = asoc->highest_tsn_inside_map;
	}
	was_a_gap = compare_with_wrap(highest_tsn, stcb->asoc.cumulative_tsn, MAX_TSN);
	/*
	 * setup where we got the last DATA packet from for any SACK that
	 * may need to go out. Don't bump the net. This is done ONLY when a
	 * chunk is assigned.
	 */
	asoc->last_data_chunk_from = net;

	/*-
	 * Now before we proceed we must figure out if this is a wasted
	 * cluster... i.e. it is a small packet sent in and yet the driver
	 * underneath allocated a full cluster for it. If so we must copy it
	 * to a smaller mbuf and free up the cluster mbuf. This will help
	 * with cluster starvation. Note for __Panda__ we don't do this
	 * since it has clusters all the way down to 64 bytes.
	 */
	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
		/* we only handle mbufs that are singletons.. not chains */
		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_DONTWAIT, 1, MT_DATA);
		if (m) {
			/* ok lets see if we can copy the data up */
			caddr_t *from, *to;

			/* get the pointers and copy */
			to = mtod(m, caddr_t *);
			from = mtod((*mm), caddr_t *);
			memcpy(to, from, SCTP_BUF_LEN((*mm)));
			/* copy the length and free up the old */
			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
			sctp_m_freem(*mm);
			/* sucess, back copy */
			*mm = m;
		} else {
			/* We are in trouble in the mbuf world .. yikes */
			m = *mm;
		}
	}
	/* get pointer to the first chunk header */
	ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
	    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
	if (ch == NULL) {
		return (1);
	}
	/*
	 * process all DATA chunks...
	 */
	*high_tsn = asoc->cumulative_tsn;
	break_flag = 0;
	asoc->data_pkts_seen++;
	while (stop_proc == 0) {
		/* validate chunk length */
		chk_length = ntohs(ch->ch.chunk_length);
		if (length - *offset < chk_length) {
			/* all done, mutulated chunk */
			stop_proc = 1;
			break;
		}
		if (ch->ch.chunk_type == SCTP_DATA) {
			/* A DATA chunk must carry at least one payload byte. */
			if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
				/*
				 * Need to send an abort since we had a
				 * invalid data chunk.
				 */
				struct mbuf *op_err;

				op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);

				if (op_err) {
					/* Build a PROTOCOL_VIOLATION cause with loc + cum-tsn. */
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) +
					    (2 * sizeof(uint32_t));
					ph = mtod(op_err, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(SCTP_BUF_LEN(op_err));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
					ippp++;
					*ippp = asoc->cumulative_tsn;

				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
				sctp_abort_association(inp, stcb, m, iphlen, sh,
				    op_err, 0, net->port);
				return (2);
			}
#ifdef SCTP_AUDITING_ENABLED
			sctp_audit_log(0xB1, 0);
#endif
			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
				last_chunk = 1;
			} else {
				last_chunk = 0;
			}
			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
			    chk_length, net, high_tsn, &abort_flag, &break_flag,
			    last_chunk)) {
				num_chunks++;
			}
			if (abort_flag)
				return (2);

			if (break_flag) {
				/*
				 * Set because of out of rwnd space and no
				 * drop rep space left.
				 */
				stop_proc = 1;
				break;
			}
		} else {
			/* not a data chunk in the data region */
			switch (ch->ch.chunk_type) {
			case SCTP_INITIATION:
			case SCTP_INITIATION_ACK:
			case SCTP_SELECTIVE_ACK:
			case SCTP_NR_SELECTIVE_ACK:	/* EY */
			case SCTP_HEARTBEAT_REQUEST:
			case SCTP_HEARTBEAT_ACK:
			case SCTP_ABORT_ASSOCIATION:
			case SCTP_SHUTDOWN:
			case SCTP_SHUTDOWN_ACK:
			case SCTP_OPERATION_ERROR:
			case SCTP_COOKIE_ECHO:
			case SCTP_COOKIE_ACK:
			case SCTP_ECN_ECHO:
			case SCTP_ECN_CWR:
			case SCTP_SHUTDOWN_COMPLETE:
			case SCTP_AUTHENTICATION:
			case SCTP_ASCONF_ACK:
			case SCTP_PACKET_DROPPED:
			case SCTP_STREAM_RESET:
			case SCTP_FORWARD_CUM_TSN:
			case SCTP_ASCONF:
				/*
				 * Now, what do we do with KNOWN chunks that
				 * are NOT in the right place?
				 *
				 * For now, I do nothing but ignore them. We
				 * may later want to add sysctl stuff to
				 * switch out and do either an ABORT() or
				 * possibly process them.
				 */
				if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
					struct mbuf *op_err;

					op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION);
					sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 0, net->port);
					return (2);
				}
				break;
			default:
				/* unknown chunk type, use bit rules */
				if (ch->ch.chunk_type & 0x40) {
					/* Add a error report to the queue */
					struct mbuf *merr;
					struct sctp_paramhdr *phd;

					merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_DONTWAIT, 1, MT_DATA);
					if (merr) {
						phd = mtod(merr, struct sctp_paramhdr *);
						/*
						 * We cheat and use param
						 * type since we did not
						 * bother to define a error
						 * cause struct. They are
						 * the same basic format
						 * with different names.
						 */
						phd->param_type =
						    htons(SCTP_CAUSE_UNRECOG_CHUNK);
						phd->param_length =
						    htons(chk_length + sizeof(*phd));
						SCTP_BUF_LEN(merr) = sizeof(*phd);
						SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset,
						    SCTP_SIZE32(chk_length),
						    M_DONTWAIT);
						if (SCTP_BUF_NEXT(merr)) {
							sctp_queue_op_err(stcb, merr);
						} else {
							sctp_m_freem(merr);
						}
					}
				}
				if ((ch->ch.chunk_type & 0x80) == 0) {
					/* discard the rest of this packet */
					stop_proc = 1;
				}	/* else skip this bad chunk and
					 * continue... */
				break;
			};	/* switch of chunk type */
		}
		*offset += SCTP_SIZE32(chk_length);
		if ((*offset >= length) || stop_proc) {
			/* no more data left in the mbuf chain */
			stop_proc = 1;
			continue;
		}
		ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
		    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
		if (ch == NULL) {
			*offset = length;
			stop_proc = 1;
			break;

		}
	}			/* while */
	if (break_flag) {
		/*
		 * we need to report rwnd overrun drops.
		 */
		sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0);
	}
	if (num_chunks) {
		/*
		 * Did we get data, if so update the time for auto-close and
		 * give peer credit for being alive.
		 */
		SCTP_STAT_INCR(sctps_recvpktwithdata);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
			    stcb->asoc.overall_error_count,
			    0,
			    SCTP_FROM_SCTP_INDATA,
			    __LINE__);
		}
		stcb->asoc.overall_error_count = 0;
		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
	}
	/* now service all of the reassm queue if needed */
	if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
		sctp_service_queues(stcb, asoc);

	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
		/* Assure that we ack right away */
		stcb->asoc.send_sack = 1;
	}
	/* Start a sack timer or QUEUE a SACK for sending */
	sctp_sack_check(stcb, was_a_gap, &abort_flag);
	if (abort_flag)
		return (2);

	return (0);
}
2850
2851 static int
2852 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2853 uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2854 int *num_frs,
2855 uint32_t * biggest_newly_acked_tsn,
2856 uint32_t * this_sack_lowest_newack,
2857 int *ecn_seg_sums)
2858 {
2859 struct sctp_tmit_chunk *tp1;
2860 unsigned int theTSN;
2861 int j, wake_him = 0, circled = 0;
2862
2863 /* Recover the tp1 we last saw */
2864 tp1 = *p_tp1;
2865 if (tp1 == NULL) {
2866 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2867 }
2868 for (j = frag_strt; j <= frag_end; j++) {
2869 theTSN = j + last_tsn;
2870 while (tp1) {
2871 if (tp1->rec.data.doing_fast_retransmit)
2872 (*num_frs) += 1;
2873
2874 /*-
2875 * CMT: CUCv2 algorithm. For each TSN being
2876 * processed from the sent queue, track the
2877 * next expected pseudo-cumack, or
2878 * rtx_pseudo_cumack, if required. Separate
2879 * cumack trackers for first transmissions,
2880 * and retransmissions.
2881 */
2882 if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2883 (tp1->snd_count == 1)) {
2884 tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2885 tp1->whoTo->find_pseudo_cumack = 0;
2886 }
2887 if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2888 (tp1->snd_count > 1)) {
2889 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2890 tp1->whoTo->find_rtx_pseudo_cumack = 0;
2891 }
2892 if (tp1->rec.data.TSN_seq == theTSN) {
2893 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2894 /*-
2895 * must be held until
2896 * cum-ack passes
2897 */
2898 /*-
2899 * ECN Nonce: Add the nonce
2900 * value to the sender's
2901 * nonce sum
2902 */
2903 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2904 /*-
2905 * If it is less than RESEND, it is
2906 * now no-longer in flight.
2907 * Higher values may already be set
2908 * via previous Gap Ack Blocks...
2909 * i.e. ACKED or RESEND.
2910 */
2911 if (compare_with_wrap(tp1->rec.data.TSN_seq,
2912 *biggest_newly_acked_tsn, MAX_TSN)) {
2913 *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2914 }
2915 /*-
2916 * CMT: SFR algo (and HTNA) - set
2917 * saw_newack to 1 for dest being
2918 * newly acked. update
2919 * this_sack_highest_newack if
2920 * appropriate.
2921 */
2922 if (tp1->rec.data.chunk_was_revoked == 0)
2923 tp1->whoTo->saw_newack = 1;
2924
2925 if (compare_with_wrap(tp1->rec.data.TSN_seq,
2926 tp1->whoTo->this_sack_highest_newack,
2927 MAX_TSN)) {
2928 tp1->whoTo->this_sack_highest_newack =
2929 tp1->rec.data.TSN_seq;
2930 }
2931 /*-
2932 * CMT DAC algo: also update
2933 * this_sack_lowest_newack
2934 */
2935 if (*this_sack_lowest_newack == 0) {
2936 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2937 sctp_log_sack(*this_sack_lowest_newack,
2938 last_tsn,
2939 tp1->rec.data.TSN_seq,
2940 0,
2941 0,
2942 SCTP_LOG_TSN_ACKED);
2943 }
2944 *this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2945 }
2946 /*-
2947 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2948 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2949 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2950 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2951 * Separate pseudo_cumack trackers for first transmissions and
2952 * retransmissions.
2953 */
2954 if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2955 if (tp1->rec.data.chunk_was_revoked == 0) {
2956 tp1->whoTo->new_pseudo_cumack = 1;
2957 }
2958 tp1->whoTo->find_pseudo_cumack = 1;
2959 }
2960 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2961 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2962 }
2963 if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2964 if (tp1->rec.data.chunk_was_revoked == 0) {
2965 tp1->whoTo->new_pseudo_cumack = 1;
2966 }
2967 tp1->whoTo->find_rtx_pseudo_cumack = 1;
2968 }
2969 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2970 sctp_log_sack(*biggest_newly_acked_tsn,
2971 last_tsn,
2972 tp1->rec.data.TSN_seq,
2973 frag_strt,
2974 frag_end,
2975 SCTP_LOG_TSN_ACKED);
2976 }
2977 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2978 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2979 tp1->whoTo->flight_size,
2980 tp1->book_size,
2981 (uintptr_t) tp1->whoTo,
2982 tp1->rec.data.TSN_seq);
2983 }
2984 sctp_flight_size_decrease(tp1);
2985 sctp_total_flight_decrease(stcb, tp1);
2986
2987 tp1->whoTo->net_ack += tp1->send_size;
2988 if (tp1->snd_count < 2) {
2989 /*-
2990 * True non-retransmited chunk
2991 */
2992 tp1->whoTo->net_ack2 += tp1->send_size;
2993
2994 /*-
2995 * update RTO too ?
2996 */
2997 if (tp1->do_rtt) {
2998 tp1->whoTo->RTO =
2999 sctp_calculate_rto(stcb,
3000 &stcb->asoc,
3001 tp1->whoTo,
3002 &tp1->sent_rcv_time,
3003 sctp_align_safe_nocopy);
3004 tp1->do_rtt = 0;
3005 }
3006 }
3007 }
3008 if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3009 (*ecn_seg_sums) += tp1->rec.data.ect_nonce;
3010 (*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM;
3011 if (compare_with_wrap(tp1->rec.data.TSN_seq,
3012 stcb->asoc.this_sack_highest_gap,
3013 MAX_TSN)) {
3014 stcb->asoc.this_sack_highest_gap =
3015 tp1->rec.data.TSN_seq;
3016 }
3017 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3018 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3019 #ifdef SCTP_AUDITING_ENABLED
3020 sctp_audit_log(0xB2,
3021 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3022 #endif
3023 }
3024 }
3025 /*-
3026 * All chunks NOT UNSENT fall through here and are marked
3027 * (leave PR-SCTP ones that are to skip alone though)
3028 */
3029 if (tp1->sent != SCTP_FORWARD_TSN_SKIP)
3030 tp1->sent = SCTP_DATAGRAM_MARKED;
3031
3032 if (tp1->rec.data.chunk_was_revoked) {
3033 /* deflate the cwnd */
3034 tp1->whoTo->cwnd -= tp1->book_size;
3035 tp1->rec.data.chunk_was_revoked = 0;
3036 }
3037 /* NR Sack code here */
3038 if (nr_sacking) {
3039 if (tp1->data) {
3040 /*
3041 * sa_ignore
3042 * NO_NULL_CHK
3043 */
3044 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3045 sctp_m_freem(tp1->data);
3046 tp1->data = NULL;
3047 }
3048 wake_him++;
3049 }
3050 }
3051 break;
3052 } /* if (tp1->TSN_seq == theTSN) */
3053 if (compare_with_wrap(tp1->rec.data.TSN_seq, theTSN,
3054 MAX_TSN))
3055 break;
3056
3057 tp1 = TAILQ_NEXT(tp1, sctp_next);
3058 if ((tp1 == NULL) && (circled == 0)) {
3059 circled++;
3060 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3061 }
3062 } /* end while (tp1) */
3063 if (tp1 == NULL) {
3064 circled = 0;
3065 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3066 }
3067 /* In case the fragments were not in order we must reset */
3068 } /* end for (j = fragStart */
3069 *p_tp1 = tp1;
3070 return (wake_him); /* Return value only used for nr-sack */
3071 }
3072
3073
3074 static int
3075 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3076 uint32_t last_tsn, uint32_t * biggest_tsn_acked,
3077 uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
3078 int num_seg, int num_nr_seg, int *ecn_seg_sums)
3079 {
3080 struct sctp_gap_ack_block *frag, block;
3081 struct sctp_tmit_chunk *tp1;
3082 int i;
3083 int num_frs = 0;
3084 int chunk_freed;
3085 int non_revocable;
3086 uint16_t frag_strt, frag_end, prev_frag_end;
3087
3088 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3089 prev_frag_end = 0;
3090 chunk_freed = 0;
3091
3092 for (i = 0; i < (num_seg + num_nr_seg); i++) {
3093 if (i == num_seg) {
3094 prev_frag_end = 0;
3095 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3096 }
3097 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3098 sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3099 *offset += sizeof(block);
3100 if (frag == NULL) {
3101 return (chunk_freed);
3102 }
3103 frag_strt = ntohs(frag->start);
3104 frag_end = ntohs(frag->end);
3105
3106 if (frag_strt > frag_end) {
3107 /* This gap report is malformed, skip it. */
3108 continue;
3109 }
3110 if (frag_strt <= prev_frag_end) {
3111 /* This gap report is not in order, so restart. */
3112 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3113 }
3114 if (compare_with_wrap((last_tsn + frag_end), *biggest_tsn_acked, MAX_TSN)) {
3115 *biggest_tsn_acked = last_tsn + frag_end;
3116 }
3117 if (i < num_seg) {
3118 non_revocable = 0;
3119 } else {
3120 non_revocable = 1;
3121 }
3122 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3123 non_revocable, &num_frs, biggest_newly_acked_tsn,
3124 this_sack_lowest_newack, ecn_seg_sums)) {
3125 chunk_freed = 1;
3126 }
3127 prev_frag_end = frag_end;
3128 }
3129 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3130 if (num_frs)
3131 sctp_log_fr(*biggest_tsn_acked,
3132 *biggest_newly_acked_tsn,
3133 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3134 }
3135 return (chunk_freed);
3136 }
3137
3138 static void
3139 sctp_check_for_revoked(struct sctp_tcb *stcb,
3140 struct sctp_association *asoc, uint32_t cumack,
3141 uint32_t biggest_tsn_acked)
3142 {
3143 struct sctp_tmit_chunk *tp1;
3144 int tot_revoked = 0;
3145
3146 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3147 while (tp1) {
3148 if (compare_with_wrap(tp1->rec.data.TSN_seq, cumack,
3149 MAX_TSN)) {
3150 /*
3151 * ok this guy is either ACK or MARKED. If it is
3152 * ACKED it has been previously acked but not this
3153 * time i.e. revoked. If it is MARKED it was ACK'ed
3154 * again.
3155 */
3156 if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
3157 MAX_TSN))
3158 break;
3159
3160
3161 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3162 /* it has been revoked */
3163 tp1->sent = SCTP_DATAGRAM_SENT;
3164 tp1->rec.data.chunk_was_revoked = 1;
3165 /*
3166 * We must add this stuff back in to assure
3167 * timers and such get started.
3168 */
3169 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3170 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3171 tp1->whoTo->flight_size,
3172 tp1->book_size,
3173 (uintptr_t) tp1->whoTo,
3174 tp1->rec.data.TSN_seq);
3175 }
3176 sctp_flight_size_increase(tp1);
3177 sctp_total_flight_increase(stcb, tp1);
3178 /*
3179 * We inflate the cwnd to compensate for our
3180 * artificial inflation of the flight_size.
3181 */
3182 tp1->whoTo->cwnd += tp1->book_size;
3183 tot_revoked++;
3184 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3185 sctp_log_sack(asoc->last_acked_seq,
3186 cumack,
3187 tp1->rec.data.TSN_seq,
3188 0,
3189 0,
3190 SCTP_LOG_TSN_REVOKED);
3191 }
3192 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3193 /* it has been re-acked in this SACK */
3194 tp1->sent = SCTP_DATAGRAM_ACKED;
3195 }
3196 }
3197 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3198 break;
3199 tp1 = TAILQ_NEXT(tp1, sctp_next);
3200 }
3201 if (tot_revoked > 0) {
3202 /*
3203 * Setup the ecn nonce re-sync point. We do this since once
3204 * data is revoked we begin to retransmit things, which do
3205 * NOT have the ECN bits set. This means we are now out of
3206 * sync and must wait until we get back in sync with the
3207 * peer to check ECN bits.
3208 */
3209 tp1 = TAILQ_FIRST(&asoc->send_queue);
3210 if (tp1 == NULL) {
3211 asoc->nonce_resync_tsn = asoc->sending_seq;
3212 } else {
3213 asoc->nonce_resync_tsn = tp1->rec.data.TSN_seq;
3214 }
3215 asoc->nonce_wait_for_ecne = 0;
3216 asoc->nonce_sum_check = 0;
3217 }
3218 }
3219
3220
/*
 * Strike (mark as missing) chunks on the sent queue based on the gap-ack
 * reports of a SACK.  Each strike increments tp1->sent toward
 * SCTP_DATAGRAM_RESEND; once a chunk reaches RESEND it is set up for fast
 * retransmit (possibly to an alternate destination).  Implements:
 *  - the HTNA rule (do not strike above the highest TSN newly acked),
 *  - CMT SFR gating via per-destination saw_newack/this_sack_highest_newack,
 *  - the CMT DAC extra-strike rule (when exactly one destination was sacked),
 *  - PR-SCTP lifetime (TTL) and retransmission-count based abandonment.
 * Called with the TCB lock held; walks asoc->sent_queue in TSN order.
 */
static void
sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
    uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
{
	struct sctp_tmit_chunk *tp1;
	int strike_flag = 0;	/* set when a chunk is struck after a prior FR */
	struct timeval now;
	int tot_retrans = 0;	/* chunks newly marked RESEND by this call */
	uint32_t sending_seq;
	struct sctp_nets *net;
	int num_dests_sacked = 0;	/* CMT DAC: destinations seeing new acks */

	/*
	 * select the sending_seq, this is either the next thing ready to be
	 * sent but not transmitted, OR, the next seq we assign.
	 */
	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
	if (tp1 == NULL) {
		sending_seq = asoc->sending_seq;
	} else {
		sending_seq = tp1->rec.data.TSN_seq;
	}

	/* CMT DAC algo: finding out if SACK is a mixed SACK */
	if ((asoc->sctp_cmt_on_off == 1) &&
	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
			if (net->saw_newack)
				num_dests_sacked++;
		}
	}
	if (stcb->asoc.peer_supports_prsctp) {
		/* "now" is only needed for PR-SCTP TTL expiry checks below */
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	tp1 = TAILQ_FIRST(&asoc->sent_queue);
	while (tp1) {
		strike_flag = 0;
		if (tp1->no_fr_allowed) {
			/* this one had a timeout or something */
			tp1 = TAILQ_NEXT(tp1, sctp_next);
			continue;
		}
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
			if (tp1->sent < SCTP_DATAGRAM_RESEND)
				sctp_log_fr(biggest_tsn_newly_acked,
				    tp1->rec.data.TSN_seq,
				    tp1->sent,
				    SCTP_FR_LOG_CHECK_STRIKE);
		}
		if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
		    MAX_TSN) ||
		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
			/* done */
			break;
		}
		if (stcb->asoc.peer_supports_prsctp) {
			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
				/* Is it expired? */
				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
					/* Yes so drop it */
					if (tp1->data != NULL) {
						(void)sctp_release_pr_sctp_chunk(stcb, tp1,
						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
						    SCTP_SO_NOT_LOCKED);
					}
					tp1 = TAILQ_NEXT(tp1, sctp_next);
					continue;
				}
			}
		}
		if (compare_with_wrap(tp1->rec.data.TSN_seq,
		    asoc->this_sack_highest_gap, MAX_TSN)) {
			/* we are beyond the tsn in the sack */
			break;
		}
		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
			/* either a RESEND, ACKED, or MARKED */
			/* skip */
			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
				/* Continue strikin FWD-TSN chunks */
				tp1->rec.data.fwd_tsn_cnt++;
			}
			tp1 = TAILQ_NEXT(tp1, sctp_next);
			continue;
		}
		/*
		 * CMT : SFR algo (covers part of DAC and HTNA as well)
		 */
		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
			/*
			 * No new acks were receieved for data sent to this
			 * dest. Therefore, according to the SFR algo for
			 * CMT, no data sent to this dest can be marked for
			 * FR using this SACK.
			 */
			tp1 = TAILQ_NEXT(tp1, sctp_next);
			continue;
		} else if (tp1->whoTo && compare_with_wrap(tp1->rec.data.TSN_seq,
		    tp1->whoTo->this_sack_highest_newack, MAX_TSN)) {
			/*
			 * CMT: New acks were receieved for data sent to
			 * this dest. But no new acks were seen for data
			 * sent after tp1. Therefore, according to the SFR
			 * algo for CMT, tp1 cannot be marked for FR using
			 * this SACK. This step covers part of the DAC algo
			 * and the HTNA algo as well.
			 */
			tp1 = TAILQ_NEXT(tp1, sctp_next);
			continue;
		}
		/*
		 * Here we check to see if we were have already done a FR
		 * and if so we see if the biggest TSN we saw in the sack is
		 * smaller than the recovery point. If so we don't strike
		 * the tsn... otherwise we CAN strike the TSN.
		 */
		/*
		 * @@@ JRI: Check for CMT if (accum_moved &&
		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
		 * 0)) {
		 */
		if (accum_moved && asoc->fast_retran_loss_recovery) {
			/*
			 * Strike the TSN if in fast-recovery and cum-ack
			 * moved.
			 */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
				sctp_log_fr(biggest_tsn_newly_acked,
				    tp1->rec.data.TSN_seq,
				    tp1->sent,
				    SCTP_FR_LOG_STRIKE_CHUNK);
			}
			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
				tp1->sent++;
			}
			if ((asoc->sctp_cmt_on_off == 1) &&
			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
				/*
				 * CMT DAC algorithm: If SACK flag is set to
				 * 0, then lowest_newack test will not pass
				 * because it would have been set to the
				 * cumack earlier. If not already to be
				 * rtx'd, If not a mixed sack and if tp1 is
				 * not between two sacked TSNs, then mark by
				 * one more. NOTE that we are marking by one
				 * additional time since the SACK DAC flag
				 * indicates that two packets have been
				 * received after this missing TSN.
				 */
				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
				    compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
						sctp_log_fr(16 + num_dests_sacked,
						    tp1->rec.data.TSN_seq,
						    tp1->sent,
						    SCTP_FR_LOG_STRIKE_CHUNK);
					}
					tp1->sent++;
				}
			}
		} else if ((tp1->rec.data.doing_fast_retransmit) &&
		    (asoc->sctp_cmt_on_off == 0)) {
			/*
			 * For those that have done a FR we must take
			 * special consideration if we strike. I.e the
			 * biggest_newly_acked must be higher than the
			 * sending_seq at the time we did the FR.
			 */
			if (
#ifdef SCTP_FR_TO_ALTERNATE
			/*
			 * If FR's go to new networks, then we must only do
			 * this for singly homed asoc's. However if the FR's
			 * go to the same network (Armando's work) then its
			 * ok to FR multiple times.
			 */
			    (asoc->numnets < 2)
#else
			    (1)
#endif
			    ) {

				if ((compare_with_wrap(biggest_tsn_newly_acked,
				    tp1->rec.data.fast_retran_tsn, MAX_TSN)) ||
				    (biggest_tsn_newly_acked ==
				    tp1->rec.data.fast_retran_tsn)) {
					/*
					 * Strike the TSN, since this ack is
					 * beyond where things were when we
					 * did a FR.
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
						sctp_log_fr(biggest_tsn_newly_acked,
						    tp1->rec.data.TSN_seq,
						    tp1->sent,
						    SCTP_FR_LOG_STRIKE_CHUNK);
					}
					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
						tp1->sent++;
					}
					strike_flag = 1;
					if ((asoc->sctp_cmt_on_off == 1) &&
					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
						/*
						 * CMT DAC algorithm: If
						 * SACK flag is set to 0,
						 * then lowest_newack test
						 * will not pass because it
						 * would have been set to
						 * the cumack earlier. If
						 * not already to be rtx'd,
						 * If not a mixed sack and
						 * if tp1 is not between two
						 * sacked TSNs, then mark by
						 * one more. NOTE that we
						 * are marking by one
						 * additional time since the
						 * SACK DAC flag indicates
						 * that two packets have
						 * been received after this
						 * missing TSN.
						 */
						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
						    (num_dests_sacked == 1) &&
						    compare_with_wrap(this_sack_lowest_newack,
						    tp1->rec.data.TSN_seq, MAX_TSN)) {
							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
								sctp_log_fr(32 + num_dests_sacked,
								    tp1->rec.data.TSN_seq,
								    tp1->sent,
								    SCTP_FR_LOG_STRIKE_CHUNK);
							}
							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
								tp1->sent++;
							}
						}
					}
				}
			}
			/*
			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
			 * algo covers HTNA.
			 */
		} else if (compare_with_wrap(tp1->rec.data.TSN_seq,
		    biggest_tsn_newly_acked, MAX_TSN)) {
			/*
			 * We don't strike these: This is the HTNA
			 * algorithm i.e. we don't strike If our TSN is
			 * larger than the Highest TSN Newly Acked.
			 */
			;
		} else {
			/* Strike the TSN */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
				sctp_log_fr(biggest_tsn_newly_acked,
				    tp1->rec.data.TSN_seq,
				    tp1->sent,
				    SCTP_FR_LOG_STRIKE_CHUNK);
			}
			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
				tp1->sent++;
			}
			if ((asoc->sctp_cmt_on_off == 1) &&
			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
				/*
				 * CMT DAC algorithm: If SACK flag is set to
				 * 0, then lowest_newack test will not pass
				 * because it would have been set to the
				 * cumack earlier. If not already to be
				 * rtx'd, If not a mixed sack and if tp1 is
				 * not between two sacked TSNs, then mark by
				 * one more. NOTE that we are marking by one
				 * additional time since the SACK DAC flag
				 * indicates that two packets have been
				 * received after this missing TSN.
				 */
				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
				    compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
						sctp_log_fr(48 + num_dests_sacked,
						    tp1->rec.data.TSN_seq,
						    tp1->sent,
						    SCTP_FR_LOG_STRIKE_CHUNK);
					}
					tp1->sent++;
				}
			}
		}
		/* The strike(s) above may have pushed tp1 to RESEND. */
		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
			struct sctp_nets *alt;

			/* fix counts and things */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
				    tp1->book_size,
				    (uintptr_t) tp1->whoTo,
				    tp1->rec.data.TSN_seq);
			}
			if (tp1->whoTo) {
				tp1->whoTo->net_ack++;
				sctp_flight_size_decrease(tp1);
			}
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
			}
			/* add back to the rwnd */
			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));

			/* remove from the total flight */
			sctp_total_flight_decrease(stcb, tp1);

			if ((stcb->asoc.peer_supports_prsctp) &&
			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
				/*
				 * Has it been retransmitted tv_sec times? -
				 * we store the retran count there.
				 */
				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
					/* Yes, so drop it */
					if (tp1->data != NULL) {
						(void)sctp_release_pr_sctp_chunk(stcb, tp1,
						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
						    SCTP_SO_NOT_LOCKED);
					}
					/* Make sure to flag we had a FR */
					tp1->whoTo->net_ack++;
					tp1 = TAILQ_NEXT(tp1, sctp_next);
					continue;
				}
			}
			/* printf("OK, we are now ready to FR this guy\n"); */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
				sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
				    0, SCTP_FR_MARKED);
			}
			if (strike_flag) {
				/* This is a subsequent FR */
				SCTP_STAT_INCR(sctps_sendmultfastretrans);
			}
			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
			if (asoc->sctp_cmt_on_off == 1) {
				/*
				 * CMT: Using RTX_SSTHRESH policy for CMT.
				 * If CMT is being used, then pick dest with
				 * largest ssthresh for any retransmission.
				 */
				tp1->no_fr_allowed = 1;
				alt = tp1->whoTo;
				/* sa_ignore NO_NULL_CHK */
				if (asoc->sctp_cmt_pf > 0) {
					/*
					 * JRS 5/18/07 - If CMT PF is on,
					 * use the PF version of
					 * find_alt_net()
					 */
					alt = sctp_find_alternate_net(stcb, alt, 2);
				} else {
					/*
					 * JRS 5/18/07 - If only CMT is on,
					 * use the CMT version of
					 * find_alt_net()
					 */
					/* sa_ignore NO_NULL_CHK */
					alt = sctp_find_alternate_net(stcb, alt, 1);
				}
				if (alt == NULL) {
					alt = tp1->whoTo;
				}
				/*
				 * CUCv2: If a different dest is picked for
				 * the retransmission, then new
				 * (rtx-)pseudo_cumack needs to be tracked
				 * for orig dest. Let CUCv2 track new (rtx-)
				 * pseudo-cumack always.
				 */
				if (tp1->whoTo) {
					tp1->whoTo->find_pseudo_cumack = 1;
					tp1->whoTo->find_rtx_pseudo_cumack = 1;
				}
			} else {/* CMT is OFF */

#ifdef SCTP_FR_TO_ALTERNATE
				/* Can we find an alternate? */
				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
#else
				/*
				 * default behavior is to NOT retransmit
				 * FR's to an alternate. Armando Caro's
				 * paper details why.
				 */
				alt = tp1->whoTo;
#endif
			}

			tp1->rec.data.doing_fast_retransmit = 1;
			tot_retrans++;
			/* mark the sending seq for possible subsequent FR's */
			/*
			 * printf("Marking TSN for FR new value %x\n",
			 * (uint32_t)tpi->rec.data.TSN_seq);
			 */
			if (TAILQ_EMPTY(&asoc->send_queue)) {
				/*
				 * If the queue of send is empty then its
				 * the next sequence number that will be
				 * assigned so we subtract one from this to
				 * get the one we last sent.
				 */
				tp1->rec.data.fast_retran_tsn = sending_seq;
			} else {
				/*
				 * If there are chunks on the send queue
				 * (unsent data that has made it from the
				 * stream queues but not out the door, we
				 * take the first one (which will have the
				 * lowest TSN) and subtract one to get the
				 * one we last sent.
				 */
				struct sctp_tmit_chunk *ttt;

				ttt = TAILQ_FIRST(&asoc->send_queue);
				tp1->rec.data.fast_retran_tsn =
				    ttt->rec.data.TSN_seq;
			}

			if (tp1->do_rtt) {
				/*
				 * this guy had a RTO calculation pending on
				 * it, cancel it
				 */
				tp1->do_rtt = 0;
			}
			if (alt != tp1->whoTo) {
				/* yes, there is an alternate. */
				sctp_free_remote_addr(tp1->whoTo);
				/* sa_ignore FREED_MEMORY */
				tp1->whoTo = alt;
				atomic_add_int(&alt->ref_count, 1);
			}
		}
		tp1 = TAILQ_NEXT(tp1, sctp_next);
	}			/* while (tp1) */

	if (tot_retrans > 0) {
		/*
		 * Setup the ecn nonce re-sync point. We do this since once
		 * we go to FR something we introduce a Karn's rule scenario
		 * and won't know the totals for the ECN bits.
		 */
		asoc->nonce_resync_tsn = sending_seq;
		asoc->nonce_wait_for_ecne = 0;
		asoc->nonce_sum_check = 0;
	}
}
3677
/*
 * PR-SCTP: try to move the advanced-peer-ack point forward over chunks
 * that have been abandoned (FORWARD_TSN_SKIP) or whose PR-SCTP lifetime
 * has expired while awaiting retransmission.  Returns the last chunk the
 * ack point advanced over (the candidate for a FORWARD-TSN chunk), or
 * NULL if the peer does not support PR-SCTP or no advance was possible.
 * NOTE: sctp_release_pr_sctp_chunk() may unlink tp1, so its successor
 * (tp2) is captured before any release.
 */
struct sctp_tmit_chunk *
sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
    struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
	struct timeval now;
	int now_filled = 0;	/* lazily fetch "now" only once, on demand */

	if (asoc->peer_supports_prsctp == 0) {
		return (NULL);
	}
	tp1 = TAILQ_FIRST(&asoc->sent_queue);
	while (tp1) {
		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
		    tp1->sent != SCTP_DATAGRAM_RESEND) {
			/* no chance to advance, out of here */
			break;
		}
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
				    asoc->advanced_peer_ack_point,
				    tp1->rec.data.TSN_seq, 0, 0);
			}
		}
		if (!PR_SCTP_ENABLED(tp1->flags)) {
			/*
			 * We can't fwd-tsn past any that are reliable aka
			 * retransmitted until the asoc fails.
			 */
			break;
		}
		if (!now_filled) {
			(void)SCTP_GETTIME_TIMEVAL(&now);
			now_filled = 1;
		}
		/* capture the successor before tp1 may be released below */
		tp2 = TAILQ_NEXT(tp1, sctp_next);
		/*
		 * now we got a chunk which is marked for another
		 * retransmission to a PR-stream but has run out its chances
		 * already maybe OR has been marked to skip now. Can we skip
		 * it if its a resend?
		 */
		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
			/*
			 * Now is this one marked for resend and its time is
			 * now up?
			 */
			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
				/* Yes so drop it */
				if (tp1->data) {
					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
					    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
					    SCTP_SO_NOT_LOCKED);
				}
			} else {
				/*
				 * No, we are done when hit one for resend
				 * whos time as not expired.
				 */
				break;
			}
		}
		/*
		 * Ok now if this chunk is marked to drop it we can clean up
		 * the chunk, advance our peer ack point and we can check
		 * the next chunk.
		 */
		if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
			/* advance PeerAckPoint goes forward */
			if (compare_with_wrap(tp1->rec.data.TSN_seq,
			    asoc->advanced_peer_ack_point,
			    MAX_TSN)) {

				asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
				a_adv = tp1;
			} else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
				/* No update but we do save the chk */
				a_adv = tp1;
			}
		} else {
			/*
			 * If it is still in RESEND we can advance no
			 * further
			 */
			break;
		}
		/*
		 * If we hit here we just dumped tp1, move to next tsn on
		 * sent queue.
		 */
		tp1 = tp2;
	}
	return (a_adv);
}
3774
3775 static int
3776 sctp_fs_audit(struct sctp_association *asoc)
3777 {
3778 struct sctp_tmit_chunk *chk;
3779 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3780 int entry_flight, entry_cnt, ret;
3781
3782 entry_flight = asoc->total_flight;
3783 entry_cnt = asoc->total_flight_count;
3784 ret = 0;
3785
3786 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3787 return (0);
3788
3789 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3790 if (chk->sent < SCTP_DATAGRAM_RESEND) {
3791 printf("Chk TSN:%u size:%d inflight cnt:%d\n",
3792 chk->rec.data.TSN_seq,
3793 chk->send_size,
3794 chk->snd_count
3795 );
3796 inflight++;
3797 } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3798 resend++;
3799 } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3800 inbetween++;
3801 } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3802 above++;
3803 } else {
3804 acked++;
3805 }
3806 }
3807
3808 if ((inflight > 0) || (inbetween > 0)) {
3809 #ifdef INVARIANTS
3810 panic("Flight size-express incorrect? \n");
3811 #else
3812 printf("asoc->total_flight:%d cnt:%d\n",
3813 entry_flight, entry_cnt);
3814
3815 SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
3816 inflight, inbetween, resend, above, acked);
3817 ret = 1;
3818 #endif
3819 }
3820 return (ret);
3821 }
3822
3823
3824 static void
3825 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3826 struct sctp_association *asoc,
3827 struct sctp_nets *net,
3828 struct sctp_tmit_chunk *tp1)
3829 {
3830 tp1->window_probe = 0;
3831 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3832 /* TSN's skipped we do NOT move back. */
3833 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3834 tp1->whoTo->flight_size,
3835 tp1->book_size,
3836 (uintptr_t) tp1->whoTo,
3837 tp1->rec.data.TSN_seq);
3838 return;
3839 }
3840 /* First setup this by shrinking flight */
3841 sctp_flight_size_decrease(tp1);
3842 sctp_total_flight_decrease(stcb, tp1);
3843 /* Now mark for resend */
3844 tp1->sent = SCTP_DATAGRAM_RESEND;
3845 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3846
3847 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3848 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3849 tp1->whoTo->flight_size,
3850 tp1->book_size,
3851 (uintptr_t) tp1->whoTo,
3852 tp1->rec.data.TSN_seq);
3853 }
3854 }
3855
3856 void
3857 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3858 uint32_t rwnd, int nonce_sum_flag, int *abort_now)
3859 {
3860 struct sctp_nets *net;
3861 struct sctp_association *asoc;
3862 struct sctp_tmit_chunk *tp1, *tp2;
3863 uint32_t old_rwnd;
3864 int win_probe_recovery = 0;
3865 int win_probe_recovered = 0;
3866 int j, done_once = 0;
3867
3868 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3869 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3870 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3871 }
3872 SCTP_TCB_LOCK_ASSERT(stcb);
3873 #ifdef SCTP_ASOCLOG_OF_TSNS
3874 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3875 stcb->asoc.cumack_log_at++;
3876 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3877 stcb->asoc.cumack_log_at = 0;
3878 }
3879 #endif
3880 asoc = &stcb->asoc;
3881 old_rwnd = asoc->peers_rwnd;
3882 if (compare_with_wrap(asoc->last_acked_seq, cumack, MAX_TSN)) {
3883 /* old ack */
3884 return;
3885 } else if (asoc->last_acked_seq == cumack) {
3886 /* Window update sack */
3887 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3888 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3889 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3890 /* SWS sender side engages */
3891 asoc->peers_rwnd = 0;
3892 }
3893 if (asoc->peers_rwnd > old_rwnd) {
3894 goto again;
3895 }
3896 return;
3897 }
3898 /* First setup for CC stuff */
3899 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3900 net->prev_cwnd = net->cwnd;
3901 net->net_ack = 0;
3902 net->net_ack2 = 0;
3903
3904 /*
3905 * CMT: Reset CUC and Fast recovery algo variables before
3906 * SACK processing
3907 */
3908 net->new_pseudo_cumack = 0;
3909 net->will_exit_fast_recovery = 0;
3910 }
3911 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
3912 uint32_t send_s;
3913
3914 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3915 tp1 = TAILQ_LAST(&asoc->sent_queue,
3916 sctpchunk_listhead);
3917 send_s = tp1->rec.data.TSN_seq + 1;
3918 } else {
3919 send_s = asoc->sending_seq;
3920 }
3921 if ((cumack == send_s) ||
3922 compare_with_wrap(cumack, send_s, MAX_TSN)) {
3923 #ifndef INVARIANTS
3924 struct mbuf *oper;
3925
3926 #endif
3927 #ifdef INVARIANTS
3928 panic("Impossible sack 1");
3929 #else
3930
3931 *abort_now = 1;
3932 /* XXX */
3933 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
3934 0, M_DONTWAIT, 1, MT_DATA);
3935 if (oper) {
3936 struct sctp_paramhdr *ph;
3937 uint32_t *ippp;
3938
3939 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
3940 sizeof(uint32_t);
3941 ph = mtod(oper, struct sctp_paramhdr *);
3942 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
3943 ph->param_length = htons(SCTP_BUF_LEN(oper));
3944 ippp = (uint32_t *) (ph + 1);
3945 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
3946 }
3947 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
3948 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
3949 return;
3950 #endif
3951 }
3952 }
3953 asoc->this_sack_highest_gap = cumack;
3954 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3955 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3956 stcb->asoc.overall_error_count,
3957 0,
3958 SCTP_FROM_SCTP_INDATA,
3959 __LINE__);
3960 }
3961 stcb->asoc.overall_error_count = 0;
3962 if (compare_with_wrap(cumack, asoc->last_acked_seq, MAX_TSN)) {
3963 /* process the new consecutive TSN first */
3964 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3965 while (tp1) {
3966 tp2 = TAILQ_NEXT(tp1, sctp_next);
3967 if (compare_with_wrap(cumack, tp1->rec.data.TSN_seq,
3968 MAX_TSN) ||
3969 cumack == tp1->rec.data.TSN_seq) {
3970 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3971 printf("Warning, an unsent is now acked?\n");
3972 }
3973 /*
3974 * ECN Nonce: Add the nonce to the sender's
3975 * nonce sum
3976 */
3977 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
3978 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3979 /*
3980 * If it is less than ACKED, it is
3981 * now no-longer in flight. Higher
3982 * values may occur during marking
3983 */
3984 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3985 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3986 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3987 tp1->whoTo->flight_size,
3988 tp1->book_size,
3989 (uintptr_t) tp1->whoTo,
3990 tp1->rec.data.TSN_seq);
3991 }
3992 sctp_flight_size_decrease(tp1);
3993 /* sa_ignore NO_NULL_CHK */
3994 sctp_total_flight_decrease(stcb, tp1);
3995 }
3996 tp1->whoTo->net_ack += tp1->send_size;
3997 if (tp1->snd_count < 2) {
3998 /*
3999 * True non-retransmitted
4000 * chunk
4001 */
4002 tp1->whoTo->net_ack2 +=
4003 tp1->send_size;
4004
4005 /* update RTO too? */
4006 if (tp1->do_rtt) {
4007 tp1->whoTo->RTO =
4008 /*
4009 * sa_ignore
4010 * NO_NULL_CHK
4011 */
4012 sctp_calculate_rto(stcb,
4013 asoc, tp1->whoTo,
4014 &tp1->sent_rcv_time,
4015 sctp_align_safe_nocopy);
4016 tp1->do_rtt = 0;
4017 }
4018 }
4019 /*
4020 * CMT: CUCv2 algorithm. From the
4021 * cumack'd TSNs, for each TSN being
4022 * acked for the first time, set the
4023 * following variables for the
4024 * corresp destination.
4025 * new_pseudo_cumack will trigger a
4026 * cwnd update.
4027 * find_(rtx_)pseudo_cumack will
4028 * trigger search for the next
4029 * expected (rtx-)pseudo-cumack.
4030 */
4031 tp1->whoTo->new_pseudo_cumack = 1;
4032 tp1->whoTo->find_pseudo_cumack = 1;
4033 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4034
4035 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4036 /* sa_ignore NO_NULL_CHK */
4037 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4038 }
4039 }
4040 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4041 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4042 }
4043 if (tp1->rec.data.chunk_was_revoked) {
4044 /* deflate the cwnd */
4045 tp1->whoTo->cwnd -= tp1->book_size;
4046 tp1->rec.data.chunk_was_revoked = 0;
4047 }
4048 tp1->sent = SCTP_DATAGRAM_ACKED;
4049 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4050 if (tp1->data) {
4051 /* sa_ignore NO_NULL_CHK */
4052 sctp_free_bufspace(stcb, asoc, tp1, 1);
4053 sctp_m_freem(tp1->data);
4054 }
4055 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4056 sctp_log_sack(asoc->last_acked_seq,
4057 cumack,
4058 tp1->rec.data.TSN_seq,
4059 0,
4060 0,
4061 SCTP_LOG_FREE_SENT);
4062 }
4063 tp1->data = NULL;
4064 asoc->sent_queue_cnt--;
4065 sctp_free_a_chunk(stcb, tp1);
4066 tp1 = tp2;
4067 } else {
4068 break;
4069 }
4070 }
4071
4072 }
4073 /* sa_ignore NO_NULL_CHK */
4074 if (stcb->sctp_socket) {
4075 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4076 struct socket *so;
4077
4078 #endif
4079 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4080 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4081 /* sa_ignore NO_NULL_CHK */
4082 sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK);
4083 }
4084 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4085 so = SCTP_INP_SO(stcb->sctp_ep);
4086 atomic_add_int(&stcb->asoc.refcnt, 1);
4087 SCTP_TCB_UNLOCK(stcb);
4088 SCTP_SOCKET_LOCK(so, 1);
4089 SCTP_TCB_LOCK(stcb);
4090 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4091 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4092 /* assoc was freed while we were unlocked */
4093 SCTP_SOCKET_UNLOCK(so, 1);
4094 return;
4095 }
4096 #endif
4097 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4098 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4099 SCTP_SOCKET_UNLOCK(so, 1);
4100 #endif
4101 } else {
4102 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4103 sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK);
4104 }
4105 }
4106
4107 /* JRS - Use the congestion control given in the CC module */
4108 if (asoc->last_acked_seq != cumack)
4109 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4110
4111 asoc->last_acked_seq = cumack;
4112
4113 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4114 /* nothing left in-flight */
4115 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4116 net->flight_size = 0;
4117 net->partial_bytes_acked = 0;
4118 }
4119 asoc->total_flight = 0;
4120 asoc->total_flight_count = 0;
4121 }
4122 /* ECN Nonce updates */
4123 if (asoc->ecn_nonce_allowed) {
4124 if (asoc->nonce_sum_check) {
4125 if (nonce_sum_flag != ((asoc->nonce_sum_expect_base) & SCTP_SACK_NONCE_SUM)) {
4126 if (asoc->nonce_wait_for_ecne == 0) {
4127 struct sctp_tmit_chunk *lchk;
4128
4129 lchk = TAILQ_FIRST(&asoc->send_queue);
4130 asoc->nonce_wait_for_ecne = 1;
4131 if (lchk) {
4132 asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
4133 } else {
4134 asoc->nonce_wait_tsn = asoc->sending_seq;
4135 }
4136 } else {
4137 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
4138 (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
4139 /*
4140 * Misbehaving peer. We need
4141 * to react to this guy
4142 */
4143 asoc->ecn_allowed = 0;
4144 asoc->ecn_nonce_allowed = 0;
4145 }
4146 }
4147 }
4148 } else {
4149 /* See if Resynchronization Possible */
4150 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
4151 asoc->nonce_sum_check = 1;
4152 /*
4153 * Now we must calculate what the base is.
4154 * We do this based on two things, we know
4155 * the total's for all the segments
4156 * gap-acked in the SACK (none). We also
4157 * know the SACK's nonce sum, its in
4158 * nonce_sum_flag. So we can build a truth
4159 * table to back-calculate the new value of
4160 * asoc->nonce_sum_expect_base:
4161 *
4162 * SACK-flag-Value  Seg-Sums  Base:
4163 * 0 0 -> 0;  1 0 -> 1;
4164 * 0 1 -> 1;  1 1 -> 0
4165 */
4166 asoc->nonce_sum_expect_base = (0 ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
4167 }
4168 }
4169 }
4170 /* RWND update */
4171 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4172 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4173 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4174 /* SWS sender side engages */
4175 asoc->peers_rwnd = 0;
4176 }
4177 if (asoc->peers_rwnd > old_rwnd) {
4178 win_probe_recovery = 1;
4179 }
4180 /* Now assure a timer where data is queued at */
4181 again:
4182 j = 0;
4183 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4184 int to_ticks;
4185
4186 if (win_probe_recovery && (net->window_probe)) {
4187 win_probe_recovered = 1;
4188 /*
4189 * Find first chunk that was used with window probe
4190 * and clear the sent
4191 */
4192 /* sa_ignore FREED_MEMORY */
4193 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4194 if (tp1->window_probe) {
4195 /* move back to data send queue */
4196 sctp_window_probe_recovery(stcb, asoc, net, tp1);
4197 break;
4198 }
4199 }
4200 }
4201 if (net->RTO == 0) {
4202 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4203 } else {
4204 to_ticks = MSEC_TO_TICKS(net->RTO);
4205 }
4206 if (net->flight_size) {
4207 j++;
4208 (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4209 sctp_timeout_handler, &net->rxt_timer);
4210 if (net->window_probe) {
4211 net->window_probe = 0;
4212 }
4213 } else {
4214 if (net->window_probe) {
4215 /*
4216 * In window probes we must assure a timer
4217 * is still running there
4218 */
4219 net->window_probe = 0;
4220 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4221 SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4222 sctp_timeout_handler, &net->rxt_timer);
4223 }
4224 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4225 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4226 stcb, net,
4227 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4228 }
4229 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4230 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4231 SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
4232 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4233 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
4234 }
4235 }
4236 }
4237 }
4238 if ((j == 0) &&
4239 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4240 (asoc->sent_queue_retran_cnt == 0) &&
4241 (win_probe_recovered == 0) &&
4242 (done_once == 0)) {
4243 /*
4244 * huh, this should not happen unless all packets are
4245 * PR-SCTP and marked to skip of course.
4246 */
4247 if (sctp_fs_audit(asoc)) {
4248 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4249 net->flight_size = 0;
4250 }
4251 asoc->total_flight = 0;
4252 asoc->total_flight_count = 0;
4253 asoc->sent_queue_retran_cnt = 0;
4254 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4255 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4256 sctp_flight_size_increase(tp1);
4257 sctp_total_flight_increase(stcb, tp1);
4258 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4259 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4260 }
4261 }
4262 }
4263 done_once = 1;
4264 goto again;
4265 }
4266 /**********************************/
4267 /* Now what about shutdown issues */
4268 /**********************************/
4269 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4270 /* nothing left on sendqueue.. consider done */
4271 /* clean up */
4272 if ((asoc->stream_queue_cnt == 1) &&
4273 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4274 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4275 (asoc->locked_on_sending)
4276 ) {
4277 struct sctp_stream_queue_pending *sp;
4278
4279 /*
4280 * I may be in a state where we got all across.. but
4281 * cannot write more due to a shutdown... we abort
4282 * since the user did not indicate EOR in this case.
4283 * The sp will be cleaned during free of the asoc.
4284 */
4285 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4286 sctp_streamhead);
4287 if ((sp) && (sp->length == 0)) {
4288 /* Let cleanup code purge it */
4289 if (sp->msg_is_complete) {
4290 asoc->stream_queue_cnt--;
4291 } else {
4292 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4293 asoc->locked_on_sending = NULL;
4294 asoc->stream_queue_cnt--;
4295 }
4296 }
4297 }
4298 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4299 (asoc->stream_queue_cnt == 0)) {
4300 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4301 /* Need to abort here */
4302 struct mbuf *oper;
4303
4304 abort_out_now:
4305 *abort_now = 1;
4306 /* XXX */
4307 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4308 0, M_DONTWAIT, 1, MT_DATA);
4309 if (oper) {
4310 struct sctp_paramhdr *ph;
4311 uint32_t *ippp;
4312
4313 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4314 sizeof(uint32_t);
4315 ph = mtod(oper, struct sctp_paramhdr *);
4316 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4317 ph->param_length = htons(SCTP_BUF_LEN(oper));
4318 ippp = (uint32_t *) (ph + 1);
4319 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
4320 }
4321 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4322 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
4323 } else {
4324 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4325 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4326 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4327 }
4328 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4329 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4330 sctp_stop_timers_for_shutdown(stcb);
4331 sctp_send_shutdown(stcb,
4332 stcb->asoc.primary_destination);
4333 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4334 stcb->sctp_ep, stcb, asoc->primary_destination);
4335 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4336 stcb->sctp_ep, stcb, asoc->primary_destination);
4337 }
4338 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4339 (asoc->stream_queue_cnt == 0)) {
4340 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4341 goto abort_out_now;
4342 }
4343 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4344 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4345 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4346 sctp_send_shutdown_ack(stcb,
4347 stcb->asoc.primary_destination);
4348 sctp_stop_timers_for_shutdown(stcb);
4349 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4350 stcb->sctp_ep, stcb, asoc->primary_destination);
4351 }
4352 }
4353 /*********************************************/
4354 /* Here we perform PR-SCTP procedures */
4355 /* (section 4.2) */
4356 /*********************************************/
4357 /* C1. update advancedPeerAckPoint */
4358 if (compare_with_wrap(cumack, asoc->advanced_peer_ack_point, MAX_TSN)) {
4359 asoc->advanced_peer_ack_point = cumack;
4360 }
4361 /* PR-Sctp issues need to be addressed too */
4362 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
4363 struct sctp_tmit_chunk *lchk;
4364 uint32_t old_adv_peer_ack_point;
4365
4366 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4367 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4368 /* C3. See if we need to send a Fwd-TSN */
4369 if (compare_with_wrap(asoc->advanced_peer_ack_point, cumack,
4370 MAX_TSN)) {
4371 /*
4372 * ISSUE with ECN, see FWD-TSN processing for notes
4373 * on issues that will occur when the ECN NONCE
4374 * stuff is put into SCTP for cross checking.
4375 */
4376 if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
4377 MAX_TSN)) {
4378 send_forward_tsn(stcb, asoc);
4379 /*
4380 * ECN Nonce: Disable Nonce Sum check when
4381 * FWD TSN is sent and store resync tsn
4382 */
4383 asoc->nonce_sum_check = 0;
4384 asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
4385 } else if (lchk) {
4386 /* try to FR fwd-tsn's that get lost too */
4387 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4388 send_forward_tsn(stcb, asoc);
4389 }
4390 }
4391 }
4392 if (lchk) {
4393 /* Assure a timer is up */
4394 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4395 stcb->sctp_ep, stcb, lchk->whoTo);
4396 }
4397 }
4398 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4399 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4400 rwnd,
4401 stcb->asoc.peers_rwnd,
4402 stcb->asoc.total_flight,
4403 stcb->asoc.total_output_queue_size);
4404 }
4405 }
4406
4407 void
4408 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4409 struct sctp_tcb *stcb, struct sctp_nets *net_from,
4410 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4411 int *abort_now, uint8_t flags,
4412 uint32_t cum_ack, uint32_t rwnd)
4413 {
4414 struct sctp_association *asoc;
4415 struct sctp_tmit_chunk *tp1, *tp2;
4416 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4417 uint32_t sav_cum_ack;
4418 uint16_t wake_him = 0;
4419 uint32_t send_s = 0;
4420 long j;
4421 int accum_moved = 0;
4422 int will_exit_fast_recovery = 0;
4423 uint32_t a_rwnd, old_rwnd;
4424 int win_probe_recovery = 0;
4425 int win_probe_recovered = 0;
4426 struct sctp_nets *net = NULL;
4427 int nonce_sum_flag, ecn_seg_sums = 0;
4428 int done_once;
4429 uint8_t reneged_all = 0;
4430 uint8_t cmt_dac_flag;
4431
4432 /*
4433 * we take any chance we can to service our queues since we cannot
4434 * get awoken when the socket is read from :<
4435 */
4436 /*
4437 * Now perform the actual SACK handling: 1) Verify that it is not an
4438 * old sack, if so discard. 2) If there is nothing left in the send
4439 * queue (cum-ack is equal to last acked) then you have a duplicate
4440 * too, update any rwnd change and verify no timers are running.
4441 * then return. 3) Process any new consecutive data i.e. cum-ack
4442 * moved process these first and note that it moved. 4) Process any
4443 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4444 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4445 * sync up flightsizes and things, stop all timers and also check
4446 * for shutdown_pending state. If so then go ahead and send off the
4447 * shutdown. If in shutdown recv, send off the shutdown-ack and
4448 * start that timer, Ret. 9) Strike any non-acked things and do FR
4449 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4450 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4451 * if in shutdown_recv state.
4452 */
4453 SCTP_TCB_LOCK_ASSERT(stcb);
4454 /* CMT DAC algo */
4455 this_sack_lowest_newack = 0;
4456 j = 0;
4457 SCTP_STAT_INCR(sctps_slowpath_sack);
4458 last_tsn = cum_ack;
4459 nonce_sum_flag = flags & SCTP_SACK_NONCE_SUM;
4460 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4461 #ifdef SCTP_ASOCLOG_OF_TSNS
4462 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4463 stcb->asoc.cumack_log_at++;
4464 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4465 stcb->asoc.cumack_log_at = 0;
4466 }
4467 #endif
4468 a_rwnd = rwnd;
4469
4470 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4471 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4472 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4473 }
4474 old_rwnd = stcb->asoc.peers_rwnd;
4475 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4476 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4477 stcb->asoc.overall_error_count,
4478 0,
4479 SCTP_FROM_SCTP_INDATA,
4480 __LINE__);
4481 }
4482 stcb->asoc.overall_error_count = 0;
4483 asoc = &stcb->asoc;
4484 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4485 sctp_log_sack(asoc->last_acked_seq,
4486 cum_ack,
4487 0,
4488 num_seg,
4489 num_dup,
4490 SCTP_LOG_NEW_SACK);
4491 }
4492 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_FR_LOGGING_ENABLE | SCTP_EARLYFR_LOGGING_ENABLE))) {
4493 uint16_t i;
4494 uint32_t *dupdata, dblock;
4495
4496 for (i = 0; i < num_dup; i++) {
4497 dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4498 sizeof(uint32_t), (uint8_t *) & dblock);
4499 if (dupdata == NULL) {
4500 break;
4501 }
4502 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4503 }
4504 }
4505 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4506 /* reality check */
4507 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4508 tp1 = TAILQ_LAST(&asoc->sent_queue,
4509 sctpchunk_listhead);
4510 send_s = tp1->rec.data.TSN_seq + 1;
4511 } else {
4512 tp1 = NULL;
4513 send_s = asoc->sending_seq;
4514 }
4515 if (cum_ack == send_s ||
4516 compare_with_wrap(cum_ack, send_s, MAX_TSN)) {
4517 struct mbuf *oper;
4518
4519 /*
4520 * no way, we have not even sent this TSN out yet.
4521 * Peer is hopelessly messed up with us.
4522 */
4523 printf("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4524 cum_ack, send_s);
4525 if (tp1) {
4526 printf("Got send_s from tsn:%x + 1 of tp1:%p\n",
4527 tp1->rec.data.TSN_seq, tp1);
4528 }
4529 hopeless_peer:
4530 *abort_now = 1;
4531 /* XXX */
4532 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4533 0, M_DONTWAIT, 1, MT_DATA);
4534 if (oper) {
4535 struct sctp_paramhdr *ph;
4536 uint32_t *ippp;
4537
4538 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4539 sizeof(uint32_t);
4540 ph = mtod(oper, struct sctp_paramhdr *);
4541 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4542 ph->param_length = htons(SCTP_BUF_LEN(oper));
4543 ippp = (uint32_t *) (ph + 1);
4544 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4545 }
4546 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4547 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
4548 return;
4549 }
4550 }
4551 /**********************/
4552 /* 1) check the range */
4553 /**********************/
4554 if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) {
4555 /* acking something behind */
4556 return;
4557 }
4558 sav_cum_ack = asoc->last_acked_seq;
4559
4560 /* update the Rwnd of the peer */
4561 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4562 TAILQ_EMPTY(&asoc->send_queue) &&
4563 (asoc->stream_queue_cnt == 0)) {
4564 /* nothing left on send/sent and strmq */
4565 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4566 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4567 asoc->peers_rwnd, 0, 0, a_rwnd);
4568 }
4569 asoc->peers_rwnd = a_rwnd;
4570 if (asoc->sent_queue_retran_cnt) {
4571 asoc->sent_queue_retran_cnt = 0;
4572 }
4573 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4574 /* SWS sender side engages */
4575 asoc->peers_rwnd = 0;
4576 }
4577 /* stop any timers */
4578 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4579 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4580 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4581 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4582 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4583 SCTP_STAT_INCR(sctps_earlyfrstpidsck1);
4584 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4585 SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4586 }
4587 }
4588 net->partial_bytes_acked = 0;
4589 net->flight_size = 0;
4590 }
4591 asoc->total_flight = 0;
4592 asoc->total_flight_count = 0;
4593 return;
4594 }
4595 /*
4596 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4597 * things. The total byte count acked is tracked in netAckSz AND
4598 * netAck2 is used to track the total bytes acked that are un-
4599 * ambiguous and were never retransmitted. We track these on a per
4600 * destination address basis.
4601 */
4602 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4603 net->prev_cwnd = net->cwnd;
4604 net->net_ack = 0;
4605 net->net_ack2 = 0;
4606
4607 /*
4608 * CMT: Reset CUC and Fast recovery algo variables before
4609 * SACK processing
4610 */
4611 net->new_pseudo_cumack = 0;
4612 net->will_exit_fast_recovery = 0;
4613 }
4614 /* process the new consecutive TSN first */
4615 tp1 = TAILQ_FIRST(&asoc->sent_queue);
4616 while (tp1) {
4617 if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq,
4618 MAX_TSN) ||
4619 last_tsn == tp1->rec.data.TSN_seq) {
4620 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4621 /*
4622 * ECN Nonce: Add the nonce to the sender's
4623 * nonce sum
4624 */
4625 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
4626 accum_moved = 1;
4627 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4628 /*
4629 * If it is less than ACKED, it is
4630 * now no-longer in flight. Higher
4631 * values may occur during marking
4632 */
4633 if ((tp1->whoTo->dest_state &
4634 SCTP_ADDR_UNCONFIRMED) &&
4635 (tp1->snd_count < 2)) {
4636 /*
4637 * If there was no retran
4638 * and the address is
4639 * un-confirmed and we sent
4640 * there and are now
4641 * sacked.. its confirmed,
4642 * mark it so.
4643 */
4644 tp1->whoTo->dest_state &=
4645 ~SCTP_ADDR_UNCONFIRMED;
4646 }
4647 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4648 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4649 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4650 tp1->whoTo->flight_size,
4651 tp1->book_size,
4652 (uintptr_t) tp1->whoTo,
4653 tp1->rec.data.TSN_seq);
4654 }
4655 sctp_flight_size_decrease(tp1);
4656 sctp_total_flight_decrease(stcb, tp1);
4657 }
4658 tp1->whoTo->net_ack += tp1->send_size;
4659
4660 /* CMT SFR and DAC algos */
4661 this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4662 tp1->whoTo->saw_newack = 1;
4663
4664 if (tp1->snd_count < 2) {
4665 /*
4666 * True non-retransmitted
4667 * chunk
4668 */
4669 tp1->whoTo->net_ack2 +=
4670 tp1->send_size;
4671
4672 /* update RTO too? */
4673 if (tp1->do_rtt) {
4674 tp1->whoTo->RTO =
4675 sctp_calculate_rto(stcb,
4676 asoc, tp1->whoTo,
4677 &tp1->sent_rcv_time,
4678 sctp_align_safe_nocopy);
4679 tp1->do_rtt = 0;
4680 }
4681 }
4682 /*
4683 * CMT: CUCv2 algorithm. From the
4684 * cumack'd TSNs, for each TSN being
4685 * acked for the first time, set the
4686 * following variables for the
4687 * corresp destination.
4688 * new_pseudo_cumack will trigger a
4689 * cwnd update.
4690 * find_(rtx_)pseudo_cumack will
4691 * trigger search for the next
4692 * expected (rtx-)pseudo-cumack.
4693 */
4694 tp1->whoTo->new_pseudo_cumack = 1;
4695 tp1->whoTo->find_pseudo_cumack = 1;
4696 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4697
4698
4699 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4700 sctp_log_sack(asoc->last_acked_seq,
4701 cum_ack,
4702 tp1->rec.data.TSN_seq,
4703 0,
4704 0,
4705 SCTP_LOG_TSN_ACKED);
4706 }
4707 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4708 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4709 }
4710 }
4711 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4712 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4713 #ifdef SCTP_AUDITING_ENABLED
4714 sctp_audit_log(0xB3,
4715 (asoc->sent_queue_retran_cnt & 0x000000ff));
4716 #endif
4717 }
4718 if (tp1->rec.data.chunk_was_revoked) {
4719 /* deflate the cwnd */
4720 tp1->whoTo->cwnd -= tp1->book_size;
4721 tp1->rec.data.chunk_was_revoked = 0;
4722 }
4723 tp1->sent = SCTP_DATAGRAM_ACKED;
4724 }
4725 } else {
4726 break;
4727 }
4728 tp1 = TAILQ_NEXT(tp1, sctp_next);
4729 }
4730 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4731 /* always set this up to cum-ack */
4732 asoc->this_sack_highest_gap = last_tsn;
4733
4734 if ((num_seg > 0) || (num_nr_seg > 0)) {
4735
4736 /*
4737 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4738 * to be greater than the cumack. Also reset saw_newack to 0
4739 * for all dests.
4740 */
4741 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4742 net->saw_newack = 0;
4743 net->this_sack_highest_newack = last_tsn;
4744 }
4745
4746 /*
4747 * thisSackHighestGap will increase while handling NEW
4748 * segments this_sack_highest_newack will increase while
4749 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4750 * used for CMT DAC algo. saw_newack will also change.
4751 */
4752 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4753 &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4754 num_seg, num_nr_seg, &ecn_seg_sums)) {
4755 wake_him++;
4756 }
4757 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4758 /*
4759 * validate the biggest_tsn_acked in the gap acks if
4760 * strict adherence is wanted.
4761 */
4762 if ((biggest_tsn_acked == send_s) ||
4763 (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) {
4764 /*
4765 * peer is either confused or we are under
4766 * attack. We must abort.
4767 */
4768 printf("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4769 biggest_tsn_acked,
4770 send_s);
4771
4772 goto hopeless_peer;
4773 }
4774 }
4775 }
4776 /*******************************************/
4777 /* cancel ALL T3-send timer if accum moved */
4778 /*******************************************/
4779 if (asoc->sctp_cmt_on_off == 1) {
4780 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4781 if (net->new_pseudo_cumack)
4782 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4783 stcb, net,
4784 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4785
4786 }
4787 } else {
4788 if (accum_moved) {
4789 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4790 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4791 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4792 }
4793 }
4794 }
4795 /********************************************/
4796 /* drop the acked chunks from the sentqueue */
4797 /********************************************/
4798 asoc->last_acked_seq = cum_ack;
4799
4800 tp1 = TAILQ_FIRST(&asoc->sent_queue);
4801 if (tp1 == NULL)
4802 goto done_with_it;
4803 do {
4804 if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
4805 MAX_TSN)) {
4806 break;
4807 }
4808 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4809 /* no more sent on list */
4810 printf("Warning, tp1->sent == %d and its now acked?\n",
4811 tp1->sent);
4812 }
4813 tp2 = TAILQ_NEXT(tp1, sctp_next);
4814 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4815 if (tp1->pr_sctp_on) {
4816 if (asoc->pr_sctp_cnt != 0)
4817 asoc->pr_sctp_cnt--;
4818 }
4819 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4820 (asoc->total_flight > 0)) {
4821 #ifdef INVARIANTS
4822 panic("Warning flight size is postive and should be 0");
4823 #else
4824 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4825 asoc->total_flight);
4826 #endif
4827 asoc->total_flight = 0;
4828 }
4829 if (tp1->data) {
4830 /* sa_ignore NO_NULL_CHK */
4831 sctp_free_bufspace(stcb, asoc, tp1, 1);
4832 sctp_m_freem(tp1->data);
4833 if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4834 asoc->sent_queue_cnt_removeable--;
4835 }
4836 }
4837 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4838 sctp_log_sack(asoc->last_acked_seq,
4839 cum_ack,
4840 tp1->rec.data.TSN_seq,
4841 0,
4842 0,
4843 SCTP_LOG_FREE_SENT);
4844 }
4845 tp1->data = NULL;
4846 asoc->sent_queue_cnt--;
4847 sctp_free_a_chunk(stcb, tp1);
4848 wake_him++;
4849 tp1 = tp2;
4850 } while (tp1 != NULL);
4851
4852 done_with_it:
4853 /* sa_ignore NO_NULL_CHK */
4854 if ((wake_him) && (stcb->sctp_socket)) {
4855 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4856 struct socket *so;
4857
4858 #endif
4859 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4860 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4861 sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);
4862 }
4863 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4864 so = SCTP_INP_SO(stcb->sctp_ep);
4865 atomic_add_int(&stcb->asoc.refcnt, 1);
4866 SCTP_TCB_UNLOCK(stcb);
4867 SCTP_SOCKET_LOCK(so, 1);
4868 SCTP_TCB_LOCK(stcb);
4869 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4870 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4871 /* assoc was freed while we were unlocked */
4872 SCTP_SOCKET_UNLOCK(so, 1);
4873 return;
4874 }
4875 #endif
4876 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4877 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4878 SCTP_SOCKET_UNLOCK(so, 1);
4879 #endif
4880 } else {
4881 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4882 sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK);
4883 }
4884 }
4885
4886 if (asoc->fast_retran_loss_recovery && accum_moved) {
4887 if (compare_with_wrap(asoc->last_acked_seq,
4888 asoc->fast_recovery_tsn, MAX_TSN) ||
4889 asoc->last_acked_seq == asoc->fast_recovery_tsn) {
4890 /* Setup so we will exit RFC2582 fast recovery */
4891 will_exit_fast_recovery = 1;
4892 }
4893 }
4894 /*
4895 * Check for revoked fragments:
4896 *
4897 * if Previous sack - Had no frags then we can't have any revoked if
4898 * Previous sack - Had frag's then - If we now have frags aka
4899 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4900 * some of them. else - The peer revoked all ACKED fragments, since
4901 * we had some before and now we have NONE.
4902 */
4903
4904 if (num_seg) {
4905 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4906 asoc->saw_sack_with_frags = 1;
4907 } else if (asoc->saw_sack_with_frags) {
4908 int cnt_revoked = 0;
4909
4910 tp1 = TAILQ_FIRST(&asoc->sent_queue);
4911 if (tp1 != NULL) {
4912 /* Peer revoked all dg's marked or acked */
4913 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4914 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4915 tp1->sent = SCTP_DATAGRAM_SENT;
4916 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4917 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4918 tp1->whoTo->flight_size,
4919 tp1->book_size,
4920 (uintptr_t) tp1->whoTo,
4921 tp1->rec.data.TSN_seq);
4922 }
4923 sctp_flight_size_increase(tp1);
4924 sctp_total_flight_increase(stcb, tp1);
4925 tp1->rec.data.chunk_was_revoked = 1;
4926 /*
4927 * To ensure that this increase in
4928 * flightsize, which is artificial,
4929 * does not throttle the sender, we
4930 * also increase the cwnd
4931 * artificially.
4932 */
4933 tp1->whoTo->cwnd += tp1->book_size;
4934 cnt_revoked++;
4935 }
4936 }
4937 if (cnt_revoked) {
4938 reneged_all = 1;
4939 }
4940 }
4941 asoc->saw_sack_with_frags = 0;
4942 }
4943 if (num_nr_seg > 0)
4944 asoc->saw_sack_with_nr_frags = 1;
4945 else
4946 asoc->saw_sack_with_nr_frags = 0;
4947
4948 /* JRS - Use the congestion control given in the CC module */
4949 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4950
4951 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4952 /* nothing left in-flight */
4953 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4954 /* stop all timers */
4955 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4956 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4957 SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
4958 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4959 SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4960 }
4961 }
4962 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4963 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4964 net->flight_size = 0;
4965 net->partial_bytes_acked = 0;
4966 }
4967 asoc->total_flight = 0;
4968 asoc->total_flight_count = 0;
4969 }
4970 /**********************************/
4971 /* Now what about shutdown issues */
4972 /**********************************/
4973 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4974 /* nothing left on sendqueue.. consider done */
4975 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4976 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4977 asoc->peers_rwnd, 0, 0, a_rwnd);
4978 }
4979 asoc->peers_rwnd = a_rwnd;
4980 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4981 /* SWS sender side engages */
4982 asoc->peers_rwnd = 0;
4983 }
4984 /* clean up */
4985 if ((asoc->stream_queue_cnt == 1) &&
4986 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4987 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4988 (asoc->locked_on_sending)
4989 ) {
4990 struct sctp_stream_queue_pending *sp;
4991
4992 /*
4993 * I may be in a state where we got all across.. but
4994 * cannot write more due to a shutdown... we abort
4995 * since the user did not indicate EOR in this case.
4996 */
4997 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4998 sctp_streamhead);
4999 if ((sp) && (sp->length == 0)) {
5000 asoc->locked_on_sending = NULL;
5001 if (sp->msg_is_complete) {
5002 asoc->stream_queue_cnt--;
5003 } else {
5004 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
5005 asoc->stream_queue_cnt--;
5006 }
5007 }
5008 }
5009 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5010 (asoc->stream_queue_cnt == 0)) {
5011 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
5012 /* Need to abort here */
5013 struct mbuf *oper;
5014
5015 abort_out_now:
5016 *abort_now = 1;
5017 /* XXX */
5018 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
5019 0, M_DONTWAIT, 1, MT_DATA);
5020 if (oper) {
5021 struct sctp_paramhdr *ph;
5022 uint32_t *ippp;
5023
5024 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
5025 sizeof(uint32_t);
5026 ph = mtod(oper, struct sctp_paramhdr *);
5027 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
5028 ph->param_length = htons(SCTP_BUF_LEN(oper));
5029 ippp = (uint32_t *) (ph + 1);
5030 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
5031 }
5032 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
5033 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
5034 return;
5035 } else {
5036 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
5037 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5038 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5039 }
5040 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
5041 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5042 sctp_stop_timers_for_shutdown(stcb);
5043 sctp_send_shutdown(stcb,
5044 stcb->asoc.primary_destination);
5045 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5046 stcb->sctp_ep, stcb, asoc->primary_destination);
5047 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5048 stcb->sctp_ep, stcb, asoc->primary_destination);
5049 }
5050 return;
5051 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5052 (asoc->stream_queue_cnt == 0)) {
5053 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
5054 goto abort_out_now;
5055 }
5056 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5057 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
5058 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5059 sctp_send_shutdown_ack(stcb,
5060 stcb->asoc.primary_destination);
5061 sctp_stop_timers_for_shutdown(stcb);
5062 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5063 stcb->sctp_ep, stcb, asoc->primary_destination);
5064 return;
5065 }
5066 }
5067 /*
5068 * Now here we are going to recycle net_ack for a different use...
5069 * HEADS UP.
5070 */
5071 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5072 net->net_ack = 0;
5073 }
5074
5075 /*
5076 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5077 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5078 * automatically ensure that.
5079 */
5080 if ((asoc->sctp_cmt_on_off == 1) &&
5081 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5082 (cmt_dac_flag == 0)) {
5083 this_sack_lowest_newack = cum_ack;
5084 }
5085 if ((num_seg > 0) || (num_nr_seg > 0)) {
5086 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5087 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5088 }
5089 /* JRS - Use the congestion control given in the CC module */
5090 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5091
5092 /******************************************************************
5093 * Here we do the stuff with ECN Nonce checking.
5094 * We basically check to see if the nonce sum flag was incorrect
5095 * or if resynchronization needs to be done. Also if we catch a
5096 * misbehaving receiver we give him the kick.
5097 ******************************************************************/
5098
5099 if (asoc->ecn_nonce_allowed) {
5100 if (asoc->nonce_sum_check) {
5101 if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) {
5102 if (asoc->nonce_wait_for_ecne == 0) {
5103 struct sctp_tmit_chunk *lchk;
5104
5105 lchk = TAILQ_FIRST(&asoc->send_queue);
5106 asoc->nonce_wait_for_ecne = 1;
5107 if (lchk) {
5108 asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
5109 } else {
5110 asoc->nonce_wait_tsn = asoc->sending_seq;
5111 }
5112 } else {
5113 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
5114 (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
5115 /*
5116 * Misbehaving peer. We need
5117 * to react to this guy
5118 */
5119 asoc->ecn_allowed = 0;
5120 asoc->ecn_nonce_allowed = 0;
5121 }
5122 }
5123 }
5124 } else {
5125 /* See if Resynchronization Possible */
5126 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
5127 asoc->nonce_sum_check = 1;
5128 /*
5129 * now we must calculate what the base is.
5130 * We do this based on two things, we know
5131 * the total's for all the segments
5132 * gap-acked in the SACK, its stored in
5133 * ecn_seg_sums. We also know the SACK's
5134 * nonce sum, its in nonce_sum_flag. So we
5135 * can build a truth table to back-calculate
5136 * the new value of
5137 * asoc->nonce_sum_expect_base:
5138 *
5139 * SACK-flag-Value Seg-Sums Base 0 0 0
5140 * 1 0 1 0 1 1 1
5141 * 1 0
5142 */
5143 asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
5144 }
5145 }
5146 }
5147 /* Now are we exiting loss recovery ? */
5148 if (will_exit_fast_recovery) {
5149 /* Ok, we must exit fast recovery */
5150 asoc->fast_retran_loss_recovery = 0;
5151 }
5152 if ((asoc->sat_t3_loss_recovery) &&
5153 ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn,
5154 MAX_TSN) ||
5155 (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) {
5156 /* end satellite t3 loss recovery */
5157 asoc->sat_t3_loss_recovery = 0;
5158 }
5159 /*
5160 * CMT Fast recovery
5161 */
5162 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5163 if (net->will_exit_fast_recovery) {
5164 /* Ok, we must exit fast recovery */
5165 net->fast_retran_loss_recovery = 0;
5166 }
5167 }
5168
5169 /* Adjust and set the new rwnd value */
5170 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5171 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5172 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5173 }
5174 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5175 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5176 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5177 /* SWS sender side engages */
5178 asoc->peers_rwnd = 0;
5179 }
5180 if (asoc->peers_rwnd > old_rwnd) {
5181 win_probe_recovery = 1;
5182 }
5183 /*
5184 * Now we must setup so we have a timer up for anyone with
5185 * outstanding data.
5186 */
5187 done_once = 0;
5188 again:
5189 j = 0;
5190 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5191 if (win_probe_recovery && (net->window_probe)) {
5192 win_probe_recovered = 1;
5193 /*-
5194 * Find first chunk that was used with
5195 * window probe and clear the event. Put
5196 * it back into the send queue as if has
5197 * not been sent.
5198 */
5199 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5200 if (tp1->window_probe) {
5201 sctp_window_probe_recovery(stcb, asoc, net, tp1);
5202 break;
5203 }
5204 }
5205 }
5206 if (net->flight_size) {
5207 j++;
5208 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5209 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5210 stcb->sctp_ep, stcb, net);
5211 }
5212 if (net->window_probe) {
5213 net->window_probe = 0;
5214 }
5215 } else {
5216 if (net->window_probe) {
5217 /*
5218 * In window probes we must assure a timer
5219 * is still running there
5220 */
5221 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5222 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5223 stcb->sctp_ep, stcb, net);
5224
5225 }
5226 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5227 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5228 stcb, net,
5229 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
5230 }
5231 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
5232 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
5233 SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
5234 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
5235 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
5236 }
5237 }
5238 }
5239 }
5240 if ((j == 0) &&
5241 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5242 (asoc->sent_queue_retran_cnt == 0) &&
5243 (win_probe_recovered == 0) &&
5244 (done_once == 0)) {
5245 /*
5246 * huh, this should not happen unless all packets are
5247 * PR-SCTP and marked to skip of course.
5248 */
5249 if (sctp_fs_audit(asoc)) {
5250 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5251 net->flight_size = 0;
5252 }
5253 asoc->total_flight = 0;
5254 asoc->total_flight_count = 0;
5255 asoc->sent_queue_retran_cnt = 0;
5256 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5257 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5258 sctp_flight_size_increase(tp1);
5259 sctp_total_flight_increase(stcb, tp1);
5260 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5261 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5262 }
5263 }
5264 }
5265 done_once = 1;
5266 goto again;
5267 }
5268 /*********************************************/
5269 /* Here we perform PR-SCTP procedures */
5270 /* (section 4.2) */
5271 /*********************************************/
5272 /* C1. update advancedPeerAckPoint */
5273 if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) {
5274 asoc->advanced_peer_ack_point = cum_ack;
5275 }
5276 /* C2. try to further move advancedPeerAckPoint ahead */
5277 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
5278 struct sctp_tmit_chunk *lchk;
5279 uint32_t old_adv_peer_ack_point;
5280
5281 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5282 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5283 /* C3. See if we need to send a Fwd-TSN */
5284 if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack,
5285 MAX_TSN)) {
5286 /*
5287 * ISSUE with ECN, see FWD-TSN processing for notes
5288 * on issues that will occur when the ECN NONCE
5289 * stuff is put into SCTP for cross checking.
5290 */
5291 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5292 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5293 0xee, cum_ack, asoc->advanced_peer_ack_point,
5294 old_adv_peer_ack_point);
5295 }
5296 if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
5297 MAX_TSN)) {
5298
5299 send_forward_tsn(stcb, asoc);
5300 /*
5301 * ECN Nonce: Disable Nonce Sum check when
5302 * FWD TSN is sent and store resync tsn
5303 */
5304 asoc->nonce_sum_check = 0;
5305 asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
5306 } else if (lchk) {
5307 /* try to FR fwd-tsn's that get lost too */
5308 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5309 send_forward_tsn(stcb, asoc);
5310 }
5311 }
5312 }
5313 if (lchk) {
5314 /* Assure a timer is up */
5315 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5316 stcb->sctp_ep, stcb, lchk->whoTo);
5317 }
5318 }
5319 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5320 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5321 a_rwnd,
5322 stcb->asoc.peers_rwnd,
5323 stcb->asoc.total_flight,
5324 stcb->asoc.total_output_queue_size);
5325 }
5326 }
5327
5328 void
5329 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp,
5330 struct sctp_nets *netp, int *abort_flag)
5331 {
5332 /* Copy cum-ack */
5333 uint32_t cum_ack, a_rwnd;
5334
5335 cum_ack = ntohl(cp->cumulative_tsn_ack);
5336 /* Arrange so a_rwnd does NOT change */
5337 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5338
5339 /* Now call the express sack handling */
5340 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, 0, abort_flag);
5341 }
5342
5343 static void
5344 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5345 struct sctp_stream_in *strmin)
5346 {
5347 struct sctp_queued_to_read *ctl, *nctl;
5348 struct sctp_association *asoc;
5349 uint16_t tt;
5350
5351 asoc = &stcb->asoc;
5352 tt = strmin->last_sequence_delivered;
5353 /*
5354 * First deliver anything prior to and including the stream no that
5355 * came in
5356 */
5357 ctl = TAILQ_FIRST(&strmin->inqueue);
5358 while (ctl) {
5359 nctl = TAILQ_NEXT(ctl, next);
5360 if (compare_with_wrap(tt, ctl->sinfo_ssn, MAX_SEQ) ||
5361 (tt == ctl->sinfo_ssn)) {
5362 /* this is deliverable now */
5363 TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5364 /* subtract pending on streams */
5365 asoc->size_on_all_streams -= ctl->length;
5366 sctp_ucount_decr(asoc->cnt_on_all_streams);
5367 /* deliver it to at least the delivery-q */
5368 if (stcb->sctp_socket) {
5369 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5370 sctp_add_to_readq(stcb->sctp_ep, stcb,
5371 ctl,
5372 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5373 }
5374 } else {
5375 /* no more delivery now. */
5376 break;
5377 }
5378 ctl = nctl;
5379 }
5380 /*
5381 * now we must deliver things in queue the normal way if any are
5382 * now ready.
5383 */
5384 tt = strmin->last_sequence_delivered + 1;
5385 ctl = TAILQ_FIRST(&strmin->inqueue);
5386 while (ctl) {
5387 nctl = TAILQ_NEXT(ctl, next);
5388 if (tt == ctl->sinfo_ssn) {
5389 /* this is deliverable now */
5390 TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5391 /* subtract pending on streams */
5392 asoc->size_on_all_streams -= ctl->length;
5393 sctp_ucount_decr(asoc->cnt_on_all_streams);
5394 /* deliver it to at least the delivery-q */
5395 strmin->last_sequence_delivered = ctl->sinfo_ssn;
5396 if (stcb->sctp_socket) {
5397 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5398 sctp_add_to_readq(stcb->sctp_ep, stcb,
5399 ctl,
5400 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5401
5402 }
5403 tt = strmin->last_sequence_delivered + 1;
5404 } else {
5405 break;
5406 }
5407 ctl = nctl;
5408 }
5409 }
5410
5411 static void
5412 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5413 struct sctp_association *asoc,
5414 uint16_t stream, uint16_t seq)
5415 {
5416 struct sctp_tmit_chunk *chk, *at;
5417
5418 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5419 /* For each one on here see if we need to toss it */
5420 /*
5421 * For now large messages held on the reasmqueue that are
5422 * complete will be tossed too. We could in theory do more
5423 * work to spin through and stop after dumping one msg aka
5424 * seeing the start of a new msg at the head, and call the
5425 * delivery function... to see if it can be delivered... But
5426 * for now we just dump everything on the queue.
5427 */
5428 chk = TAILQ_FIRST(&asoc->reasmqueue);
5429 while (chk) {
5430 at = TAILQ_NEXT(chk, sctp_next);
5431 /*
5432 * Do not toss it if on a different stream or marked
5433 * for unordered delivery in which case the stream
5434 * sequence number has no meaning.
5435 */
5436 if ((chk->rec.data.stream_number != stream) ||
5437 ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
5438 chk = at;
5439 continue;
5440 }
5441 if (chk->rec.data.stream_seq == seq) {
5442 /* It needs to be tossed */
5443 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5444 if (compare_with_wrap(chk->rec.data.TSN_seq,
5445 asoc->tsn_last_delivered, MAX_TSN)) {
5446 asoc->tsn_last_delivered =
5447 chk->rec.data.TSN_seq;
5448 asoc->str_of_pdapi =
5449 chk->rec.data.stream_number;
5450 asoc->ssn_of_pdapi =
5451 chk->rec.data.stream_seq;
5452 asoc->fragment_flags =
5453 chk->rec.data.rcv_flags;
5454 }
5455 asoc->size_on_reasm_queue -= chk->send_size;
5456 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5457
5458 /* Clear up any stream problem */
5459 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
5460 SCTP_DATA_UNORDERED &&
5461 (compare_with_wrap(chk->rec.data.stream_seq,
5462 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
5463 MAX_SEQ))) {
5464 /*
5465 * We must dump forward this streams
5466 * sequence number if the chunk is
5467 * not unordered that is being
5468 * skipped. There is a chance that
5469 * if the peer does not include the
5470 * last fragment in its FWD-TSN we
5471 * WILL have a problem here since
5472 * you would have a partial chunk in
5473 * queue that may not be
5474 * deliverable. Also if a Partial
5475 * delivery API as started the user
5476 * may get a partial chunk. The next
5477 * read returning a new chunk...
5478 * really ugly but I see no way
5479 * around it! Maybe a notify??
5480 */
5481 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
5482 chk->rec.data.stream_seq;
5483 }
5484 if (chk->data) {
5485 sctp_m_freem(chk->data);
5486 chk->data = NULL;
5487 }
5488 sctp_free_a_chunk(stcb, chk);
5489 } else if (compare_with_wrap(chk->rec.data.stream_seq, seq, MAX_SEQ)) {
5490 /*
5491 * If the stream_seq is > than the purging
5492 * one, we are done
5493 */
5494 break;
5495 }
5496 chk = at;
5497 }
5498 }
5499 }
5500
5501
5502 void
5503 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5504 struct sctp_forward_tsn_chunk *fwd,
5505 int *abort_flag, struct mbuf *m, int offset)
5506 {
5507 /*
5508 * ISSUES that MUST be fixed for ECN! When we are the sender of the
5509 * forward TSN, when the SACK comes back that acknowledges the
5510 * FWD-TSN we must reset the NONCE sum to match correctly. This will
5511 * get quite tricky since we may have sent more data interveneing
5512 * and must carefully account for what the SACK says on the nonce
5513 * and any gaps that are reported. This work will NOT be done here,
5514 * but I note it here since it is really related to PR-SCTP and
5515 * FWD-TSN's
5516 */
5517
5518 /* The pr-sctp fwd tsn */
5519 /*
5520 * here we will perform all the data receiver side steps for
5521 * processing FwdTSN, as required in by pr-sctp draft:
5522 *
5523 * Assume we get FwdTSN(x):
5524 *
5525 * 1) update local cumTSN to x 2) try to further advance cumTSN to x +
5526 * others we have 3) examine and update re-ordering queue on
5527 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5528 * report where we are.
5529 */
5530 struct sctp_association *asoc;
5531 uint32_t new_cum_tsn, gap;
5532 unsigned int i, fwd_sz, cumack_set_flag, m_size;
5533 uint32_t str_seq;
5534 struct sctp_stream_in *strm;
5535 struct sctp_tmit_chunk *chk, *at;
5536 struct sctp_queued_to_read *ctl, *sv;
5537
5538 cumack_set_flag = 0;
5539 asoc = &stcb->asoc;
5540 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5541 SCTPDBG(SCTP_DEBUG_INDATA1,
5542 "Bad size too small/big fwd-tsn\n");
5543 return;
5544 }
5545 m_size = (stcb->asoc.mapping_array_size << 3);
5546 /*************************************************************/
5547 /* 1. Here we update local cumTSN and shift the bitmap array */
5548 /*************************************************************/
5549 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5550
5551 if (compare_with_wrap(asoc->cumulative_tsn, new_cum_tsn, MAX_TSN) ||
5552 asoc->cumulative_tsn == new_cum_tsn) {
5553 /* Already got there ... */
5554 return;
5555 }
5556 /*
5557 * now we know the new TSN is more advanced, let's find the actual
5558 * gap
5559 */
5560 SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5561 asoc->cumulative_tsn = new_cum_tsn;
5562 if (gap >= m_size) {
5563 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5564 struct mbuf *oper;
5565
5566 /*
5567 * out of range (of single byte chunks in the rwnd I
5568 * give out). This must be an attacker.
5569 */
5570 *abort_flag = 1;
5571 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
5572 0, M_DONTWAIT, 1, MT_DATA);
5573 if (oper) {
5574 struct sctp_paramhdr *ph;
5575 uint32_t *ippp;
5576
5577 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
5578 (sizeof(uint32_t) * 3);
5579 ph = mtod(oper, struct sctp_paramhdr *);
5580 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5581 ph->param_length = htons(SCTP_BUF_LEN(oper));
5582 ippp = (uint32_t *) (ph + 1);
5583 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
5584 ippp++;
5585 *ippp = asoc->highest_tsn_inside_map;
5586 ippp++;
5587 *ippp = new_cum_tsn;
5588 }
5589 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5590 sctp_abort_an_association(stcb->sctp_ep, stcb,
5591 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
5592 return;
5593 }
5594 SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5595
5596 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5597 asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5598 asoc->highest_tsn_inside_map = new_cum_tsn;
5599
5600 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5601 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5602
5603 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5604 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5605 }
5606 asoc->last_echo_tsn = asoc->highest_tsn_inside_map;
5607 } else {
5608 SCTP_TCB_LOCK_ASSERT(stcb);
5609 for (i = 0; i <= gap; i++) {
5610 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5611 !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5612 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5613 if (compare_with_wrap(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
5614 asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5615 }
5616 }
5617 }
5618 }
5619 /*************************************************************/
5620 /* 2. Clear up re-assembly queue */
5621 /*************************************************************/
5622 /*
5623 * First service it if pd-api is up, just in case we can progress it
5624 * forward
5625 */
5626 if (asoc->fragmented_delivery_inprogress) {
5627 sctp_service_reassembly(stcb, asoc);
5628 }
5629 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5630 /* For each one on here see if we need to toss it */
5631 /*
5632 * For now large messages held on the reasmqueue that are
5633 * complete will be tossed too. We could in theory do more
5634 * work to spin through and stop after dumping one msg aka
5635 * seeing the start of a new msg at the head, and call the
5636 * delivery function... to see if it can be delivered... But
5637 * for now we just dump everything on the queue.
5638 */
5639 chk = TAILQ_FIRST(&asoc->reasmqueue);
5640 while (chk) {
5641 at = TAILQ_NEXT(chk, sctp_next);
5642 if ((compare_with_wrap(new_cum_tsn,
5643 chk->rec.data.TSN_seq, MAX_TSN)) ||
5644 (new_cum_tsn == chk->rec.data.TSN_seq)) {
5645 /* It needs to be tossed */
5646 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5647 if (compare_with_wrap(chk->rec.data.TSN_seq,
5648 asoc->tsn_last_delivered, MAX_TSN)) {
5649 asoc->tsn_last_delivered =
5650 chk->rec.data.TSN_seq;
5651 asoc->str_of_pdapi =
5652 chk->rec.data.stream_number;
5653 asoc->ssn_of_pdapi =
5654 chk->rec.data.stream_seq;
5655 asoc->fragment_flags =
5656 chk->rec.data.rcv_flags;
5657 }
5658 asoc->size_on_reasm_queue -= chk->send_size;
5659 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5660
5661 /* Clear up any stream problem */
5662 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
5663 SCTP_DATA_UNORDERED &&
5664 (compare_with_wrap(chk->rec.data.stream_seq,
5665 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
5666 MAX_SEQ))) {
5667 /*
5668 * We must dump forward this streams
5669 * sequence number if the chunk is
5670 * not unordered that is being
5671 * skipped. There is a chance that
5672 * if the peer does not include the
5673 * last fragment in its FWD-TSN we
5674 * WILL have a problem here since
5675 * you would have a partial chunk in
5676 * queue that may not be
5677 * deliverable. Also if a Partial
5678 * delivery API as started the user
5679 * may get a partial chunk. The next
5680 * read returning a new chunk...
5681 * really ugly but I see no way
5682 * around it! Maybe a notify??
5683 */
5684 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
5685 chk->rec.data.stream_seq;
5686 }
5687 if (chk->data) {
5688 sctp_m_freem(chk->data);
5689 chk->data = NULL;
5690 }
5691 sctp_free_a_chunk(stcb, chk);
5692 } else {
5693 /*
5694 * Ok we have gone beyond the end of the
5695 * fwd-tsn's mark.
5696 */
5697 break;
5698 }
5699 chk = at;
5700 }
5701 }
5702 /*******************************************************/
5703 /* 3. Update the PR-stream re-ordering queues and fix */
5704 /* delivery issues as needed. */
5705 /*******************************************************/
5706 fwd_sz -= sizeof(*fwd);
5707 if (m && fwd_sz) {
5708 /* New method. */
5709 unsigned int num_str;
5710 struct sctp_strseq *stseq, strseqbuf;
5711
5712 offset += sizeof(*fwd);
5713
5714 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5715 num_str = fwd_sz / sizeof(struct sctp_strseq);
5716 for (i = 0; i < num_str; i++) {
5717 uint16_t st;
5718
5719 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5720 sizeof(struct sctp_strseq),
5721 (uint8_t *) & strseqbuf);
5722 offset += sizeof(struct sctp_strseq);
5723 if (stseq == NULL) {
5724 break;
5725 }
5726 /* Convert */
5727 st = ntohs(stseq->stream);
5728 stseq->stream = st;
5729 st = ntohs(stseq->sequence);
5730 stseq->sequence = st;
5731
5732 /* now process */
5733
5734 /*
5735 * Ok we now look for the stream/seq on the read
5736 * queue where its not all delivered. If we find it
5737 * we transmute the read entry into a PDI_ABORTED.
5738 */
5739 if (stseq->stream >= asoc->streamincnt) {
5740 /* screwed up streams, stop! */
5741 break;
5742 }
5743 if ((asoc->str_of_pdapi == stseq->stream) &&
5744 (asoc->ssn_of_pdapi == stseq->sequence)) {
5745 /*
5746 * If this is the one we were partially
5747 * delivering now then we no longer are.
5748 * Note this will change with the reassembly
5749 * re-write.
5750 */
5751 asoc->fragmented_delivery_inprogress = 0;
5752 }
5753 sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
5754 TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5755 if ((ctl->sinfo_stream == stseq->stream) &&
5756 (ctl->sinfo_ssn == stseq->sequence)) {
5757 str_seq = (stseq->stream << 16) | stseq->sequence;
5758 ctl->end_added = 1;
5759 ctl->pdapi_aborted = 1;
5760 sv = stcb->asoc.control_pdapi;
5761 stcb->asoc.control_pdapi = ctl;
5762 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5763 stcb,
5764 SCTP_PARTIAL_DELIVERY_ABORTED,
5765 (void *)&str_seq,
5766 SCTP_SO_NOT_LOCKED);
5767 stcb->asoc.control_pdapi = sv;
5768 break;
5769 } else if ((ctl->sinfo_stream == stseq->stream) &&
5770 (compare_with_wrap(ctl->sinfo_ssn, stseq->sequence, MAX_SEQ))) {
5771 /* We are past our victim SSN */
5772 break;
5773 }
5774 }
5775 strm = &asoc->strmin[stseq->stream];
5776 if (compare_with_wrap(stseq->sequence,
5777 strm->last_sequence_delivered, MAX_SEQ)) {
5778 /* Update the sequence number */
5779 strm->last_sequence_delivered =
5780 stseq->sequence;
5781 }
5782 /* now kick the stream the new way */
5783 /* sa_ignore NO_NULL_CHK */
5784 sctp_kick_prsctp_reorder_queue(stcb, strm);
5785 }
5786 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5787 }
5788 /*
5789 * Now slide thing forward.
5790 */
5791 sctp_slide_mapping_arrays(stcb);
5792
5793 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5794 /* now lets kick out and check for more fragmented delivery */
5795 /* sa_ignore NO_NULL_CHK */
5796 sctp_deliver_reasm_check(stcb, &stcb->asoc);
5797 }
5798 }
Cache object: f4db8b2b8f367eb272055c8e8568128b
|