1 /*-
2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 *
9 * a) Redistributions of source code must retain the above copyright notice,
10 * this list of conditions and the following disclaimer.
11 *
12 * b) Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the distribution.
15 *
16 * c) Neither the name of Cisco Systems, Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD: releng/9.1/sys/netinet/sctp_indata.c 268434 2014-07-08 21:55:27Z delphij $");
35
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_var.h>
38 #include <netinet/sctp_sysctl.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctputil.h>
42 #include <netinet/sctp_output.h>
43 #include <netinet/sctp_input.h>
44 #include <netinet/sctp_indata.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
47
48
/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send, that is, and will be sending it -- for bundling).
 *
 * The callback in sctp_usrreq.c will get called when the socket is read from.
 * This will cause sctp_service_queues() to get called on the top entry in
 * the list.
 */
58
/*
 * Recompute the association's advertised receive window (a_rwnd) and cache
 * it in asoc->my_rwnd.  Thin wrapper around sctp_calc_rwnd().
 */
void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}
64
65 /* Calculate what the rwnd would be */
66 uint32_t
67 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
68 {
69 uint32_t calc = 0;
70
71 /*
72 * This is really set wrong with respect to a 1-2-m socket. Since
73 * the sb_cc is the count that everyone as put up. When we re-write
74 * sctp_soreceive then we will fix this so that ONLY this
75 * associations data is taken into account.
76 */
77 if (stcb->sctp_socket == NULL)
78 return (calc);
79
80 if (stcb->asoc.sb_cc == 0 &&
81 asoc->size_on_reasm_queue == 0 &&
82 asoc->size_on_all_streams == 0) {
83 /* Full rwnd granted */
84 calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
85 return (calc);
86 }
87 /* get actual space */
88 calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
89
90 /*
91 * take out what has NOT been put on socket queue and we yet hold
92 * for putting up.
93 */
94 calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
95 asoc->cnt_on_reasm_queue * MSIZE));
96 calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
97 asoc->cnt_on_all_streams * MSIZE));
98
99 if (calc == 0) {
100 /* out of space */
101 return (calc);
102 }
103 /* what is the overhead of all these rwnd's */
104 calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
105 /*
106 * If the window gets too small due to ctrl-stuff, reduce it to 1,
107 * even it is 0. SWS engaged
108 */
109 if (calc < stcb->asoc.my_rwnd_control_len) {
110 calc = 1;
111 }
112 return (calc);
113 }
114
115
116
117 /*
118 * Build out our readq entry based on the incoming packet.
119 */
120 struct sctp_queued_to_read *
121 sctp_build_readq_entry(struct sctp_tcb *stcb,
122 struct sctp_nets *net,
123 uint32_t tsn, uint32_t ppid,
124 uint32_t context, uint16_t stream_no,
125 uint16_t stream_seq, uint8_t flags,
126 struct mbuf *dm)
127 {
128 struct sctp_queued_to_read *read_queue_e = NULL;
129
130 sctp_alloc_a_readq(stcb, read_queue_e);
131 if (read_queue_e == NULL) {
132 goto failed_build;
133 }
134 read_queue_e->sinfo_stream = stream_no;
135 read_queue_e->sinfo_ssn = stream_seq;
136 read_queue_e->sinfo_flags = (flags << 8);
137 read_queue_e->sinfo_ppid = ppid;
138 read_queue_e->sinfo_context = context;
139 read_queue_e->sinfo_timetolive = 0;
140 read_queue_e->sinfo_tsn = tsn;
141 read_queue_e->sinfo_cumtsn = tsn;
142 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
143 read_queue_e->whoFrom = net;
144 read_queue_e->length = 0;
145 atomic_add_int(&net->ref_count, 1);
146 read_queue_e->data = dm;
147 read_queue_e->spec_flags = 0;
148 read_queue_e->tail_mbuf = NULL;
149 read_queue_e->aux_data = NULL;
150 read_queue_e->stcb = stcb;
151 read_queue_e->port_from = stcb->rport;
152 read_queue_e->do_not_ref_stcb = 0;
153 read_queue_e->end_added = 0;
154 read_queue_e->some_taken = 0;
155 read_queue_e->pdapi_aborted = 0;
156 failed_build:
157 return (read_queue_e);
158 }
159
160
161 /*
162 * Build out our readq entry based on the incoming packet.
163 */
164 static struct sctp_queued_to_read *
165 sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
166 struct sctp_tmit_chunk *chk)
167 {
168 struct sctp_queued_to_read *read_queue_e = NULL;
169
170 sctp_alloc_a_readq(stcb, read_queue_e);
171 if (read_queue_e == NULL) {
172 goto failed_build;
173 }
174 read_queue_e->sinfo_stream = chk->rec.data.stream_number;
175 read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
176 read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
177 read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
178 read_queue_e->sinfo_context = stcb->asoc.context;
179 read_queue_e->sinfo_timetolive = 0;
180 read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
181 read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
182 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
183 read_queue_e->whoFrom = chk->whoTo;
184 read_queue_e->aux_data = NULL;
185 read_queue_e->length = 0;
186 atomic_add_int(&chk->whoTo->ref_count, 1);
187 read_queue_e->data = chk->data;
188 read_queue_e->tail_mbuf = NULL;
189 read_queue_e->stcb = stcb;
190 read_queue_e->port_from = stcb->rport;
191 read_queue_e->spec_flags = 0;
192 read_queue_e->do_not_ref_stcb = 0;
193 read_queue_e->end_added = 0;
194 read_queue_e->some_taken = 0;
195 read_queue_e->pdapi_aborted = 0;
196 failed_build:
197 return (read_queue_e);
198 }
199
200
201 struct mbuf *
202 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
203 {
204 struct sctp_extrcvinfo *seinfo;
205 struct sctp_sndrcvinfo *outinfo;
206 struct sctp_rcvinfo *rcvinfo;
207 struct sctp_nxtinfo *nxtinfo;
208 struct cmsghdr *cmh;
209 struct mbuf *ret;
210 int len;
211 int use_extended;
212 int provide_nxt;
213
214 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
215 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
216 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
217 /* user does not want any ancillary data */
218 return (NULL);
219 }
220 len = 0;
221 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
222 len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
223 }
224 seinfo = (struct sctp_extrcvinfo *)sinfo;
225 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
226 (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
227 provide_nxt = 1;
228 len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
229 } else {
230 provide_nxt = 0;
231 }
232 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
233 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
234 use_extended = 1;
235 len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
236 } else {
237 use_extended = 0;
238 len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
239 }
240 } else {
241 use_extended = 0;
242 }
243
244 ret = sctp_get_mbuf_for_msg(len, 0, M_DONTWAIT, 1, MT_DATA);
245 if (ret == NULL) {
246 /* No space */
247 return (ret);
248 }
249 SCTP_BUF_LEN(ret) = 0;
250
251 /* We need a CMSG header followed by the struct */
252 cmh = mtod(ret, struct cmsghdr *);
253 /*
254 * Make sure that there is no un-initialized padding between the
255 * cmsg header and cmsg data and after the cmsg data.
256 */
257 memset(cmh, 0, len);
258 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
259 cmh->cmsg_level = IPPROTO_SCTP;
260 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
261 cmh->cmsg_type = SCTP_RCVINFO;
262 rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
263 rcvinfo->rcv_sid = sinfo->sinfo_stream;
264 rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
265 rcvinfo->rcv_flags = sinfo->sinfo_flags;
266 rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
267 rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
268 rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
269 rcvinfo->rcv_context = sinfo->sinfo_context;
270 rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
271 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
272 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
273 }
274 if (provide_nxt) {
275 cmh->cmsg_level = IPPROTO_SCTP;
276 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
277 cmh->cmsg_type = SCTP_NXTINFO;
278 nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
279 nxtinfo->nxt_sid = seinfo->sreinfo_next_stream;
280 nxtinfo->nxt_flags = 0;
281 if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
282 nxtinfo->nxt_flags |= SCTP_UNORDERED;
283 }
284 if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
285 nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
286 }
287 if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
288 nxtinfo->nxt_flags |= SCTP_COMPLETE;
289 }
290 nxtinfo->nxt_ppid = seinfo->sreinfo_next_ppid;
291 nxtinfo->nxt_length = seinfo->sreinfo_next_length;
292 nxtinfo->nxt_assoc_id = seinfo->sreinfo_next_aid;
293 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
294 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
295 }
296 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
297 cmh->cmsg_level = IPPROTO_SCTP;
298 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
299 if (use_extended) {
300 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
301 cmh->cmsg_type = SCTP_EXTRCV;
302 memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
303 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
304 } else {
305 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
306 cmh->cmsg_type = SCTP_SNDRCV;
307 *outinfo = *sinfo;
308 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
309 }
310 }
311 return (ret);
312 }
313
314
/*
 * Move a delivered TSN from the (revokable) mapping array to the
 * non-revokable (NR) mapping array, so a later SACK cannot revoke it.
 * Only active when the sctp_do_drain sysctl is enabled.  Also maintains
 * highest_tsn_inside_nr_map / highest_tsn_inside_map invariants.
 * All TSN comparisons use serial-number arithmetic (wraparound-safe).
 */
static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
	uint32_t gap, i, cumackp1;
	int fnd = 0;

	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
		/* Drain support disabled: nothing can be revoked anyway. */
		return;
	}
	cumackp1 = asoc->cumulative_tsn + 1;
	if (SCTP_TSN_GT(cumackp1, tsn)) {
		/*
		 * this tsn is behind the cum ack and thus we don't need to
		 * worry about it being moved from one to the other.
		 */
		return;
	}
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
		/* Bit was expected to be set; state is inconsistent. */
		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
		sctp_print_mapping_array(asoc);
#ifdef INVARIANTS
		panic("Things are really messed up now!!");
#endif
	}
	/* Flip the bit: now tracked as non-revokable, not revokable. */
	SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
	SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
		asoc->highest_tsn_inside_nr_map = tsn;
	}
	if (tsn == asoc->highest_tsn_inside_map) {
		/* We must back down to see what the new highest is */
		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
				asoc->highest_tsn_inside_map = i;
				fnd = 1;
				break;
			}
		}
		if (!fnd) {
			/* No bits left set: map is effectively empty. */
			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
		}
	}
}
360
361
/*
 * We are delivering currently from the reassembly queue. We must continue to
 * deliver until we either: 1) run out of space. 2) run out of sequential
 * TSN's 3) hit the SCTP_DATA_LAST_FRAG flag.
 *
 * Walks the reassembly queue in TSN order, pushing each in-sequence
 * fragment to the socket receive buffer: a FIRST fragment creates a new
 * readq entry (saved as control_pdapi for the partial-delivery API), and
 * MIDDLE/LAST fragments are appended to it.  If the socket is gone the
 * whole queue is drained and freed instead.
 */
static void
sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk, *nchk;
	uint16_t nxt_todel;
	uint16_t stream_no;
	int end = 0;
	int cntDel;
	struct sctp_queued_to_read *control, *ctl, *nctl;

	if (stcb == NULL)
		return;

	cntDel = stream_no = 0;
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* socket above is long gone or going.. */
abandon:
		/* Drop everything: there is no reader left to deliver to. */
		asoc->fragmented_delivery_inprogress = 0;
		TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
			asoc->size_on_reasm_queue -= chk->send_size;
			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
			/*
			 * Lose the data pointer, since its in the socket
			 * buffer
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			/* Now free the address and data */
			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
			/* sa_ignore FREED_MEMORY */
		}
		return;
	}
	SCTP_TCB_LOCK_ASSERT(stcb);
	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
			/* Can't deliver more :< */
			return;
		}
		stream_no = chk->rec.data.stream_number;
		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
		if (nxt_todel != chk->rec.data.stream_seq &&
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
			/*
			 * Not the next sequence to deliver in its stream OR
			 * unordered
			 */
			return;
		}
		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			/* FIRST fragment: start a new readq entry. */
			control = sctp_build_readq_entry_chk(stcb, chk);
			if (control == NULL) {
				/* out of memory? */
				return;
			}
			/* save it off for our future deliveries */
			stcb->asoc.control_pdapi = control;
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
			sctp_add_to_readq(stcb->sctp_ep,
			    stcb, control, &stcb->sctp_socket->so_rcv, end,
			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
			cntDel++;
		} else {
			/* MIDDLE/LAST fragment: append to the open entry. */
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
			if (sctp_append_to_readq(stcb->sctp_ep, stcb,
			    stcb->asoc.control_pdapi,
			    chk->data, end, chk->rec.data.TSN_seq,
			    &stcb->sctp_socket->so_rcv)) {
				/*
				 * something is very wrong, either
				 * control_pdapi is NULL, or the tail_mbuf
				 * is corrupt, or there is a EOM already on
				 * the mbuf chain.
				 */
				if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
					goto abandon;
				} else {
#ifdef INVARIANTS
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						panic("This should not happen control_pdapi NULL?");
					}
					/* if we did not panic, it was a EOM */
					panic("Bad chunking ??");
#else
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
					}
					SCTP_PRINTF("Bad chunking ??\n");
					SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");

#endif
					goto abandon;
				}
			}
			cntDel++;
		}
		/* pull it we did it */
		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			/* Message complete: PD-API delivery finished. */
			asoc->fragmented_delivery_inprogress = 0;
			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
				asoc->strmin[stream_no].last_sequence_delivered++;
			}
			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
			}
		} else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			/*
			 * turn the flag back on since we just delivered
			 * yet another one.
			 */
			asoc->fragmented_delivery_inprogress = 1;
		}
		/* Record what was just delivered for later sanity checks. */
		asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
		asoc->last_flags_delivered = chk->rec.data.rcv_flags;
		asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
		asoc->last_strm_no_delivered = chk->rec.data.stream_number;

		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
		asoc->size_on_reasm_queue -= chk->send_size;
		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
		/* free up the chk */
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);

		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * Now lets see if we can deliver the next one on
			 * the stream
			 */
			struct sctp_stream_in *strm;

			strm = &asoc->strmin[stream_no];
			nxt_todel = strm->last_sequence_delivered + 1;
			TAILQ_FOREACH_SAFE(ctl, &strm->inqueue, next, nctl) {
				/* Deliver more if we can. */
				if (nxt_todel == ctl->sinfo_ssn) {
					TAILQ_REMOVE(&strm->inqueue, ctl, next);
					asoc->size_on_all_streams -= ctl->length;
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					strm->last_sequence_delivered++;
					sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
					sctp_add_to_readq(stcb->sctp_ep, stcb,
					    ctl,
					    &stcb->sctp_socket->so_rcv, 1,
					    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
				} else {
					break;
				}
				nxt_todel = strm->last_sequence_delivered + 1;
			}
			break;
		}
	}
}
536
/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue. If we do
 * append to the so_buf, keep doing so until we are out of order. One big
 * question still remains, what to do when the socket buffer is FULL??
 *
 * On a protocol violation (SSN at/behind last delivered, or TSN at/behind
 * the cumulative ack) the association is aborted and *abort_flag is set.
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream useage that could occur. Send a
	 * single msg to stream 0, send 4Billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about
	 * or should we just change our queue sort at the bottom to be by
	 * TSN.
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided an will leave the sort by
	 * SSN alone. Maybe a hybred approach is the answer
	 *
	 */
	struct sctp_stream_in *strm;
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint16_t nxt_todel;
	struct mbuf *oper;

	queue_needed = 1;
	/* Account the chunk against the stream queues up front. */
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	strm = &asoc->strmin[control->sinfo_stream];
	nxt_todel = strm->last_sequence_delivered + 1;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	SCTPDBG(SCTP_DEBUG_INDATA1,
	    "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
	    (uint32_t) control->sinfo_stream,
	    (uint32_t) strm->last_sequence_delivered,
	    (uint32_t) nxt_todel);
	if (SCTP_SSN_GE(strm->last_sequence_delivered, control->sinfo_ssn)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
		    control->sinfo_ssn, strm->last_sequence_delivered);
protocol_error:
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		/* Build a PROTOCOL_VIOLATION cause carrying TSN/stream/SSN. */
		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (oper) {
			struct sctp_paramhdr *ph;
			uint32_t *ippp;

			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
			    (sizeof(uint32_t) * 3);
			ph = mtod(oper, struct sctp_paramhdr *);
			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
			ph->param_length = htons(SCTP_BUF_LEN(oper));
			ippp = (uint32_t *) (ph + 1);
			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
			ippp++;
			*ippp = control->sinfo_tsn;
			ippp++;
			*ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
		}
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
		sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return;

	}
	if (nxt_todel == control->sinfo_ssn) {
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* EY it wont be queued if it could be delivered directly */
		queue_needed = 0;
		asoc->size_on_all_streams -= control->length;
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_sequence_delivered++;

		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
		/*
		 * Delivering this chunk may have unblocked queued ones;
		 * drain the in-queue while SSNs stay consecutive.
		 * NOTE: 'control' is reused as the loop iterator here.
		 */
		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next, at) {
			/* all delivered */
			nxt_todel = strm->last_sequence_delivered + 1;
			if (nxt_todel == control->sinfo_ssn) {
				TAILQ_REMOVE(&strm->inqueue, control, next);
				asoc->size_on_all_streams -= control->length;
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				strm->last_sequence_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1,
				    SCTP_READ_LOCK_NOT_HELD,
				    SCTP_SO_NOT_LOCKED);
				continue;
			}
			break;
		}
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if (SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) {
			/* TSN at/behind cum-ack but SSN not delivered: bogus. */
			goto protocol_error;
		}
		if (TAILQ_EMPTY(&strm->inqueue)) {
			/* Empty queue */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
				sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
			}
			TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		} else {
			/* Insertion sort by SSN. */
			TAILQ_FOREACH(at, &strm->inqueue, next) {
				if (SCTP_SSN_GT(at->sinfo_ssn, control->sinfo_ssn)) {
					/*
					 * one in queue is bigger than the
					 * new one, insert before this one
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_MD);
					}
					TAILQ_INSERT_BEFORE(at, control, next);
					break;
				} else if (at->sinfo_ssn == control->sinfo_ssn) {
					/*
					 * Gak, He sent me a duplicate str
					 * seq number
					 */
					/*
					 * foo bar, I guess I will just free
					 * this new guy, should we abort
					 * too? FIX ME MAYBE? Or it COULD be
					 * that the SSN's have wrapped.
					 * Maybe I should compare to TSN
					 * somehow... sigh for now just blow
					 * away the chunk!
					 */

					if (control->data)
						sctp_m_freem(control->data);
					control->data = NULL;
					asoc->size_on_all_streams -= control->length;
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					if (control->whoFrom) {
						sctp_free_remote_addr(control->whoFrom);
						control->whoFrom = NULL;
					}
					sctp_free_a_readq(stcb, control);
					return;
				} else {
					if (TAILQ_NEXT(at, next) == NULL) {
						/*
						 * We are at the end, insert
						 * it after this one
						 */
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
							sctp_log_strm_del(control, at,
							    SCTP_STR_LOG_FROM_INSERT_TL);
						}
						TAILQ_INSERT_AFTER(&strm->inqueue,
						    at, control, next);
						break;
					}
				}
			}
		}
	}
}
735
736 /*
737 * Returns two things: You get the total size of the deliverable parts of the
738 * first fragmented message on the reassembly queue. And you get a 1 back if
739 * all of the message is ready or a 0 back if the message is still incomplete
740 */
741 static int
742 sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
743 {
744 struct sctp_tmit_chunk *chk;
745 uint32_t tsn;
746
747 *t_size = 0;
748 chk = TAILQ_FIRST(&asoc->reasmqueue);
749 if (chk == NULL) {
750 /* nothing on the queue */
751 return (0);
752 }
753 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
754 /* Not a first on the queue */
755 return (0);
756 }
757 tsn = chk->rec.data.TSN_seq;
758 TAILQ_FOREACH(chk, &asoc->reasmqueue, sctp_next) {
759 if (tsn != chk->rec.data.TSN_seq) {
760 return (0);
761 }
762 *t_size += chk->send_size;
763 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
764 return (1);
765 }
766 tsn++;
767 }
768 return (0);
769 }
770
771 static void
772 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
773 {
774 struct sctp_tmit_chunk *chk;
775 uint16_t nxt_todel;
776 uint32_t tsize, pd_point;
777
778 doit_again:
779 chk = TAILQ_FIRST(&asoc->reasmqueue);
780 if (chk == NULL) {
781 /* Huh? */
782 asoc->size_on_reasm_queue = 0;
783 asoc->cnt_on_reasm_queue = 0;
784 return;
785 }
786 if (asoc->fragmented_delivery_inprogress == 0) {
787 nxt_todel =
788 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
789 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
790 (nxt_todel == chk->rec.data.stream_seq ||
791 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
792 /*
793 * Yep the first one is here and its ok to deliver
794 * but should we?
795 */
796 if (stcb->sctp_socket) {
797 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
798 stcb->sctp_ep->partial_delivery_point);
799 } else {
800 pd_point = stcb->sctp_ep->partial_delivery_point;
801 }
802 if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
803
804 /*
805 * Yes, we setup to start reception, by
806 * backing down the TSN just in case we
807 * can't deliver. If we
808 */
809 asoc->fragmented_delivery_inprogress = 1;
810 asoc->tsn_last_delivered =
811 chk->rec.data.TSN_seq - 1;
812 asoc->str_of_pdapi =
813 chk->rec.data.stream_number;
814 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
815 asoc->pdapi_ppid = chk->rec.data.payloadtype;
816 asoc->fragment_flags = chk->rec.data.rcv_flags;
817 sctp_service_reassembly(stcb, asoc);
818 }
819 }
820 } else {
821 /*
822 * Service re-assembly will deliver stream data queued at
823 * the end of fragmented delivery.. but it wont know to go
824 * back and call itself again... we do that here with the
825 * got doit_again
826 */
827 sctp_service_reassembly(stcb, asoc);
828 if (asoc->fragmented_delivery_inprogress == 0) {
829 /*
830 * finished our Fragmented delivery, could be more
831 * waiting?
832 */
833 goto doit_again;
834 }
835 }
836 }
837
838 /*
839 * Dump onto the re-assembly queue, in its proper place. After dumping on the
840 * queue, see if anthing can be delivered. If so pull it off (or as much as
841 * we can. If we run out of space then we must dump what we can and set the
842 * appropriate flag to say we queued what we could.
843 */
844 static void
845 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
846 struct sctp_tmit_chunk *chk, int *abort_flag)
847 {
848 struct mbuf *oper;
849 uint32_t cum_ackp1, prev_tsn, post_tsn;
850 struct sctp_tmit_chunk *at, *prev, *next;
851
852 prev = next = NULL;
853 cum_ackp1 = asoc->tsn_last_delivered + 1;
854 if (TAILQ_EMPTY(&asoc->reasmqueue)) {
855 /* This is the first one on the queue */
856 TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
857 /*
858 * we do not check for delivery of anything when only one
859 * fragment is here
860 */
861 asoc->size_on_reasm_queue = chk->send_size;
862 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
863 if (chk->rec.data.TSN_seq == cum_ackp1) {
864 if (asoc->fragmented_delivery_inprogress == 0 &&
865 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
866 SCTP_DATA_FIRST_FRAG) {
867 /*
868 * An empty queue, no delivery inprogress,
869 * we hit the next one and it does NOT have
870 * a FIRST fragment mark.
871 */
872 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
873 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
874 0, M_DONTWAIT, 1, MT_DATA);
875
876 if (oper) {
877 struct sctp_paramhdr *ph;
878 uint32_t *ippp;
879
880 SCTP_BUF_LEN(oper) =
881 sizeof(struct sctp_paramhdr) +
882 (sizeof(uint32_t) * 3);
883 ph = mtod(oper, struct sctp_paramhdr *);
884 ph->param_type =
885 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
886 ph->param_length = htons(SCTP_BUF_LEN(oper));
887 ippp = (uint32_t *) (ph + 1);
888 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2);
889 ippp++;
890 *ippp = chk->rec.data.TSN_seq;
891 ippp++;
892 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
893
894 }
895 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
896 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
897 *abort_flag = 1;
898 } else if (asoc->fragmented_delivery_inprogress &&
899 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
900 /*
901 * We are doing a partial delivery and the
902 * NEXT chunk MUST be either the LAST or
903 * MIDDLE fragment NOT a FIRST
904 */
905 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
906 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
907 0, M_DONTWAIT, 1, MT_DATA);
908 if (oper) {
909 struct sctp_paramhdr *ph;
910 uint32_t *ippp;
911
912 SCTP_BUF_LEN(oper) =
913 sizeof(struct sctp_paramhdr) +
914 (3 * sizeof(uint32_t));
915 ph = mtod(oper, struct sctp_paramhdr *);
916 ph->param_type =
917 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
918 ph->param_length = htons(SCTP_BUF_LEN(oper));
919 ippp = (uint32_t *) (ph + 1);
920 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);
921 ippp++;
922 *ippp = chk->rec.data.TSN_seq;
923 ippp++;
924 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
925 }
926 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
927 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
928 *abort_flag = 1;
929 } else if (asoc->fragmented_delivery_inprogress) {
930 /*
931 * Here we are ok with a MIDDLE or LAST
932 * piece
933 */
934 if (chk->rec.data.stream_number !=
935 asoc->str_of_pdapi) {
936 /* Got to be the right STR No */
937 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
938 chk->rec.data.stream_number,
939 asoc->str_of_pdapi);
940 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
941 0, M_DONTWAIT, 1, MT_DATA);
942 if (oper) {
943 struct sctp_paramhdr *ph;
944 uint32_t *ippp;
945
946 SCTP_BUF_LEN(oper) =
947 sizeof(struct sctp_paramhdr) +
948 (sizeof(uint32_t) * 3);
949 ph = mtod(oper,
950 struct sctp_paramhdr *);
951 ph->param_type =
952 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
953 ph->param_length =
954 htons(SCTP_BUF_LEN(oper));
955 ippp = (uint32_t *) (ph + 1);
956 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
957 ippp++;
958 *ippp = chk->rec.data.TSN_seq;
959 ippp++;
960 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
961 }
962 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
963 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
964 *abort_flag = 1;
965 } else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
966 SCTP_DATA_UNORDERED &&
967 chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
968 /* Got to be the right STR Seq */
969 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
970 chk->rec.data.stream_seq,
971 asoc->ssn_of_pdapi);
972 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
973 0, M_DONTWAIT, 1, MT_DATA);
974 if (oper) {
975 struct sctp_paramhdr *ph;
976 uint32_t *ippp;
977
978 SCTP_BUF_LEN(oper) =
979 sizeof(struct sctp_paramhdr) +
980 (3 * sizeof(uint32_t));
981 ph = mtod(oper,
982 struct sctp_paramhdr *);
983 ph->param_type =
984 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
985 ph->param_length =
986 htons(SCTP_BUF_LEN(oper));
987 ippp = (uint32_t *) (ph + 1);
988 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
989 ippp++;
990 *ippp = chk->rec.data.TSN_seq;
991 ippp++;
992 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
993
994 }
995 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
996 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
997 *abort_flag = 1;
998 }
999 }
1000 }
1001 return;
1002 }
1003 /* Find its place */
1004 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1005 if (SCTP_TSN_GT(at->rec.data.TSN_seq, chk->rec.data.TSN_seq)) {
1006 /*
1007 * one in queue is bigger than the new one, insert
1008 * before this one
1009 */
1010 /* A check */
1011 asoc->size_on_reasm_queue += chk->send_size;
1012 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1013 next = at;
1014 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1015 break;
1016 } else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
1017 /* Gak, He sent me a duplicate str seq number */
1018 /*
1019 * foo bar, I guess I will just free this new guy,
1020 * should we abort too? FIX ME MAYBE? Or it COULD be
1021 * that the SSN's have wrapped. Maybe I should
1022 * compare to TSN somehow... sigh for now just blow
1023 * away the chunk!
1024 */
1025 if (chk->data) {
1026 sctp_m_freem(chk->data);
1027 chk->data = NULL;
1028 }
1029 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1030 return;
1031 } else {
1032 prev = at;
1033 if (TAILQ_NEXT(at, sctp_next) == NULL) {
1034 /*
1035 * We are at the end, insert it after this
1036 * one
1037 */
1038 /* check it first */
1039 asoc->size_on_reasm_queue += chk->send_size;
1040 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1041 TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
1042 break;
1043 }
1044 }
1045 }
1046 /* Now the audits */
1047 if (prev) {
1048 prev_tsn = chk->rec.data.TSN_seq - 1;
1049 if (prev_tsn == prev->rec.data.TSN_seq) {
1050 /*
1051 * Ok the one I am dropping onto the end is the
1052 * NEXT. A bit of valdiation here.
1053 */
1054 if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1055 SCTP_DATA_FIRST_FRAG ||
1056 (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1057 SCTP_DATA_MIDDLE_FRAG) {
1058 /*
1059 * Insert chk MUST be a MIDDLE or LAST
1060 * fragment
1061 */
1062 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1063 SCTP_DATA_FIRST_FRAG) {
1064 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a midlle or last but not a first\n");
1065 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
1066 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1067 0, M_DONTWAIT, 1, MT_DATA);
1068 if (oper) {
1069 struct sctp_paramhdr *ph;
1070 uint32_t *ippp;
1071
1072 SCTP_BUF_LEN(oper) =
1073 sizeof(struct sctp_paramhdr) +
1074 (3 * sizeof(uint32_t));
1075 ph = mtod(oper,
1076 struct sctp_paramhdr *);
1077 ph->param_type =
1078 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1079 ph->param_length =
1080 htons(SCTP_BUF_LEN(oper));
1081 ippp = (uint32_t *) (ph + 1);
1082 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1083 ippp++;
1084 *ippp = chk->rec.data.TSN_seq;
1085 ippp++;
1086 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1087
1088 }
1089 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
1090 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1091 *abort_flag = 1;
1092 return;
1093 }
1094 if (chk->rec.data.stream_number !=
1095 prev->rec.data.stream_number) {
1096 /*
1097 * Huh, need the correct STR here,
1098 * they must be the same.
1099 */
1100 SCTP_PRINTF("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1101 chk->rec.data.stream_number,
1102 prev->rec.data.stream_number);
1103 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1104 0, M_DONTWAIT, 1, MT_DATA);
1105 if (oper) {
1106 struct sctp_paramhdr *ph;
1107 uint32_t *ippp;
1108
1109 SCTP_BUF_LEN(oper) =
1110 sizeof(struct sctp_paramhdr) +
1111 (3 * sizeof(uint32_t));
1112 ph = mtod(oper,
1113 struct sctp_paramhdr *);
1114 ph->param_type =
1115 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1116 ph->param_length =
1117 htons(SCTP_BUF_LEN(oper));
1118 ippp = (uint32_t *) (ph + 1);
1119 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1120 ippp++;
1121 *ippp = chk->rec.data.TSN_seq;
1122 ippp++;
1123 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1124 }
1125 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
1126 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1127 *abort_flag = 1;
1128 return;
1129 }
1130 if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1131 chk->rec.data.stream_seq !=
1132 prev->rec.data.stream_seq) {
1133 /*
1134 * Huh, need the correct STR here,
1135 * they must be the same.
1136 */
1137 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1138 chk->rec.data.stream_seq,
1139 prev->rec.data.stream_seq);
1140 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1141 0, M_DONTWAIT, 1, MT_DATA);
1142 if (oper) {
1143 struct sctp_paramhdr *ph;
1144 uint32_t *ippp;
1145
1146 SCTP_BUF_LEN(oper) =
1147 sizeof(struct sctp_paramhdr) +
1148 (3 * sizeof(uint32_t));
1149 ph = mtod(oper,
1150 struct sctp_paramhdr *);
1151 ph->param_type =
1152 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1153 ph->param_length =
1154 htons(SCTP_BUF_LEN(oper));
1155 ippp = (uint32_t *) (ph + 1);
1156 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1157 ippp++;
1158 *ippp = chk->rec.data.TSN_seq;
1159 ippp++;
1160 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1161 }
1162 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
1163 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1164 *abort_flag = 1;
1165 return;
1166 }
1167 } else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1168 SCTP_DATA_LAST_FRAG) {
1169 /* Insert chk MUST be a FIRST */
1170 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1171 SCTP_DATA_FIRST_FRAG) {
1172 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
1173 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1174 0, M_DONTWAIT, 1, MT_DATA);
1175 if (oper) {
1176 struct sctp_paramhdr *ph;
1177 uint32_t *ippp;
1178
1179 SCTP_BUF_LEN(oper) =
1180 sizeof(struct sctp_paramhdr) +
1181 (3 * sizeof(uint32_t));
1182 ph = mtod(oper,
1183 struct sctp_paramhdr *);
1184 ph->param_type =
1185 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1186 ph->param_length =
1187 htons(SCTP_BUF_LEN(oper));
1188 ippp = (uint32_t *) (ph + 1);
1189 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1190 ippp++;
1191 *ippp = chk->rec.data.TSN_seq;
1192 ippp++;
1193 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1194
1195 }
1196 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
1197 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1198 *abort_flag = 1;
1199 return;
1200 }
1201 }
1202 }
1203 }
1204 if (next) {
1205 post_tsn = chk->rec.data.TSN_seq + 1;
1206 if (post_tsn == next->rec.data.TSN_seq) {
1207 /*
1208 * Ok the one I am inserting ahead of is my NEXT
1209 * one. A bit of valdiation here.
1210 */
1211 if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1212 /* Insert chk MUST be a last fragment */
1213 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
1214 != SCTP_DATA_LAST_FRAG) {
1215 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
1216 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
1217 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1218 0, M_DONTWAIT, 1, MT_DATA);
1219 if (oper) {
1220 struct sctp_paramhdr *ph;
1221 uint32_t *ippp;
1222
1223 SCTP_BUF_LEN(oper) =
1224 sizeof(struct sctp_paramhdr) +
1225 (3 * sizeof(uint32_t));
1226 ph = mtod(oper,
1227 struct sctp_paramhdr *);
1228 ph->param_type =
1229 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1230 ph->param_length =
1231 htons(SCTP_BUF_LEN(oper));
1232 ippp = (uint32_t *) (ph + 1);
1233 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1234 ippp++;
1235 *ippp = chk->rec.data.TSN_seq;
1236 ippp++;
1237 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1238 }
1239 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
1240 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1241 *abort_flag = 1;
1242 return;
1243 }
1244 } else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1245 SCTP_DATA_MIDDLE_FRAG ||
1246 (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1247 SCTP_DATA_LAST_FRAG) {
1248 /*
1249 * Insert chk CAN be MIDDLE or FIRST NOT
1250 * LAST
1251 */
1252 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1253 SCTP_DATA_LAST_FRAG) {
1254 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
1255 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
1256 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1257 0, M_DONTWAIT, 1, MT_DATA);
1258 if (oper) {
1259 struct sctp_paramhdr *ph;
1260 uint32_t *ippp;
1261
1262 SCTP_BUF_LEN(oper) =
1263 sizeof(struct sctp_paramhdr) +
1264 (3 * sizeof(uint32_t));
1265 ph = mtod(oper,
1266 struct sctp_paramhdr *);
1267 ph->param_type =
1268 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1269 ph->param_length =
1270 htons(SCTP_BUF_LEN(oper));
1271 ippp = (uint32_t *) (ph + 1);
1272 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1273 ippp++;
1274 *ippp = chk->rec.data.TSN_seq;
1275 ippp++;
1276 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1277
1278 }
1279 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
1280 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1281 *abort_flag = 1;
1282 return;
1283 }
1284 if (chk->rec.data.stream_number !=
1285 next->rec.data.stream_number) {
1286 /*
1287 * Huh, need the correct STR here,
1288 * they must be the same.
1289 */
1290 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1291 chk->rec.data.stream_number,
1292 next->rec.data.stream_number);
1293 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1294 0, M_DONTWAIT, 1, MT_DATA);
1295 if (oper) {
1296 struct sctp_paramhdr *ph;
1297 uint32_t *ippp;
1298
1299 SCTP_BUF_LEN(oper) =
1300 sizeof(struct sctp_paramhdr) +
1301 (3 * sizeof(uint32_t));
1302 ph = mtod(oper,
1303 struct sctp_paramhdr *);
1304 ph->param_type =
1305 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1306 ph->param_length =
1307 htons(SCTP_BUF_LEN(oper));
1308 ippp = (uint32_t *) (ph + 1);
1309 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1310 ippp++;
1311 *ippp = chk->rec.data.TSN_seq;
1312 ippp++;
1313 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1314
1315 }
1316 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
1317 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1318 *abort_flag = 1;
1319 return;
1320 }
1321 if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1322 chk->rec.data.stream_seq !=
1323 next->rec.data.stream_seq) {
1324 /*
1325 * Huh, need the correct STR here,
1326 * they must be the same.
1327 */
1328 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1329 chk->rec.data.stream_seq,
1330 next->rec.data.stream_seq);
1331 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1332 0, M_DONTWAIT, 1, MT_DATA);
1333 if (oper) {
1334 struct sctp_paramhdr *ph;
1335 uint32_t *ippp;
1336
1337 SCTP_BUF_LEN(oper) =
1338 sizeof(struct sctp_paramhdr) +
1339 (3 * sizeof(uint32_t));
1340 ph = mtod(oper,
1341 struct sctp_paramhdr *);
1342 ph->param_type =
1343 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1344 ph->param_length =
1345 htons(SCTP_BUF_LEN(oper));
1346 ippp = (uint32_t *) (ph + 1);
1347 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1348 ippp++;
1349 *ippp = chk->rec.data.TSN_seq;
1350 ippp++;
1351 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1352 }
1353 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
1354 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1355 *abort_flag = 1;
1356 return;
1357 }
1358 }
1359 }
1360 }
1361 /* Do we need to do some delivery? check */
1362 sctp_deliver_reasm_check(stcb, asoc);
1363 }
1364
1365 /*
1366 * This is an unfortunate routine. It checks to make sure a evil guy is not
1367 * stuffing us full of bad packet fragments. A broken peer could also do this
1368 * but this is doubtful. It is to bad I must worry about evil crackers sigh
1369 * :< more cycles.
1370 */
1371 static int
1372 sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
1373 uint32_t TSN_seq)
1374 {
1375 struct sctp_tmit_chunk *at;
1376 uint32_t tsn_est;
1377
1378 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1379 if (SCTP_TSN_GT(TSN_seq, at->rec.data.TSN_seq)) {
1380 /* is it one bigger? */
1381 tsn_est = at->rec.data.TSN_seq + 1;
1382 if (tsn_est == TSN_seq) {
1383 /* yep. It better be a last then */
1384 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1385 SCTP_DATA_LAST_FRAG) {
1386 /*
1387 * Ok this guy belongs next to a guy
1388 * that is NOT last, it should be a
1389 * middle/last, not a complete
1390 * chunk.
1391 */
1392 return (1);
1393 } else {
1394 /*
1395 * This guy is ok since its a LAST
1396 * and the new chunk is a fully
1397 * self- contained one.
1398 */
1399 return (0);
1400 }
1401 }
1402 } else if (TSN_seq == at->rec.data.TSN_seq) {
1403 /* Software error since I have a dup? */
1404 return (1);
1405 } else {
1406 /*
1407 * Ok, 'at' is larger than new chunk but does it
1408 * need to be right before it.
1409 */
1410 tsn_est = TSN_seq + 1;
1411 if (tsn_est == at->rec.data.TSN_seq) {
1412 /* Yep, It better be a first */
1413 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1414 SCTP_DATA_FIRST_FRAG) {
1415 return (1);
1416 } else {
1417 return (0);
1418 }
1419 }
1420 }
1421 }
1422 return (0);
1423 }
1424
1425
1426 static int
1427 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1428 struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
1429 struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1430 int *break_flag, int last_chunk)
1431 {
1432 /* Process a data chunk */
1433 /* struct sctp_tmit_chunk *chk; */
1434 struct sctp_tmit_chunk *chk;
1435 uint32_t tsn, gap;
1436 struct mbuf *dmbuf;
1437 int the_len;
1438 int need_reasm_check = 0;
1439 uint16_t strmno, strmseq;
1440 struct mbuf *oper;
1441 struct sctp_queued_to_read *control;
1442 int ordered;
1443 uint32_t protocol_id;
1444 uint8_t chunk_flags;
1445 struct sctp_stream_reset_list *liste;
1446
1447 chk = NULL;
1448 tsn = ntohl(ch->dp.tsn);
1449 chunk_flags = ch->ch.chunk_flags;
1450 if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1451 asoc->send_sack = 1;
1452 }
1453 protocol_id = ch->dp.protocol_id;
1454 ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
1455 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1456 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1457 }
1458 if (stcb == NULL) {
1459 return (0);
1460 }
1461 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
1462 if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1463 /* It is a duplicate */
1464 SCTP_STAT_INCR(sctps_recvdupdata);
1465 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1466 /* Record a dup for the next outbound sack */
1467 asoc->dup_tsns[asoc->numduptsns] = tsn;
1468 asoc->numduptsns++;
1469 }
1470 asoc->send_sack = 1;
1471 return (0);
1472 }
1473 /* Calculate the number of TSN's between the base and this TSN */
1474 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1475 if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1476 /* Can't hold the bit in the mapping at max array, toss it */
1477 return (0);
1478 }
1479 if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1480 SCTP_TCB_LOCK_ASSERT(stcb);
1481 if (sctp_expand_mapping_array(asoc, gap)) {
1482 /* Can't expand, drop it */
1483 return (0);
1484 }
1485 }
1486 if (SCTP_TSN_GT(tsn, *high_tsn)) {
1487 *high_tsn = tsn;
1488 }
1489 /* See if we have received this one already */
1490 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1491 SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1492 SCTP_STAT_INCR(sctps_recvdupdata);
1493 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1494 /* Record a dup for the next outbound sack */
1495 asoc->dup_tsns[asoc->numduptsns] = tsn;
1496 asoc->numduptsns++;
1497 }
1498 asoc->send_sack = 1;
1499 return (0);
1500 }
1501 /*
1502 * Check to see about the GONE flag, duplicates would cause a sack
1503 * to be sent up above
1504 */
1505 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1506 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1507 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
1508 ) {
1509 /*
1510 * wait a minute, this guy is gone, there is no longer a
1511 * receiver. Send peer an ABORT!
1512 */
1513 struct mbuf *op_err;
1514
1515 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1516 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1517 *abort_flag = 1;
1518 return (0);
1519 }
1520 /*
1521 * Now before going further we see if there is room. If NOT then we
1522 * MAY let one through only IF this TSN is the one we are waiting
1523 * for on a partial delivery API.
1524 */
1525
1526 /* now do the tests */
1527 if (((asoc->cnt_on_all_streams +
1528 asoc->cnt_on_reasm_queue +
1529 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1530 (((int)asoc->my_rwnd) <= 0)) {
1531 /*
1532 * When we have NO room in the rwnd we check to make sure
1533 * the reader is doing its job...
1534 */
1535 if (stcb->sctp_socket->so_rcv.sb_cc) {
1536 /* some to read, wake-up */
1537 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1538 struct socket *so;
1539
1540 so = SCTP_INP_SO(stcb->sctp_ep);
1541 atomic_add_int(&stcb->asoc.refcnt, 1);
1542 SCTP_TCB_UNLOCK(stcb);
1543 SCTP_SOCKET_LOCK(so, 1);
1544 SCTP_TCB_LOCK(stcb);
1545 atomic_subtract_int(&stcb->asoc.refcnt, 1);
1546 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1547 /* assoc was freed while we were unlocked */
1548 SCTP_SOCKET_UNLOCK(so, 1);
1549 return (0);
1550 }
1551 #endif
1552 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1553 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1554 SCTP_SOCKET_UNLOCK(so, 1);
1555 #endif
1556 }
1557 /* now is it in the mapping array of what we have accepted? */
1558 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1559 SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1560 /* Nope not in the valid range dump it */
1561 sctp_set_rwnd(stcb, asoc);
1562 if ((asoc->cnt_on_all_streams +
1563 asoc->cnt_on_reasm_queue +
1564 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1565 SCTP_STAT_INCR(sctps_datadropchklmt);
1566 } else {
1567 SCTP_STAT_INCR(sctps_datadroprwnd);
1568 }
1569 *break_flag = 1;
1570 return (0);
1571 }
1572 }
1573 strmno = ntohs(ch->dp.stream_id);
1574 if (strmno >= asoc->streamincnt) {
1575 struct sctp_paramhdr *phdr;
1576 struct mbuf *mb;
1577
1578 mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
1579 0, M_DONTWAIT, 1, MT_DATA);
1580 if (mb != NULL) {
1581 /* add some space up front so prepend will work well */
1582 SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
1583 phdr = mtod(mb, struct sctp_paramhdr *);
1584 /*
1585 * Error causes are just param's and this one has
1586 * two back to back phdr, one with the error type
1587 * and size, the other with the streamid and a rsvd
1588 */
1589 SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
1590 phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
1591 phdr->param_length =
1592 htons(sizeof(struct sctp_paramhdr) * 2);
1593 phdr++;
1594 /* We insert the stream in the type field */
1595 phdr->param_type = ch->dp.stream_id;
1596 /* And set the length to 0 for the rsvd field */
1597 phdr->param_length = 0;
1598 sctp_queue_op_err(stcb, mb);
1599 }
1600 SCTP_STAT_INCR(sctps_badsid);
1601 SCTP_TCB_LOCK_ASSERT(stcb);
1602 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1603 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1604 asoc->highest_tsn_inside_nr_map = tsn;
1605 }
1606 if (tsn == (asoc->cumulative_tsn + 1)) {
1607 /* Update cum-ack */
1608 asoc->cumulative_tsn = tsn;
1609 }
1610 return (0);
1611 }
1612 /*
1613 * Before we continue lets validate that we are not being fooled by
1614 * an evil attacker. We can only have 4k chunks based on our TSN
1615 * spread allowed by the mapping array 512 * 8 bits, so there is no
1616 * way our stream sequence numbers could have wrapped. We of course
1617 * only validate the FIRST fragment so the bit must be set.
1618 */
1619 strmseq = ntohs(ch->dp.stream_sequence);
1620 #ifdef SCTP_ASOCLOG_OF_TSNS
1621 SCTP_TCB_LOCK_ASSERT(stcb);
1622 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1623 asoc->tsn_in_at = 0;
1624 asoc->tsn_in_wrapped = 1;
1625 }
1626 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1627 asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1628 asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1629 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1630 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1631 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1632 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1633 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1634 asoc->tsn_in_at++;
1635 #endif
1636 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1637 (TAILQ_EMPTY(&asoc->resetHead)) &&
1638 (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1639 SCTP_SSN_GE(asoc->strmin[strmno].last_sequence_delivered, strmseq)) {
1640 /* The incoming sseq is behind where we last delivered? */
1641 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1642 strmseq, asoc->strmin[strmno].last_sequence_delivered);
1643 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1644 0, M_DONTWAIT, 1, MT_DATA);
1645 if (oper) {
1646 struct sctp_paramhdr *ph;
1647 uint32_t *ippp;
1648
1649 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
1650 (3 * sizeof(uint32_t));
1651 ph = mtod(oper, struct sctp_paramhdr *);
1652 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1653 ph->param_length = htons(SCTP_BUF_LEN(oper));
1654 ippp = (uint32_t *) (ph + 1);
1655 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1656 ippp++;
1657 *ippp = tsn;
1658 ippp++;
1659 *ippp = ((strmno << 16) | strmseq);
1660
1661 }
1662 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1663 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1664 *abort_flag = 1;
1665 return (0);
1666 }
1667 /************************************
1668 * From here down we may find ch-> invalid
1669 * so its a good idea NOT to use it.
1670 *************************************/
1671
1672 the_len = (chk_length - sizeof(struct sctp_data_chunk));
1673 if (last_chunk == 0) {
1674 dmbuf = SCTP_M_COPYM(*m,
1675 (offset + sizeof(struct sctp_data_chunk)),
1676 the_len, M_DONTWAIT);
1677 #ifdef SCTP_MBUF_LOGGING
1678 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1679 struct mbuf *mat;
1680
1681 for (mat = dmbuf; mat; mat = SCTP_BUF_NEXT(mat)) {
1682 if (SCTP_BUF_IS_EXTENDED(mat)) {
1683 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
1684 }
1685 }
1686 }
1687 #endif
1688 } else {
1689 /* We can steal the last chunk */
1690 int l_len;
1691
1692 dmbuf = *m;
1693 /* lop off the top part */
1694 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1695 if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1696 l_len = SCTP_BUF_LEN(dmbuf);
1697 } else {
1698 /*
1699 * need to count up the size hopefully does not hit
1700 * this to often :-0
1701 */
1702 struct mbuf *lat;
1703
1704 l_len = 0;
1705 for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
1706 l_len += SCTP_BUF_LEN(lat);
1707 }
1708 }
1709 if (l_len > the_len) {
1710 /* Trim the end round bytes off too */
1711 m_adj(dmbuf, -(l_len - the_len));
1712 }
1713 }
1714 if (dmbuf == NULL) {
1715 SCTP_STAT_INCR(sctps_nomem);
1716 return (0);
1717 }
1718 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1719 asoc->fragmented_delivery_inprogress == 0 &&
1720 TAILQ_EMPTY(&asoc->resetHead) &&
1721 ((ordered == 0) ||
1722 ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1723 TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1724 /* Candidate for express delivery */
1725 /*
1726 * Its not fragmented, No PD-API is up, Nothing in the
1727 * delivery queue, Its un-ordered OR ordered and the next to
1728 * deliver AND nothing else is stuck on the stream queue,
1729 * And there is room for it in the socket buffer. Lets just
1730 * stuff it up the buffer....
1731 */
1732
1733 /* It would be nice to avoid this copy if we could :< */
1734 sctp_alloc_a_readq(stcb, control);
1735 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1736 protocol_id,
1737 stcb->asoc.context,
1738 strmno, strmseq,
1739 chunk_flags,
1740 dmbuf);
1741 if (control == NULL) {
1742 goto failed_express_del;
1743 }
1744 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1745 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1746 asoc->highest_tsn_inside_nr_map = tsn;
1747 }
1748 sctp_add_to_readq(stcb->sctp_ep, stcb,
1749 control, &stcb->sctp_socket->so_rcv,
1750 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1751
1752 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1753 /* for ordered, bump what we delivered */
1754 asoc->strmin[strmno].last_sequence_delivered++;
1755 }
1756 SCTP_STAT_INCR(sctps_recvexpress);
1757 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1758 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1759 SCTP_STR_LOG_FROM_EXPRS_DEL);
1760 }
1761 control = NULL;
1762
1763 goto finish_express_del;
1764 }
1765 failed_express_del:
1766 /* If we reach here this is a new chunk */
1767 chk = NULL;
1768 control = NULL;
1769 /* Express for fragmented delivery? */
1770 if ((asoc->fragmented_delivery_inprogress) &&
1771 (stcb->asoc.control_pdapi) &&
1772 (asoc->str_of_pdapi == strmno) &&
1773 (asoc->ssn_of_pdapi == strmseq)
1774 ) {
1775 control = stcb->asoc.control_pdapi;
1776 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1777 /* Can't be another first? */
1778 goto failed_pdapi_express_del;
1779 }
1780 if (tsn == (control->sinfo_tsn + 1)) {
1781 /* Yep, we can add it on */
1782 int end = 0;
1783
1784 if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1785 end = 1;
1786 }
1787 if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1788 tsn,
1789 &stcb->sctp_socket->so_rcv)) {
1790 SCTP_PRINTF("Append fails end:%d\n", end);
1791 goto failed_pdapi_express_del;
1792 }
1793 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1794 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1795 asoc->highest_tsn_inside_nr_map = tsn;
1796 }
1797 SCTP_STAT_INCR(sctps_recvexpressm);
1798 control->sinfo_tsn = tsn;
1799 asoc->tsn_last_delivered = tsn;
1800 asoc->fragment_flags = chunk_flags;
1801 asoc->tsn_of_pdapi_last_delivered = tsn;
1802 asoc->last_flags_delivered = chunk_flags;
1803 asoc->last_strm_seq_delivered = strmseq;
1804 asoc->last_strm_no_delivered = strmno;
1805 if (end) {
1806 /* clean up the flags and such */
1807 asoc->fragmented_delivery_inprogress = 0;
1808 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1809 asoc->strmin[strmno].last_sequence_delivered++;
1810 }
1811 stcb->asoc.control_pdapi = NULL;
1812 if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
1813 /*
1814 * There could be another message
1815 * ready
1816 */
1817 need_reasm_check = 1;
1818 }
1819 }
1820 control = NULL;
1821 goto finish_express_del;
1822 }
1823 }
1824 failed_pdapi_express_del:
1825 control = NULL;
1826 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
1827 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1828 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1829 asoc->highest_tsn_inside_nr_map = tsn;
1830 }
1831 } else {
1832 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1833 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
1834 asoc->highest_tsn_inside_map = tsn;
1835 }
1836 }
1837 if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1838 sctp_alloc_a_chunk(stcb, chk);
1839 if (chk == NULL) {
1840 /* No memory so we drop the chunk */
1841 SCTP_STAT_INCR(sctps_nomem);
1842 if (last_chunk == 0) {
1843 /* we copied it, free the copy */
1844 sctp_m_freem(dmbuf);
1845 }
1846 return (0);
1847 }
1848 chk->rec.data.TSN_seq = tsn;
1849 chk->no_fr_allowed = 0;
1850 chk->rec.data.stream_seq = strmseq;
1851 chk->rec.data.stream_number = strmno;
1852 chk->rec.data.payloadtype = protocol_id;
1853 chk->rec.data.context = stcb->asoc.context;
1854 chk->rec.data.doing_fast_retransmit = 0;
1855 chk->rec.data.rcv_flags = chunk_flags;
1856 chk->asoc = asoc;
1857 chk->send_size = the_len;
1858 chk->whoTo = net;
1859 atomic_add_int(&net->ref_count, 1);
1860 chk->data = dmbuf;
1861 } else {
1862 sctp_alloc_a_readq(stcb, control);
1863 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1864 protocol_id,
1865 stcb->asoc.context,
1866 strmno, strmseq,
1867 chunk_flags,
1868 dmbuf);
1869 if (control == NULL) {
1870 /* No memory so we drop the chunk */
1871 SCTP_STAT_INCR(sctps_nomem);
1872 if (last_chunk == 0) {
1873 /* we copied it, free the copy */
1874 sctp_m_freem(dmbuf);
1875 }
1876 return (0);
1877 }
1878 control->length = the_len;
1879 }
1880
1881 /* Mark it as received */
1882 /* Now queue it where it belongs */
1883 if (control != NULL) {
1884 /* First a sanity check */
1885 if (asoc->fragmented_delivery_inprogress) {
1886 /*
1887 * Ok, we have a fragmented delivery in progress if
1888 * this chunk is next to deliver OR belongs in our
1889 * view to the reassembly, the peer is evil or
1890 * broken.
1891 */
1892 uint32_t estimate_tsn;
1893
1894 estimate_tsn = asoc->tsn_last_delivered + 1;
1895 if (TAILQ_EMPTY(&asoc->reasmqueue) &&
1896 (estimate_tsn == control->sinfo_tsn)) {
1897 /* Evil/Broke peer */
1898 sctp_m_freem(control->data);
1899 control->data = NULL;
1900 if (control->whoFrom) {
1901 sctp_free_remote_addr(control->whoFrom);
1902 control->whoFrom = NULL;
1903 }
1904 sctp_free_a_readq(stcb, control);
1905 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1906 0, M_DONTWAIT, 1, MT_DATA);
1907 if (oper) {
1908 struct sctp_paramhdr *ph;
1909 uint32_t *ippp;
1910
1911 SCTP_BUF_LEN(oper) =
1912 sizeof(struct sctp_paramhdr) +
1913 (3 * sizeof(uint32_t));
1914 ph = mtod(oper, struct sctp_paramhdr *);
1915 ph->param_type =
1916 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1917 ph->param_length = htons(SCTP_BUF_LEN(oper));
1918 ippp = (uint32_t *) (ph + 1);
1919 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
1920 ippp++;
1921 *ippp = tsn;
1922 ippp++;
1923 *ippp = ((strmno << 16) | strmseq);
1924 }
1925 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1926 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1927 *abort_flag = 1;
1928 return (0);
1929 } else {
1930 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1931 sctp_m_freem(control->data);
1932 control->data = NULL;
1933 if (control->whoFrom) {
1934 sctp_free_remote_addr(control->whoFrom);
1935 control->whoFrom = NULL;
1936 }
1937 sctp_free_a_readq(stcb, control);
1938
1939 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1940 0, M_DONTWAIT, 1, MT_DATA);
1941 if (oper) {
1942 struct sctp_paramhdr *ph;
1943 uint32_t *ippp;
1944
1945 SCTP_BUF_LEN(oper) =
1946 sizeof(struct sctp_paramhdr) +
1947 (3 * sizeof(uint32_t));
1948 ph = mtod(oper,
1949 struct sctp_paramhdr *);
1950 ph->param_type =
1951 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1952 ph->param_length =
1953 htons(SCTP_BUF_LEN(oper));
1954 ippp = (uint32_t *) (ph + 1);
1955 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16);
1956 ippp++;
1957 *ippp = tsn;
1958 ippp++;
1959 *ippp = ((strmno << 16) | strmseq);
1960 }
1961 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1962 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1963 *abort_flag = 1;
1964 return (0);
1965 }
1966 }
1967 } else {
1968 /* No PDAPI running */
1969 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
1970 /*
1971 * Reassembly queue is NOT empty validate
1972 * that this tsn does not need to be in
* reassembly queue. If it does then our peer
1974 * is broken or evil.
1975 */
1976 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1977 sctp_m_freem(control->data);
1978 control->data = NULL;
1979 if (control->whoFrom) {
1980 sctp_free_remote_addr(control->whoFrom);
1981 control->whoFrom = NULL;
1982 }
1983 sctp_free_a_readq(stcb, control);
1984 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1985 0, M_DONTWAIT, 1, MT_DATA);
1986 if (oper) {
1987 struct sctp_paramhdr *ph;
1988 uint32_t *ippp;
1989
1990 SCTP_BUF_LEN(oper) =
1991 sizeof(struct sctp_paramhdr) +
1992 (3 * sizeof(uint32_t));
1993 ph = mtod(oper,
1994 struct sctp_paramhdr *);
1995 ph->param_type =
1996 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1997 ph->param_length =
1998 htons(SCTP_BUF_LEN(oper));
1999 ippp = (uint32_t *) (ph + 1);
2000 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2001 ippp++;
2002 *ippp = tsn;
2003 ippp++;
2004 *ippp = ((strmno << 16) | strmseq);
2005 }
2006 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
2007 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
2008 *abort_flag = 1;
2009 return (0);
2010 }
2011 }
2012 }
2013 /* ok, if we reach here we have passed the sanity checks */
2014 if (chunk_flags & SCTP_DATA_UNORDERED) {
2015 /* queue directly into socket buffer */
2016 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2017 sctp_add_to_readq(stcb->sctp_ep, stcb,
2018 control,
2019 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2020 } else {
2021 /*
2022 * Special check for when streams are resetting. We
2023 * could be more smart about this and check the
2024 * actual stream to see if it is not being reset..
2025 * that way we would not create a HOLB when amongst
2026 * streams being reset and those not being reset.
2027 *
2028 * We take complete messages that have a stream reset
2029 * intervening (aka the TSN is after where our
2030 * cum-ack needs to be) off and put them on a
2031 * pending_reply_queue. The reassembly ones we do
2032 * not have to worry about since they are all sorted
* and processed by TSN order. It is only the
2034 * singletons I must worry about.
2035 */
2036 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2037 SCTP_TSN_GT(tsn, liste->tsn)) {
2038 /*
2039 * yep its past where we need to reset... go
2040 * ahead and queue it.
2041 */
2042 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2043 /* first one on */
2044 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2045 } else {
2046 struct sctp_queued_to_read *ctlOn,
2047 *nctlOn;
2048 unsigned char inserted = 0;
2049
2050 TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
2051 if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
2052 continue;
2053 } else {
2054 /* found it */
2055 TAILQ_INSERT_BEFORE(ctlOn, control, next);
2056 inserted = 1;
2057 break;
2058 }
2059 }
2060 if (inserted == 0) {
2061 /*
2062 * must be put at end, use
2063 * prevP (all setup from
2064 * loop) to setup nextP.
2065 */
2066 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2067 }
2068 }
2069 } else {
2070 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
2071 if (*abort_flag) {
2072 return (0);
2073 }
2074 }
2075 }
2076 } else {
2077 /* Into the re-assembly queue */
2078 sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
2079 if (*abort_flag) {
2080 /*
2081 * the assoc is now gone and chk was put onto the
2082 * reasm queue, which has all been freed.
2083 */
2084 *m = NULL;
2085 return (0);
2086 }
2087 }
2088 finish_express_del:
2089 if (tsn == (asoc->cumulative_tsn + 1)) {
2090 /* Update cum-ack */
2091 asoc->cumulative_tsn = tsn;
2092 }
2093 if (last_chunk) {
2094 *m = NULL;
2095 }
2096 if (ordered) {
2097 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2098 } else {
2099 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2100 }
2101 SCTP_STAT_INCR(sctps_recvdata);
2102 /* Set it present please */
2103 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2104 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2105 }
2106 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2107 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2108 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2109 }
2110 /* check the special flag for stream resets */
2111 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2112 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2113 /*
2114 * we have finished working through the backlogged TSN's now
2115 * time to reset streams. 1: call reset function. 2: free
2116 * pending_reply space 3: distribute any chunks in
2117 * pending_reply_queue.
2118 */
2119 struct sctp_queued_to_read *ctl, *nctl;
2120
2121 sctp_reset_in_stream(stcb, liste->number_entries, liste->req.list_of_streams);
2122 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2123 SCTP_FREE(liste, SCTP_M_STRESET);
2124 /* sa_ignore FREED_MEMORY */
2125 liste = TAILQ_FIRST(&asoc->resetHead);
2126 if (TAILQ_EMPTY(&asoc->resetHead)) {
2127 /* All can be removed */
2128 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2129 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2130 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2131 if (*abort_flag) {
2132 return (0);
2133 }
2134 }
2135 } else {
2136 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2137 if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
2138 break;
2139 }
2140 /*
2141 * if ctl->sinfo_tsn is <= liste->tsn we can
2142 * process it which is the NOT of
2143 * ctl->sinfo_tsn > liste->tsn
2144 */
2145 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2146 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2147 if (*abort_flag) {
2148 return (0);
2149 }
2150 }
2151 }
2152 /*
2153 * Now service re-assembly to pick up anything that has been
2154 * held on reassembly queue?
2155 */
2156 sctp_deliver_reasm_check(stcb, asoc);
2157 need_reasm_check = 0;
2158 }
2159 if (need_reasm_check) {
2160 /* Another one waits ? */
2161 sctp_deliver_reasm_check(stcb, asoc);
2162 }
2163 return (1);
2164 }
2165
/*
 * Lookup table used when scanning the (OR'ed) mapping arrays: for a byte
 * value 'val' taken from the map, sctp_map_lookup_tab[val] is the number
 * of consecutive 1-bits starting at the least significant bit, i.e. how
 * many in-sequence TSNs are present in that byte before the first gap.
 * Entry 0xff is 8 ("all eight present"), but callers only consult the
 * table for bytes that are not 0xff, so the result is always the offset
 * of the first missing TSN within the byte.
 */
int8_t sctp_map_lookup_tab[256] = {
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 5,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 6,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 5,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 7,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 5,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 6,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 5,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 8
};
2200
2201
void
sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
{
	/*
	 * Now we also need to check the mapping array in a couple of ways.
	 * 1) Did we move the cum-ack point?
	 *
	 * When you first glance at this you might think that all entries
	 * that make up the position of the cum-ack would be in the
	 * nr-mapping array only.. i.e. things up to the cum-ack are always
	 * deliverable. That's true with one exception, when it's a
	 * fragmented message we may not deliver the data until some
	 * threshold (or all of it) is in place. So we must OR the
	 * nr_mapping_array and mapping_array to get a true picture of the
	 * cum-ack.
	 */
	struct sctp_association *asoc;
	int at;			/* bit offset of the first gap in the OR'ed map */
	uint8_t val;
	int slide_from, slide_end, lgap, distance;
	uint32_t old_cumack, old_base, old_highest, highest_tsn;

	asoc = &stcb->asoc;

	/* Remember the pre-slide values for map logging below. */
	old_cumack = asoc->cumulative_tsn;
	old_base = asoc->mapping_array_base_tsn;
	old_highest = asoc->highest_tsn_inside_map;
	/*
	 * We could probably improve this a small bit by calculating the
	 * offset of the current cum-ack as the starting point.
	 */
	at = 0;
	/*
	 * Scan the OR of both maps byte by byte; a 0xff byte means eight
	 * consecutive TSNs are present, otherwise sctp_map_lookup_tab[]
	 * gives how many bits are set before the first gap in this byte.
	 * 'slide_from' is left pointing at the byte containing the gap.
	 */
	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
		if (val == 0xff) {
			at += 8;
		} else {
			/* there is a 0 bit */
			at += sctp_map_lookup_tab[val];
			break;
		}
	}
	/* New cum-ack is the last TSN before the first gap. */
	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);

	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
#ifdef INVARIANTS
		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
#else
		/*
		 * Should be impossible: log, dump the map, and repair the
		 * highest-TSN trackers instead of panicking.
		 */
		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
		sctp_print_mapping_array(asoc);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
		}
		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
#endif
	}
	/* Overall highest TSN seen, across both maps. */
	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
		highest_tsn = asoc->highest_tsn_inside_nr_map;
	} else {
		highest_tsn = asoc->highest_tsn_inside_map;
	}
	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
		/* The complete array was completed by a single FR */
		/* highest becomes the cum-ack */
		int clr;

#ifdef INVARIANTS
		unsigned int i;

#endif

		/* clear the array */
		clr = ((at + 7) >> 3);	/* bytes spanned by the acked bits, rounded up */
		if (clr > asoc->mapping_array_size) {
			clr = asoc->mapping_array_size;
		}
		memset(asoc->mapping_array, 0, clr);
		memset(asoc->nr_mapping_array, 0, clr);
#ifdef INVARIANTS
		/* With everything acked, both maps must now be all-zero. */
		for (i = 0; i < asoc->mapping_array_size; i++) {
			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
				SCTP_PRINTF("Error Mapping array's not clean at clear\n");
				sctp_print_mapping_array(asoc);
			}
		}
#endif
		/* Rebase the (now empty) maps at the TSN after the cum-ack. */
		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
	} else if (at >= 8) {
		/* we can slide the mapping array down */
		/* slide_from holds where we hit the first NON 0xff byte */

		/*
		 * now calculate the ceiling of the move using our highest
		 * TSN value
		 */
		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
		slide_end = (lgap >> 3);	/* byte index holding the highest TSN */
		if (slide_end < slide_from) {
			sctp_print_mapping_array(asoc);
#ifdef INVARIANTS
			panic("impossible slide");
#else
			SCTP_PRINTF("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n",
			    lgap, slide_end, slide_from, at);
			return;
#endif
		}
		if (slide_end > asoc->mapping_array_size) {
#ifdef INVARIANTS
			panic("would overrun buffer");
#else
			SCTP_PRINTF("Gak, would have overrun map end:%d slide_end:%d\n",
			    asoc->mapping_array_size, slide_end);
			slide_end = asoc->mapping_array_size;
#endif
		}
		/* Number of bytes that must survive the slide. */
		distance = (slide_end - slide_from) + 1;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
			sctp_log_map(old_base, old_cumack, old_highest,
			    SCTP_MAP_PREPARE_SLIDE);
			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
			    (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
		}
		if (distance + slide_from > asoc->mapping_array_size ||
		    distance < 0) {
			/*
			 * Here we do NOT slide forward the array so that
			 * hopefully when more data comes in to fill it up
			 * we will be able to slide it forward. Really I
			 * don't think this should happen :-0
			 */

			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
				    (uint32_t) asoc->mapping_array_size,
				    SCTP_MAP_SLIDE_NONE);
			}
		} else {
			int ii;

			/* Copy the surviving bytes down to the front... */
			for (ii = 0; ii < distance; ii++) {
				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];

			}
			/* ...and zero out everything behind them. */
			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
				asoc->mapping_array[ii] = 0;
				asoc->nr_mapping_array[ii] = 0;
			}
			/*
			 * Keep the highest-TSN trackers consistent with the
			 * rebased map when they sat exactly one before the
			 * old base (i.e. the map was empty above them).
			 */
			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
				asoc->highest_tsn_inside_map += (slide_from << 3);
			}
			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
			}
			/* The base advances by the bits we slid past. */
			asoc->mapping_array_base_tsn += (slide_from << 3);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
				sctp_log_map(asoc->mapping_array_base_tsn,
				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
				    SCTP_MAP_SLIDE_RESULT);
			}
		}
	}
}
2370
2371 void
2372 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2373 {
2374 struct sctp_association *asoc;
2375 uint32_t highest_tsn;
2376
2377 asoc = &stcb->asoc;
2378 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2379 highest_tsn = asoc->highest_tsn_inside_nr_map;
2380 } else {
2381 highest_tsn = asoc->highest_tsn_inside_map;
2382 }
2383
2384 /*
2385 * Now we need to see if we need to queue a sack or just start the
2386 * timer (if allowed).
2387 */
2388 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2389 /*
2390 * Ok special case, in SHUTDOWN-SENT case. here we maker
2391 * sure SACK timer is off and instead send a SHUTDOWN and a
2392 * SACK
2393 */
2394 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2395 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2396 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2397 }
2398 sctp_send_shutdown(stcb,
2399 ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2400 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2401 } else {
2402 int is_a_gap;
2403
2404 /* is there a gap now ? */
2405 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2406
2407 /*
2408 * CMT DAC algorithm: increase number of packets received
2409 * since last ack
2410 */
2411 stcb->asoc.cmt_dac_pkts_rcvd++;
2412
2413 if ((stcb->asoc.send_sack == 1) || /* We need to send a
2414 * SACK */
2415 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
2416 * longer is one */
2417 (stcb->asoc.numduptsns) || /* we have dup's */
2418 (is_a_gap) || /* is still a gap */
2419 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
2420 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */
2421 ) {
2422
2423 if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2424 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2425 (stcb->asoc.send_sack == 0) &&
2426 (stcb->asoc.numduptsns == 0) &&
2427 (stcb->asoc.delayed_ack) &&
2428 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2429
2430 /*
2431 * CMT DAC algorithm: With CMT, delay acks
2432 * even in the face of
2433 *
2434 * reordering. Therefore, if acks that do not
2435 * have to be sent because of the above
2436 * reasons, will be delayed. That is, acks
2437 * that would have been sent due to gap
2438 * reports will be delayed with DAC. Start
2439 * the delayed ack timer.
2440 */
2441 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2442 stcb->sctp_ep, stcb, NULL);
2443 } else {
2444 /*
2445 * Ok we must build a SACK since the timer
2446 * is pending, we got our first packet OR
2447 * there are gaps or duplicates.
2448 */
2449 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2450 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2451 }
2452 } else {
2453 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2454 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2455 stcb->sctp_ep, stcb, NULL);
2456 }
2457 }
2458 }
2459 }
2460
2461 void
2462 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2463 {
2464 struct sctp_tmit_chunk *chk;
2465 uint32_t tsize, pd_point;
2466 uint16_t nxt_todel;
2467
2468 if (asoc->fragmented_delivery_inprogress) {
2469 sctp_service_reassembly(stcb, asoc);
2470 }
2471 /* Can we proceed further, i.e. the PD-API is complete */
2472 if (asoc->fragmented_delivery_inprogress) {
2473 /* no */
2474 return;
2475 }
2476 /*
2477 * Now is there some other chunk I can deliver from the reassembly
2478 * queue.
2479 */
2480 doit_again:
2481 chk = TAILQ_FIRST(&asoc->reasmqueue);
2482 if (chk == NULL) {
2483 asoc->size_on_reasm_queue = 0;
2484 asoc->cnt_on_reasm_queue = 0;
2485 return;
2486 }
2487 nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2488 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2489 ((nxt_todel == chk->rec.data.stream_seq) ||
2490 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2491 /*
2492 * Yep the first one is here. We setup to start reception,
2493 * by backing down the TSN just in case we can't deliver.
2494 */
2495
2496 /*
2497 * Before we start though either all of the message should
2498 * be here or the socket buffer max or nothing on the
2499 * delivery queue and something can be delivered.
2500 */
2501 if (stcb->sctp_socket) {
2502 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
2503 stcb->sctp_ep->partial_delivery_point);
2504 } else {
2505 pd_point = stcb->sctp_ep->partial_delivery_point;
2506 }
2507 if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
2508 asoc->fragmented_delivery_inprogress = 1;
2509 asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2510 asoc->str_of_pdapi = chk->rec.data.stream_number;
2511 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2512 asoc->pdapi_ppid = chk->rec.data.payloadtype;
2513 asoc->fragment_flags = chk->rec.data.rcv_flags;
2514 sctp_service_reassembly(stcb, asoc);
2515 if (asoc->fragmented_delivery_inprogress == 0) {
2516 goto doit_again;
2517 }
2518 }
2519 }
2520 }
2521
2522 int
2523 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2524 struct sctphdr *sh, struct sctp_inpcb *inp,
2525 struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t * high_tsn,
2526 uint8_t use_mflowid, uint32_t mflowid,
2527 uint32_t vrf_id, uint16_t port)
2528 {
2529 struct sctp_data_chunk *ch, chunk_buf;
2530 struct sctp_association *asoc;
2531 int num_chunks = 0; /* number of control chunks processed */
2532 int stop_proc = 0;
2533 int chk_length, break_flag, last_chunk;
2534 int abort_flag = 0, was_a_gap;
2535 struct mbuf *m;
2536 uint32_t highest_tsn;
2537
2538 /* set the rwnd */
2539 sctp_set_rwnd(stcb, &stcb->asoc);
2540
2541 m = *mm;
2542 SCTP_TCB_LOCK_ASSERT(stcb);
2543 asoc = &stcb->asoc;
2544 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2545 highest_tsn = asoc->highest_tsn_inside_nr_map;
2546 } else {
2547 highest_tsn = asoc->highest_tsn_inside_map;
2548 }
2549 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2550 /*
2551 * setup where we got the last DATA packet from for any SACK that
2552 * may need to go out. Don't bump the net. This is done ONLY when a
2553 * chunk is assigned.
2554 */
2555 asoc->last_data_chunk_from = net;
2556
2557 /*-
2558 * Now before we proceed we must figure out if this is a wasted
2559 * cluster... i.e. it is a small packet sent in and yet the driver
2560 * underneath allocated a full cluster for it. If so we must copy it
2561 * to a smaller mbuf and free up the cluster mbuf. This will help
2562 * with cluster starvation. Note for __Panda__ we don't do this
2563 * since it has clusters all the way down to 64 bytes.
2564 */
2565 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2566 /* we only handle mbufs that are singletons.. not chains */
2567 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_DONTWAIT, 1, MT_DATA);
2568 if (m) {
2569 /* ok lets see if we can copy the data up */
2570 caddr_t *from, *to;
2571
2572 /* get the pointers and copy */
2573 to = mtod(m, caddr_t *);
2574 from = mtod((*mm), caddr_t *);
2575 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2576 /* copy the length and free up the old */
2577 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2578 sctp_m_freem(*mm);
2579 /* sucess, back copy */
2580 *mm = m;
2581 } else {
2582 /* We are in trouble in the mbuf world .. yikes */
2583 m = *mm;
2584 }
2585 }
2586 /* get pointer to the first chunk header */
2587 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2588 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2589 if (ch == NULL) {
2590 return (1);
2591 }
2592 /*
2593 * process all DATA chunks...
2594 */
2595 *high_tsn = asoc->cumulative_tsn;
2596 break_flag = 0;
2597 asoc->data_pkts_seen++;
2598 while (stop_proc == 0) {
2599 /* validate chunk length */
2600 chk_length = ntohs(ch->ch.chunk_length);
2601 if (length - *offset < chk_length) {
2602 /* all done, mutulated chunk */
2603 stop_proc = 1;
2604 continue;
2605 }
2606 if (ch->ch.chunk_type == SCTP_DATA) {
2607 if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
2608 /*
2609 * Need to send an abort since we had a
2610 * invalid data chunk.
2611 */
2612 struct mbuf *op_err;
2613
2614 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)),
2615 0, M_DONTWAIT, 1, MT_DATA);
2616
2617 if (op_err) {
2618 struct sctp_paramhdr *ph;
2619 uint32_t *ippp;
2620
2621 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) +
2622 (2 * sizeof(uint32_t));
2623 ph = mtod(op_err, struct sctp_paramhdr *);
2624 ph->param_type =
2625 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2626 ph->param_length = htons(SCTP_BUF_LEN(op_err));
2627 ippp = (uint32_t *) (ph + 1);
2628 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2629 ippp++;
2630 *ippp = asoc->cumulative_tsn;
2631
2632 }
2633 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2634 sctp_abort_association(inp, stcb, m, iphlen, sh,
2635 op_err,
2636 use_mflowid, mflowid,
2637 vrf_id, port);
2638 return (2);
2639 }
2640 #ifdef SCTP_AUDITING_ENABLED
2641 sctp_audit_log(0xB1, 0);
2642 #endif
2643 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2644 last_chunk = 1;
2645 } else {
2646 last_chunk = 0;
2647 }
2648 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2649 chk_length, net, high_tsn, &abort_flag, &break_flag,
2650 last_chunk)) {
2651 num_chunks++;
2652 }
2653 if (abort_flag)
2654 return (2);
2655
2656 if (break_flag) {
2657 /*
2658 * Set because of out of rwnd space and no
2659 * drop rep space left.
2660 */
2661 stop_proc = 1;
2662 continue;
2663 }
2664 } else {
2665 /* not a data chunk in the data region */
2666 switch (ch->ch.chunk_type) {
2667 case SCTP_INITIATION:
2668 case SCTP_INITIATION_ACK:
2669 case SCTP_SELECTIVE_ACK:
2670 case SCTP_NR_SELECTIVE_ACK:
2671 case SCTP_HEARTBEAT_REQUEST:
2672 case SCTP_HEARTBEAT_ACK:
2673 case SCTP_ABORT_ASSOCIATION:
2674 case SCTP_SHUTDOWN:
2675 case SCTP_SHUTDOWN_ACK:
2676 case SCTP_OPERATION_ERROR:
2677 case SCTP_COOKIE_ECHO:
2678 case SCTP_COOKIE_ACK:
2679 case SCTP_ECN_ECHO:
2680 case SCTP_ECN_CWR:
2681 case SCTP_SHUTDOWN_COMPLETE:
2682 case SCTP_AUTHENTICATION:
2683 case SCTP_ASCONF_ACK:
2684 case SCTP_PACKET_DROPPED:
2685 case SCTP_STREAM_RESET:
2686 case SCTP_FORWARD_CUM_TSN:
2687 case SCTP_ASCONF:
2688 /*
2689 * Now, what do we do with KNOWN chunks that
2690 * are NOT in the right place?
2691 *
2692 * For now, I do nothing but ignore them. We
2693 * may later want to add sysctl stuff to
2694 * switch out and do either an ABORT() or
2695 * possibly process them.
2696 */
2697 if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
2698 struct mbuf *op_err;
2699
2700 op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION);
2701 sctp_abort_association(inp, stcb,
2702 m, iphlen,
2703 sh, op_err,
2704 use_mflowid, mflowid,
2705 vrf_id, port);
2706 return (2);
2707 }
2708 break;
2709 default:
2710 /* unknown chunk type, use bit rules */
2711 if (ch->ch.chunk_type & 0x40) {
2712 /* Add a error report to the queue */
2713 struct mbuf *merr;
2714 struct sctp_paramhdr *phd;
2715
2716 merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_DONTWAIT, 1, MT_DATA);
2717 if (merr) {
2718 phd = mtod(merr, struct sctp_paramhdr *);
2719 /*
2720 * We cheat and use param
2721 * type since we did not
2722 * bother to define a error
2723 * cause struct. They are
2724 * the same basic format
2725 * with different names.
2726 */
2727 phd->param_type =
2728 htons(SCTP_CAUSE_UNRECOG_CHUNK);
2729 phd->param_length =
2730 htons(chk_length + sizeof(*phd));
2731 SCTP_BUF_LEN(merr) = sizeof(*phd);
2732 SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset, chk_length, M_DONTWAIT);
2733 if (SCTP_BUF_NEXT(merr)) {
2734 if (sctp_pad_lastmbuf(SCTP_BUF_NEXT(merr), SCTP_SIZE32(chk_length) - chk_length, NULL)) {
2735 sctp_m_freem(merr);
2736 } else {
2737 sctp_queue_op_err(stcb, merr);
2738 }
2739 } else {
2740 sctp_m_freem(merr);
2741 }
2742 }
2743 }
2744 if ((ch->ch.chunk_type & 0x80) == 0) {
2745 /* discard the rest of this packet */
2746 stop_proc = 1;
2747 } /* else skip this bad chunk and
2748 * continue... */
2749 break;
2750 } /* switch of chunk type */
2751 }
2752 *offset += SCTP_SIZE32(chk_length);
2753 if ((*offset >= length) || stop_proc) {
2754 /* no more data left in the mbuf chain */
2755 stop_proc = 1;
2756 continue;
2757 }
2758 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2759 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2760 if (ch == NULL) {
2761 *offset = length;
2762 stop_proc = 1;
2763 continue;
2764 }
2765 }
2766 if (break_flag) {
2767 /*
2768 * we need to report rwnd overrun drops.
2769 */
2770 sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0);
2771 }
2772 if (num_chunks) {
2773 /*
2774 * Did we get data, if so update the time for auto-close and
2775 * give peer credit for being alive.
2776 */
2777 SCTP_STAT_INCR(sctps_recvpktwithdata);
2778 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2779 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2780 stcb->asoc.overall_error_count,
2781 0,
2782 SCTP_FROM_SCTP_INDATA,
2783 __LINE__);
2784 }
2785 stcb->asoc.overall_error_count = 0;
2786 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2787 }
2788 /* now service all of the reassm queue if needed */
2789 if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
2790 sctp_service_queues(stcb, asoc);
2791
2792 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2793 /* Assure that we ack right away */
2794 stcb->asoc.send_sack = 1;
2795 }
2796 /* Start a sack timer or QUEUE a SACK for sending */
2797 sctp_sack_check(stcb, was_a_gap);
2798 return (0);
2799 }
2800
2801 static int
2802 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2803 uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2804 int *num_frs,
2805 uint32_t * biggest_newly_acked_tsn,
2806 uint32_t * this_sack_lowest_newack,
2807 int *rto_ok)
2808 {
2809 struct sctp_tmit_chunk *tp1;
2810 unsigned int theTSN;
2811 int j, wake_him = 0, circled = 0;
2812
2813 /* Recover the tp1 we last saw */
2814 tp1 = *p_tp1;
2815 if (tp1 == NULL) {
2816 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2817 }
2818 for (j = frag_strt; j <= frag_end; j++) {
2819 theTSN = j + last_tsn;
2820 while (tp1) {
2821 if (tp1->rec.data.doing_fast_retransmit)
2822 (*num_frs) += 1;
2823
2824 /*-
2825 * CMT: CUCv2 algorithm. For each TSN being
2826 * processed from the sent queue, track the
2827 * next expected pseudo-cumack, or
2828 * rtx_pseudo_cumack, if required. Separate
2829 * cumack trackers for first transmissions,
2830 * and retransmissions.
2831 */
2832 if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2833 (tp1->snd_count == 1)) {
2834 tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2835 tp1->whoTo->find_pseudo_cumack = 0;
2836 }
2837 if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2838 (tp1->snd_count > 1)) {
2839 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2840 tp1->whoTo->find_rtx_pseudo_cumack = 0;
2841 }
2842 if (tp1->rec.data.TSN_seq == theTSN) {
2843 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2844 /*-
2845 * must be held until
2846 * cum-ack passes
2847 */
2848 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2849 /*-
2850 * If it is less than RESEND, it is
2851 * now no-longer in flight.
2852 * Higher values may already be set
2853 * via previous Gap Ack Blocks...
2854 * i.e. ACKED or RESEND.
2855 */
2856 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2857 *biggest_newly_acked_tsn)) {
2858 *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2859 }
2860 /*-
2861 * CMT: SFR algo (and HTNA) - set
2862 * saw_newack to 1 for dest being
2863 * newly acked. update
2864 * this_sack_highest_newack if
2865 * appropriate.
2866 */
2867 if (tp1->rec.data.chunk_was_revoked == 0)
2868 tp1->whoTo->saw_newack = 1;
2869
2870 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2871 tp1->whoTo->this_sack_highest_newack)) {
2872 tp1->whoTo->this_sack_highest_newack =
2873 tp1->rec.data.TSN_seq;
2874 }
2875 /*-
2876 * CMT DAC algo: also update
2877 * this_sack_lowest_newack
2878 */
2879 if (*this_sack_lowest_newack == 0) {
2880 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2881 sctp_log_sack(*this_sack_lowest_newack,
2882 last_tsn,
2883 tp1->rec.data.TSN_seq,
2884 0,
2885 0,
2886 SCTP_LOG_TSN_ACKED);
2887 }
2888 *this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2889 }
2890 /*-
2891 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2892 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2893 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2894 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2895 * Separate pseudo_cumack trackers for first transmissions and
2896 * retransmissions.
2897 */
2898 if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2899 if (tp1->rec.data.chunk_was_revoked == 0) {
2900 tp1->whoTo->new_pseudo_cumack = 1;
2901 }
2902 tp1->whoTo->find_pseudo_cumack = 1;
2903 }
2904 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2905 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2906 }
2907 if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2908 if (tp1->rec.data.chunk_was_revoked == 0) {
2909 tp1->whoTo->new_pseudo_cumack = 1;
2910 }
2911 tp1->whoTo->find_rtx_pseudo_cumack = 1;
2912 }
2913 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2914 sctp_log_sack(*biggest_newly_acked_tsn,
2915 last_tsn,
2916 tp1->rec.data.TSN_seq,
2917 frag_strt,
2918 frag_end,
2919 SCTP_LOG_TSN_ACKED);
2920 }
2921 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2922 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2923 tp1->whoTo->flight_size,
2924 tp1->book_size,
2925 (uintptr_t) tp1->whoTo,
2926 tp1->rec.data.TSN_seq);
2927 }
2928 sctp_flight_size_decrease(tp1);
2929 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
2930 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
2931 tp1);
2932 }
2933 sctp_total_flight_decrease(stcb, tp1);
2934
2935 tp1->whoTo->net_ack += tp1->send_size;
2936 if (tp1->snd_count < 2) {
2937 /*-
2938 * True non-retransmited chunk
2939 */
2940 tp1->whoTo->net_ack2 += tp1->send_size;
2941
2942 /*-
2943 * update RTO too ?
2944 */
2945 if (tp1->do_rtt) {
2946 if (*rto_ok) {
2947 tp1->whoTo->RTO =
2948 sctp_calculate_rto(stcb,
2949 &stcb->asoc,
2950 tp1->whoTo,
2951 &tp1->sent_rcv_time,
2952 sctp_align_safe_nocopy,
2953 SCTP_RTT_FROM_DATA);
2954 *rto_ok = 0;
2955 }
2956 if (tp1->whoTo->rto_needed == 0) {
2957 tp1->whoTo->rto_needed = 1;
2958 }
2959 tp1->do_rtt = 0;
2960 }
2961 }
2962 }
2963 if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2964 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2965 stcb->asoc.this_sack_highest_gap)) {
2966 stcb->asoc.this_sack_highest_gap =
2967 tp1->rec.data.TSN_seq;
2968 }
2969 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2970 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
2971 #ifdef SCTP_AUDITING_ENABLED
2972 sctp_audit_log(0xB2,
2973 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
2974 #endif
2975 }
2976 }
2977 /*-
2978 * All chunks NOT UNSENT fall through here and are marked
2979 * (leave PR-SCTP ones that are to skip alone though)
2980 */
2981 if (tp1->sent != SCTP_FORWARD_TSN_SKIP)
2982 tp1->sent = SCTP_DATAGRAM_MARKED;
2983
2984 if (tp1->rec.data.chunk_was_revoked) {
2985 /* deflate the cwnd */
2986 tp1->whoTo->cwnd -= tp1->book_size;
2987 tp1->rec.data.chunk_was_revoked = 0;
2988 }
2989 /* NR Sack code here */
2990 if (nr_sacking) {
2991 if (tp1->data) {
2992 /*
2993 * sa_ignore
2994 * NO_NULL_CHK
2995 */
2996 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
2997 sctp_m_freem(tp1->data);
2998 tp1->data = NULL;
2999 }
3000 wake_him++;
3001 }
3002 }
3003 break;
3004 } /* if (tp1->TSN_seq == theTSN) */
3005 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
3006 break;
3007 }
3008 tp1 = TAILQ_NEXT(tp1, sctp_next);
3009 if ((tp1 == NULL) && (circled == 0)) {
3010 circled++;
3011 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3012 }
3013 } /* end while (tp1) */
3014 if (tp1 == NULL) {
3015 circled = 0;
3016 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3017 }
3018 /* In case the fragments were not in order we must reset */
3019 } /* end for (j = fragStart */
3020 *p_tp1 = tp1;
3021 return (wake_him); /* Return value only used for nr-sack */
3022 }
3023
3024
3025 static int
3026 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3027 uint32_t last_tsn, uint32_t * biggest_tsn_acked,
3028 uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
3029 int num_seg, int num_nr_seg, int *rto_ok)
3030 {
3031 struct sctp_gap_ack_block *frag, block;
3032 struct sctp_tmit_chunk *tp1;
3033 int i;
3034 int num_frs = 0;
3035 int chunk_freed;
3036 int non_revocable;
3037 uint16_t frag_strt, frag_end, prev_frag_end;
3038
3039 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3040 prev_frag_end = 0;
3041 chunk_freed = 0;
3042
3043 for (i = 0; i < (num_seg + num_nr_seg); i++) {
3044 if (i == num_seg) {
3045 prev_frag_end = 0;
3046 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3047 }
3048 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3049 sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3050 *offset += sizeof(block);
3051 if (frag == NULL) {
3052 return (chunk_freed);
3053 }
3054 frag_strt = ntohs(frag->start);
3055 frag_end = ntohs(frag->end);
3056
3057 if (frag_strt > frag_end) {
3058 /* This gap report is malformed, skip it. */
3059 continue;
3060 }
3061 if (frag_strt <= prev_frag_end) {
3062 /* This gap report is not in order, so restart. */
3063 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3064 }
3065 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3066 *biggest_tsn_acked = last_tsn + frag_end;
3067 }
3068 if (i < num_seg) {
3069 non_revocable = 0;
3070 } else {
3071 non_revocable = 1;
3072 }
3073 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3074 non_revocable, &num_frs, biggest_newly_acked_tsn,
3075 this_sack_lowest_newack, rto_ok)) {
3076 chunk_freed = 1;
3077 }
3078 prev_frag_end = frag_end;
3079 }
3080 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3081 if (num_frs)
3082 sctp_log_fr(*biggest_tsn_acked,
3083 *biggest_newly_acked_tsn,
3084 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3085 }
3086 return (chunk_freed);
3087 }
3088
3089 static void
3090 sctp_check_for_revoked(struct sctp_tcb *stcb,
3091 struct sctp_association *asoc, uint32_t cumack,
3092 uint32_t biggest_tsn_acked)
3093 {
3094 struct sctp_tmit_chunk *tp1;
3095 int tot_revoked = 0;
3096
3097 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3098 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
3099 /*
3100 * ok this guy is either ACK or MARKED. If it is
3101 * ACKED it has been previously acked but not this
3102 * time i.e. revoked. If it is MARKED it was ACK'ed
3103 * again.
3104 */
3105 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
3106 break;
3107 }
3108 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3109 /* it has been revoked */
3110 tp1->sent = SCTP_DATAGRAM_SENT;
3111 tp1->rec.data.chunk_was_revoked = 1;
3112 /*
3113 * We must add this stuff back in to assure
3114 * timers and such get started.
3115 */
3116 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3117 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3118 tp1->whoTo->flight_size,
3119 tp1->book_size,
3120 (uintptr_t) tp1->whoTo,
3121 tp1->rec.data.TSN_seq);
3122 }
3123 sctp_flight_size_increase(tp1);
3124 sctp_total_flight_increase(stcb, tp1);
3125 /*
3126 * We inflate the cwnd to compensate for our
3127 * artificial inflation of the flight_size.
3128 */
3129 tp1->whoTo->cwnd += tp1->book_size;
3130 tot_revoked++;
3131 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3132 sctp_log_sack(asoc->last_acked_seq,
3133 cumack,
3134 tp1->rec.data.TSN_seq,
3135 0,
3136 0,
3137 SCTP_LOG_TSN_REVOKED);
3138 }
3139 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3140 /* it has been re-acked in this SACK */
3141 tp1->sent = SCTP_DATAGRAM_ACKED;
3142 }
3143 }
3144 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3145 break;
3146 }
3147 }
3148
3149
/*-
 * Core fast-retransmit marking for SACK processing: walk the sent queue
 * and "strike" (increment tp1->sent toward SCTP_DATAGRAM_RESEND) every
 * chunk that this SACK gives evidence was lost.  Implements the
 * standard three-strikes rule plus the CMT variants (SFR, DAC, HTNA)
 * and PR-SCTP TTL/RTX-based dropping.  A chunk that reaches RESEND is
 * set up for fast retransmission: flight/rwnd accounting is undone and
 * an (possibly alternate) destination is selected.
 *
 * NOTE(review): the local "tot_retrans" is only incremented, never read.
 */
static void
sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
    uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
{
	struct sctp_tmit_chunk *tp1;
	int strike_flag = 0;
	struct timeval now;
	int tot_retrans = 0;
	uint32_t sending_seq;
	struct sctp_nets *net;
	int num_dests_sacked = 0;

	/*
	 * select the sending_seq, this is either the next thing ready to be
	 * sent but not transmitted, OR, the next seq we assign.
	 */
	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
	if (tp1 == NULL) {
		sending_seq = asoc->sending_seq;
	} else {
		sending_seq = tp1->rec.data.TSN_seq;
	}

	/* CMT DAC algo: finding out if SACK is a mixed SACK */
	if ((asoc->sctp_cmt_on_off > 0) &&
	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
			if (net->saw_newack)
				num_dests_sacked++;
		}
	}
	if (stcb->asoc.peer_supports_prsctp) {
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	/*
	 * Main pass: decide, chunk by chunk, whether this SACK is evidence
	 * of loss, and strike accordingly.
	 */
	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
		strike_flag = 0;
		if (tp1->no_fr_allowed) {
			/* this one had a timeout or something */
			continue;
		}
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
			if (tp1->sent < SCTP_DATAGRAM_RESEND)
				sctp_log_fr(biggest_tsn_newly_acked,
				    tp1->rec.data.TSN_seq,
				    tp1->sent,
				    SCTP_FR_LOG_CHECK_STRIKE);
		}
		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
			/* done */
			break;
		}
		if (stcb->asoc.peer_supports_prsctp) {
			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
				/* Is it expired? */
				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
					/* Yes so drop it */
					if (tp1->data != NULL) {
						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
						    SCTP_SO_NOT_LOCKED);
					}
					continue;
				}
			}
		}
		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
			/* we are beyond the tsn in the sack */
			break;
		}
		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
			/* either a RESEND, ACKED, or MARKED */
			/* skip */
			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
				/* Continue strikin FWD-TSN chunks */
				tp1->rec.data.fwd_tsn_cnt++;
			}
			continue;
		}
		/*
		 * CMT : SFR algo (covers part of DAC and HTNA as well)
		 */
		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
			/*
			 * No new acks were receieved for data sent to this
			 * dest. Therefore, according to the SFR algo for
			 * CMT, no data sent to this dest can be marked for
			 * FR using this SACK.
			 */
			continue;
		} else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
		    tp1->whoTo->this_sack_highest_newack)) {
			/*
			 * CMT: New acks were receieved for data sent to
			 * this dest. But no new acks were seen for data
			 * sent after tp1. Therefore, according to the SFR
			 * algo for CMT, tp1 cannot be marked for FR using
			 * this SACK. This step covers part of the DAC algo
			 * and the HTNA algo as well.
			 */
			continue;
		}
		/*
		 * Here we check to see if we were have already done a FR
		 * and if so we see if the biggest TSN we saw in the sack is
		 * smaller than the recovery point. If so we don't strike
		 * the tsn... otherwise we CAN strike the TSN.
		 */
		/*
		 * @@@ JRI: Check for CMT if (accum_moved &&
		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
		 * 0)) {
		 */
		if (accum_moved && asoc->fast_retran_loss_recovery) {
			/*
			 * Strike the TSN if in fast-recovery and cum-ack
			 * moved.
			 */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
				sctp_log_fr(biggest_tsn_newly_acked,
				    tp1->rec.data.TSN_seq,
				    tp1->sent,
				    SCTP_FR_LOG_STRIKE_CHUNK);
			}
			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
				tp1->sent++;
			}
			if ((asoc->sctp_cmt_on_off > 0) &&
			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
				/*
				 * CMT DAC algorithm: If SACK flag is set to
				 * 0, then lowest_newack test will not pass
				 * because it would have been set to the
				 * cumack earlier. If not already to be
				 * rtx'd, If not a mixed sack and if tp1 is
				 * not between two sacked TSNs, then mark by
				 * one more. NOTE that we are marking by one
				 * additional time since the SACK DAC flag
				 * indicates that two packets have been
				 * received after this missing TSN.
				 */
				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
						sctp_log_fr(16 + num_dests_sacked,
						    tp1->rec.data.TSN_seq,
						    tp1->sent,
						    SCTP_FR_LOG_STRIKE_CHUNK);
					}
					tp1->sent++;
				}
			}
		} else if ((tp1->rec.data.doing_fast_retransmit) &&
		    (asoc->sctp_cmt_on_off == 0)) {
			/*
			 * For those that have done a FR we must take
			 * special consideration if we strike. I.e the
			 * biggest_newly_acked must be higher than the
			 * sending_seq at the time we did the FR.
			 */
			if (
#ifdef SCTP_FR_TO_ALTERNATE
			/*
			 * If FR's go to new networks, then we must only do
			 * this for singly homed asoc's. However if the FR's
			 * go to the same network (Armando's work) then its
			 * ok to FR multiple times.
			 */
			    (asoc->numnets < 2)
#else
			    (1)
#endif
			    ) {

				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
				    tp1->rec.data.fast_retran_tsn)) {
					/*
					 * Strike the TSN, since this ack is
					 * beyond where things were when we
					 * did a FR.
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
						sctp_log_fr(biggest_tsn_newly_acked,
						    tp1->rec.data.TSN_seq,
						    tp1->sent,
						    SCTP_FR_LOG_STRIKE_CHUNK);
					}
					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
						tp1->sent++;
					}
					strike_flag = 1;
					if ((asoc->sctp_cmt_on_off > 0) &&
					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
						/*
						 * CMT DAC algorithm: If
						 * SACK flag is set to 0,
						 * then lowest_newack test
						 * will not pass because it
						 * would have been set to
						 * the cumack earlier. If
						 * not already to be rtx'd,
						 * If not a mixed sack and
						 * if tp1 is not between two
						 * sacked TSNs, then mark by
						 * one more. NOTE that we
						 * are marking by one
						 * additional time since the
						 * SACK DAC flag indicates
						 * that two packets have
						 * been received after this
						 * missing TSN.
						 */
						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
						    (num_dests_sacked == 1) &&
						    SCTP_TSN_GT(this_sack_lowest_newack,
						    tp1->rec.data.TSN_seq)) {
							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
								sctp_log_fr(32 + num_dests_sacked,
								    tp1->rec.data.TSN_seq,
								    tp1->sent,
								    SCTP_FR_LOG_STRIKE_CHUNK);
							}
							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
								tp1->sent++;
							}
						}
					}
				}
			}
			/*
			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
			 * algo covers HTNA.
			 */
		} else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
		    biggest_tsn_newly_acked)) {
			/*
			 * We don't strike these: This is the HTNA
			 * algorithm i.e. we don't strike If our TSN is
			 * larger than the Highest TSN Newly Acked.
			 */
			;
		} else {
			/* Strike the TSN */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
				sctp_log_fr(biggest_tsn_newly_acked,
				    tp1->rec.data.TSN_seq,
				    tp1->sent,
				    SCTP_FR_LOG_STRIKE_CHUNK);
			}
			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
				tp1->sent++;
			}
			if ((asoc->sctp_cmt_on_off > 0) &&
			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
				/*
				 * CMT DAC algorithm: If SACK flag is set to
				 * 0, then lowest_newack test will not pass
				 * because it would have been set to the
				 * cumack earlier. If not already to be
				 * rtx'd, If not a mixed sack and if tp1 is
				 * not between two sacked TSNs, then mark by
				 * one more. NOTE that we are marking by one
				 * additional time since the SACK DAC flag
				 * indicates that two packets have been
				 * received after this missing TSN.
				 */
				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
						sctp_log_fr(48 + num_dests_sacked,
						    tp1->rec.data.TSN_seq,
						    tp1->sent,
						    SCTP_FR_LOG_STRIKE_CHUNK);
					}
					tp1->sent++;
				}
			}
		}
		/*
		 * If the strikes above pushed this chunk to RESEND it is
		 * now due for fast retransmission: undo its flight/rwnd
		 * accounting and pick the destination to resend on.
		 */
		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
			struct sctp_nets *alt;

			/* fix counts and things */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
				    tp1->book_size,
				    (uintptr_t) tp1->whoTo,
				    tp1->rec.data.TSN_seq);
			}
			if (tp1->whoTo) {
				tp1->whoTo->net_ack++;
				sctp_flight_size_decrease(tp1);
				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
					    tp1);
				}
			}
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
			}
			/* add back to the rwnd */
			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));

			/* remove from the total flight */
			sctp_total_flight_decrease(stcb, tp1);

			if ((stcb->asoc.peer_supports_prsctp) &&
			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
				/*
				 * Has it been retransmitted tv_sec times? -
				 * we store the retran count there.
				 */
				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
					/* Yes, so drop it */
					if (tp1->data != NULL) {
						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
						    SCTP_SO_NOT_LOCKED);
					}
					/* Make sure to flag we had a FR */
					tp1->whoTo->net_ack++;
					continue;
				}
			}
			/*
			 * SCTP_PRINTF("OK, we are now ready to FR this
			 * guy\n");
			 */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
				sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
				    0, SCTP_FR_MARKED);
			}
			if (strike_flag) {
				/* This is a subsequent FR */
				SCTP_STAT_INCR(sctps_sendmultfastretrans);
			}
			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
			if (asoc->sctp_cmt_on_off > 0) {
				/*
				 * CMT: Using RTX_SSTHRESH policy for CMT.
				 * If CMT is being used, then pick dest with
				 * largest ssthresh for any retransmission.
				 */
				tp1->no_fr_allowed = 1;
				alt = tp1->whoTo;
				/* sa_ignore NO_NULL_CHK */
				if (asoc->sctp_cmt_pf > 0) {
					/*
					 * JRS 5/18/07 - If CMT PF is on,
					 * use the PF version of
					 * find_alt_net()
					 */
					alt = sctp_find_alternate_net(stcb, alt, 2);
				} else {
					/*
					 * JRS 5/18/07 - If only CMT is on,
					 * use the CMT version of
					 * find_alt_net()
					 */
					/* sa_ignore NO_NULL_CHK */
					alt = sctp_find_alternate_net(stcb, alt, 1);
				}
				if (alt == NULL) {
					alt = tp1->whoTo;
				}
				/*
				 * CUCv2: If a different dest is picked for
				 * the retransmission, then new
				 * (rtx-)pseudo_cumack needs to be tracked
				 * for orig dest. Let CUCv2 track new (rtx-)
				 * pseudo-cumack always.
				 */
				if (tp1->whoTo) {
					tp1->whoTo->find_pseudo_cumack = 1;
					tp1->whoTo->find_rtx_pseudo_cumack = 1;
				}
			} else {/* CMT is OFF */

#ifdef SCTP_FR_TO_ALTERNATE
				/* Can we find an alternate? */
				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
#else
				/*
				 * default behavior is to NOT retransmit
				 * FR's to an alternate. Armando Caro's
				 * paper details why.
				 */
				alt = tp1->whoTo;
#endif
			}

			tp1->rec.data.doing_fast_retransmit = 1;
			tot_retrans++;
			/* mark the sending seq for possible subsequent FR's */
			/*
			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
			 * (uint32_t)tpi->rec.data.TSN_seq);
			 */
			if (TAILQ_EMPTY(&asoc->send_queue)) {
				/*
				 * If the queue of send is empty then its
				 * the next sequence number that will be
				 * assigned so we subtract one from this to
				 * get the one we last sent.
				 */
				tp1->rec.data.fast_retran_tsn = sending_seq;
			} else {
				/*
				 * If there are chunks on the send queue
				 * (unsent data that has made it from the
				 * stream queues but not out the door, we
				 * take the first one (which will have the
				 * lowest TSN) and subtract one to get the
				 * one we last sent.
				 */
				struct sctp_tmit_chunk *ttt;

				ttt = TAILQ_FIRST(&asoc->send_queue);
				tp1->rec.data.fast_retran_tsn =
				    ttt->rec.data.TSN_seq;
			}

			if (tp1->do_rtt) {
				/*
				 * this guy had a RTO calculation pending on
				 * it, cancel it
				 */
				if ((tp1->whoTo != NULL) &&
				    (tp1->whoTo->rto_needed == 0)) {
					tp1->whoTo->rto_needed = 1;
				}
				tp1->do_rtt = 0;
			}
			if (alt != tp1->whoTo) {
				/* yes, there is an alternate. */
				sctp_free_remote_addr(tp1->whoTo);
				/* sa_ignore FREED_MEMORY */
				tp1->whoTo = alt;
				atomic_add_int(&alt->ref_count, 1);
			}
		}
	}
}
3592
3593 struct sctp_tmit_chunk *
3594 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3595 struct sctp_association *asoc)
3596 {
3597 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3598 struct timeval now;
3599 int now_filled = 0;
3600
3601 if (asoc->peer_supports_prsctp == 0) {
3602 return (NULL);
3603 }
3604 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3605 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3606 tp1->sent != SCTP_DATAGRAM_RESEND) {
3607 /* no chance to advance, out of here */
3608 break;
3609 }
3610 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3611 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3612 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3613 asoc->advanced_peer_ack_point,
3614 tp1->rec.data.TSN_seq, 0, 0);
3615 }
3616 }
3617 if (!PR_SCTP_ENABLED(tp1->flags)) {
3618 /*
3619 * We can't fwd-tsn past any that are reliable aka
3620 * retransmitted until the asoc fails.
3621 */
3622 break;
3623 }
3624 if (!now_filled) {
3625 (void)SCTP_GETTIME_TIMEVAL(&now);
3626 now_filled = 1;
3627 }
3628 /*
3629 * now we got a chunk which is marked for another
3630 * retransmission to a PR-stream but has run out its chances
3631 * already maybe OR has been marked to skip now. Can we skip
3632 * it if its a resend?
3633 */
3634 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3635 (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3636 /*
3637 * Now is this one marked for resend and its time is
3638 * now up?
3639 */
3640 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3641 /* Yes so drop it */
3642 if (tp1->data) {
3643 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3644 1, SCTP_SO_NOT_LOCKED);
3645 }
3646 } else {
3647 /*
3648 * No, we are done when hit one for resend
3649 * whos time as not expired.
3650 */
3651 break;
3652 }
3653 }
3654 /*
3655 * Ok now if this chunk is marked to drop it we can clean up
3656 * the chunk, advance our peer ack point and we can check
3657 * the next chunk.
3658 */
3659 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3660 /* advance PeerAckPoint goes forward */
3661 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) {
3662 asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3663 a_adv = tp1;
3664 } else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3665 /* No update but we do save the chk */
3666 a_adv = tp1;
3667 }
3668 } else {
3669 /*
3670 * If it is still in RESEND we can advance no
3671 * further
3672 */
3673 break;
3674 }
3675 }
3676 return (a_adv);
3677 }
3678
3679 static int
3680 sctp_fs_audit(struct sctp_association *asoc)
3681 {
3682 struct sctp_tmit_chunk *chk;
3683 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3684 int entry_flight, entry_cnt, ret;
3685
3686 entry_flight = asoc->total_flight;
3687 entry_cnt = asoc->total_flight_count;
3688 ret = 0;
3689
3690 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3691 return (0);
3692
3693 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3694 if (chk->sent < SCTP_DATAGRAM_RESEND) {
3695 SCTP_PRINTF("Chk TSN:%u size:%d inflight cnt:%d\n",
3696 chk->rec.data.TSN_seq,
3697 chk->send_size,
3698 chk->snd_count);
3699 inflight++;
3700 } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3701 resend++;
3702 } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3703 inbetween++;
3704 } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3705 above++;
3706 } else {
3707 acked++;
3708 }
3709 }
3710
3711 if ((inflight > 0) || (inbetween > 0)) {
3712 #ifdef INVARIANTS
3713 panic("Flight size-express incorrect? \n");
3714 #else
3715 SCTP_PRINTF("asoc->total_flight:%d cnt:%d\n",
3716 entry_flight, entry_cnt);
3717
3718 SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
3719 inflight, inbetween, resend, above, acked);
3720 ret = 1;
3721 #endif
3722 }
3723 return (ret);
3724 }
3725
3726
3727 static void
3728 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3729 struct sctp_association *asoc,
3730 struct sctp_tmit_chunk *tp1)
3731 {
3732 tp1->window_probe = 0;
3733 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3734 /* TSN's skipped we do NOT move back. */
3735 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3736 tp1->whoTo->flight_size,
3737 tp1->book_size,
3738 (uintptr_t) tp1->whoTo,
3739 tp1->rec.data.TSN_seq);
3740 return;
3741 }
3742 /* First setup this by shrinking flight */
3743 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3744 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3745 tp1);
3746 }
3747 sctp_flight_size_decrease(tp1);
3748 sctp_total_flight_decrease(stcb, tp1);
3749 /* Now mark for resend */
3750 tp1->sent = SCTP_DATAGRAM_RESEND;
3751 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3752
3753 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3754 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3755 tp1->whoTo->flight_size,
3756 tp1->book_size,
3757 (uintptr_t) tp1->whoTo,
3758 tp1->rec.data.TSN_seq);
3759 }
3760 }
3761
3762 void
3763 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3764 uint32_t rwnd, int *abort_now, int ecne_seen)
3765 {
3766 struct sctp_nets *net;
3767 struct sctp_association *asoc;
3768 struct sctp_tmit_chunk *tp1, *tp2;
3769 uint32_t old_rwnd;
3770 int win_probe_recovery = 0;
3771 int win_probe_recovered = 0;
3772 int j, done_once = 0;
3773 int rto_ok = 1;
3774
3775 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3776 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3777 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3778 }
3779 SCTP_TCB_LOCK_ASSERT(stcb);
3780 #ifdef SCTP_ASOCLOG_OF_TSNS
3781 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3782 stcb->asoc.cumack_log_at++;
3783 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3784 stcb->asoc.cumack_log_at = 0;
3785 }
3786 #endif
3787 asoc = &stcb->asoc;
3788 old_rwnd = asoc->peers_rwnd;
3789 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3790 /* old ack */
3791 return;
3792 } else if (asoc->last_acked_seq == cumack) {
3793 /* Window update sack */
3794 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3795 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3796 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3797 /* SWS sender side engages */
3798 asoc->peers_rwnd = 0;
3799 }
3800 if (asoc->peers_rwnd > old_rwnd) {
3801 goto again;
3802 }
3803 return;
3804 }
3805 /* First setup for CC stuff */
3806 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3807 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3808 /* Drag along the window_tsn for cwr's */
3809 net->cwr_window_tsn = cumack;
3810 }
3811 net->prev_cwnd = net->cwnd;
3812 net->net_ack = 0;
3813 net->net_ack2 = 0;
3814
3815 /*
3816 * CMT: Reset CUC and Fast recovery algo variables before
3817 * SACK processing
3818 */
3819 net->new_pseudo_cumack = 0;
3820 net->will_exit_fast_recovery = 0;
3821 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3822 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
3823 }
3824 }
3825 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
3826 uint32_t send_s;
3827
3828 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3829 tp1 = TAILQ_LAST(&asoc->sent_queue,
3830 sctpchunk_listhead);
3831 send_s = tp1->rec.data.TSN_seq + 1;
3832 } else {
3833 send_s = asoc->sending_seq;
3834 }
3835 if (SCTP_TSN_GE(cumack, send_s)) {
3836 #ifndef INVARIANTS
3837 struct mbuf *oper;
3838
3839 #endif
3840 #ifdef INVARIANTS
3841 panic("Impossible sack 1");
3842 #else
3843
3844 *abort_now = 1;
3845 /* XXX */
3846 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
3847 0, M_DONTWAIT, 1, MT_DATA);
3848 if (oper) {
3849 struct sctp_paramhdr *ph;
3850 uint32_t *ippp;
3851
3852 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
3853 sizeof(uint32_t);
3854 ph = mtod(oper, struct sctp_paramhdr *);
3855 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
3856 ph->param_length = htons(SCTP_BUF_LEN(oper));
3857 ippp = (uint32_t *) (ph + 1);
3858 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
3859 }
3860 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
3861 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
3862 return;
3863 #endif
3864 }
3865 }
3866 asoc->this_sack_highest_gap = cumack;
3867 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3868 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3869 stcb->asoc.overall_error_count,
3870 0,
3871 SCTP_FROM_SCTP_INDATA,
3872 __LINE__);
3873 }
3874 stcb->asoc.overall_error_count = 0;
3875 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3876 /* process the new consecutive TSN first */
3877 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3878 if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) {
3879 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3880 SCTP_PRINTF("Warning, an unsent is now acked?\n");
3881 }
3882 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3883 /*
3884 * If it is less than ACKED, it is
3885 * now no-longer in flight. Higher
3886 * values may occur during marking
3887 */
3888 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3889 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3890 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3891 tp1->whoTo->flight_size,
3892 tp1->book_size,
3893 (uintptr_t) tp1->whoTo,
3894 tp1->rec.data.TSN_seq);
3895 }
3896 sctp_flight_size_decrease(tp1);
3897 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3898 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3899 tp1);
3900 }
3901 /* sa_ignore NO_NULL_CHK */
3902 sctp_total_flight_decrease(stcb, tp1);
3903 }
3904 tp1->whoTo->net_ack += tp1->send_size;
3905 if (tp1->snd_count < 2) {
3906 /*
3907 * True non-retransmited
3908 * chunk
3909 */
3910 tp1->whoTo->net_ack2 +=
3911 tp1->send_size;
3912
3913 /* update RTO too? */
3914 if (tp1->do_rtt) {
3915 if (rto_ok) {
3916 tp1->whoTo->RTO =
3917 /*
3918 * sa_ignore
3919 * NO_NULL_CH
3920 * K
3921 */
3922 sctp_calculate_rto(stcb,
3923 asoc, tp1->whoTo,
3924 &tp1->sent_rcv_time,
3925 sctp_align_safe_nocopy,
3926 SCTP_RTT_FROM_DATA);
3927 rto_ok = 0;
3928 }
3929 if (tp1->whoTo->rto_needed == 0) {
3930 tp1->whoTo->rto_needed = 1;
3931 }
3932 tp1->do_rtt = 0;
3933 }
3934 }
3935 /*
3936 * CMT: CUCv2 algorithm. From the
3937 * cumack'd TSNs, for each TSN being
3938 * acked for the first time, set the
3939 * following variables for the
3940 * corresp destination.
3941 * new_pseudo_cumack will trigger a
3942 * cwnd update.
3943 * find_(rtx_)pseudo_cumack will
3944 * trigger search for the next
3945 * expected (rtx-)pseudo-cumack.
3946 */
3947 tp1->whoTo->new_pseudo_cumack = 1;
3948 tp1->whoTo->find_pseudo_cumack = 1;
3949 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3950
3951 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3952 /* sa_ignore NO_NULL_CHK */
3953 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3954 }
3955 }
3956 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3957 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3958 }
3959 if (tp1->rec.data.chunk_was_revoked) {
3960 /* deflate the cwnd */
3961 tp1->whoTo->cwnd -= tp1->book_size;
3962 tp1->rec.data.chunk_was_revoked = 0;
3963 }
3964 tp1->sent = SCTP_DATAGRAM_ACKED;
3965 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3966 if (tp1->data) {
3967 /* sa_ignore NO_NULL_CHK */
3968 sctp_free_bufspace(stcb, asoc, tp1, 1);
3969 sctp_m_freem(tp1->data);
3970 tp1->data = NULL;
3971 }
3972 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3973 sctp_log_sack(asoc->last_acked_seq,
3974 cumack,
3975 tp1->rec.data.TSN_seq,
3976 0,
3977 0,
3978 SCTP_LOG_FREE_SENT);
3979 }
3980 asoc->sent_queue_cnt--;
3981 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
3982 } else {
3983 break;
3984 }
3985 }
3986
3987 }
3988 /* sa_ignore NO_NULL_CHK */
3989 if (stcb->sctp_socket) {
3990 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3991 struct socket *so;
3992
3993 #endif
3994 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
3995 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3996 /* sa_ignore NO_NULL_CHK */
3997 sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
3998 }
3999 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4000 so = SCTP_INP_SO(stcb->sctp_ep);
4001 atomic_add_int(&stcb->asoc.refcnt, 1);
4002 SCTP_TCB_UNLOCK(stcb);
4003 SCTP_SOCKET_LOCK(so, 1);
4004 SCTP_TCB_LOCK(stcb);
4005 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4006 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4007 /* assoc was freed while we were unlocked */
4008 SCTP_SOCKET_UNLOCK(so, 1);
4009 return;
4010 }
4011 #endif
4012 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4013 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4014 SCTP_SOCKET_UNLOCK(so, 1);
4015 #endif
4016 } else {
4017 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4018 sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4019 }
4020 }
4021
4022 /* JRS - Use the congestion control given in the CC module */
4023 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4024 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4025 if (net->net_ack2 > 0) {
4026 /*
4027 * Karn's rule applies to clearing error
4028 * count, this is optional.
4029 */
4030 net->error_count = 0;
4031 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4032 /* addr came good */
4033 net->dest_state |= SCTP_ADDR_REACHABLE;
4034 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4035 0, (void *)net, SCTP_SO_NOT_LOCKED);
4036 }
4037 if (net == stcb->asoc.primary_destination) {
4038 if (stcb->asoc.alternate) {
4039 /*
4040 * release the alternate,
4041 * primary is good
4042 */
4043 sctp_free_remote_addr(stcb->asoc.alternate);
4044 stcb->asoc.alternate = NULL;
4045 }
4046 }
4047 if (net->dest_state & SCTP_ADDR_PF) {
4048 net->dest_state &= ~SCTP_ADDR_PF;
4049 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
4050 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4051 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4052 /* Done with this net */
4053 net->net_ack = 0;
4054 }
4055 /* restore any doubled timers */
4056 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4057 if (net->RTO < stcb->asoc.minrto) {
4058 net->RTO = stcb->asoc.minrto;
4059 }
4060 if (net->RTO > stcb->asoc.maxrto) {
4061 net->RTO = stcb->asoc.maxrto;
4062 }
4063 }
4064 }
4065 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4066 }
4067 asoc->last_acked_seq = cumack;
4068
4069 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4070 /* nothing left in-flight */
4071 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4072 net->flight_size = 0;
4073 net->partial_bytes_acked = 0;
4074 }
4075 asoc->total_flight = 0;
4076 asoc->total_flight_count = 0;
4077 }
4078 /* RWND update */
4079 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4080 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4081 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4082 /* SWS sender side engages */
4083 asoc->peers_rwnd = 0;
4084 }
4085 if (asoc->peers_rwnd > old_rwnd) {
4086 win_probe_recovery = 1;
4087 }
4088 /* Now assure a timer where data is queued at */
4089 again:
4090 j = 0;
4091 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4092 int to_ticks;
4093
4094 if (win_probe_recovery && (net->window_probe)) {
4095 win_probe_recovered = 1;
4096 /*
4097 * Find first chunk that was used with window probe
4098 * and clear the sent
4099 */
4100 /* sa_ignore FREED_MEMORY */
4101 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4102 if (tp1->window_probe) {
4103 /* move back to data send queue */
4104 sctp_window_probe_recovery(stcb, asoc, tp1);
4105 break;
4106 }
4107 }
4108 }
4109 if (net->RTO == 0) {
4110 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4111 } else {
4112 to_ticks = MSEC_TO_TICKS(net->RTO);
4113 }
4114 if (net->flight_size) {
4115 j++;
4116 (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4117 sctp_timeout_handler, &net->rxt_timer);
4118 if (net->window_probe) {
4119 net->window_probe = 0;
4120 }
4121 } else {
4122 if (net->window_probe) {
4123 /*
4124 * In window probes we must assure a timer
4125 * is still running there
4126 */
4127 net->window_probe = 0;
4128 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4129 SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4130 sctp_timeout_handler, &net->rxt_timer);
4131 }
4132 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4133 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4134 stcb, net,
4135 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4136 }
4137 }
4138 }
4139 if ((j == 0) &&
4140 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4141 (asoc->sent_queue_retran_cnt == 0) &&
4142 (win_probe_recovered == 0) &&
4143 (done_once == 0)) {
4144 /*
4145 * huh, this should not happen unless all packets are
4146 * PR-SCTP and marked to skip of course.
4147 */
4148 if (sctp_fs_audit(asoc)) {
4149 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4150 net->flight_size = 0;
4151 }
4152 asoc->total_flight = 0;
4153 asoc->total_flight_count = 0;
4154 asoc->sent_queue_retran_cnt = 0;
4155 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4156 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4157 sctp_flight_size_increase(tp1);
4158 sctp_total_flight_increase(stcb, tp1);
4159 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4160 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4161 }
4162 }
4163 }
4164 done_once = 1;
4165 goto again;
4166 }
4167 /**********************************/
4168 /* Now what about shutdown issues */
4169 /**********************************/
4170 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4171 /* nothing left on sendqueue.. consider done */
4172 /* clean up */
4173 if ((asoc->stream_queue_cnt == 1) &&
4174 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4175 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4176 (asoc->locked_on_sending)
4177 ) {
4178 struct sctp_stream_queue_pending *sp;
4179
4180 /*
4181 * I may be in a state where we got all across.. but
4182 * cannot write more due to a shutdown... we abort
4183 * since the user did not indicate EOR in this case.
4184 * The sp will be cleaned during free of the asoc.
4185 */
4186 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4187 sctp_streamhead);
4188 if ((sp) && (sp->length == 0)) {
4189 /* Let cleanup code purge it */
4190 if (sp->msg_is_complete) {
4191 asoc->stream_queue_cnt--;
4192 } else {
4193 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4194 asoc->locked_on_sending = NULL;
4195 asoc->stream_queue_cnt--;
4196 }
4197 }
4198 }
4199 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4200 (asoc->stream_queue_cnt == 0)) {
4201 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4202 /* Need to abort here */
4203 struct mbuf *oper;
4204
4205 abort_out_now:
4206 *abort_now = 1;
4207 /* XXX */
4208 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4209 0, M_DONTWAIT, 1, MT_DATA);
4210 if (oper) {
4211 struct sctp_paramhdr *ph;
4212 uint32_t *ippp;
4213
4214 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4215 sizeof(uint32_t);
4216 ph = mtod(oper, struct sctp_paramhdr *);
4217 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4218 ph->param_length = htons(SCTP_BUF_LEN(oper));
4219 ippp = (uint32_t *) (ph + 1);
4220 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
4221 }
4222 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4223 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
4224 } else {
4225 struct sctp_nets *netp;
4226
4227 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4228 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4229 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4230 }
4231 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4232 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4233 sctp_stop_timers_for_shutdown(stcb);
4234 if (asoc->alternate) {
4235 netp = asoc->alternate;
4236 } else {
4237 netp = asoc->primary_destination;
4238 }
4239 sctp_send_shutdown(stcb, netp);
4240 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4241 stcb->sctp_ep, stcb, netp);
4242 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4243 stcb->sctp_ep, stcb, netp);
4244 }
4245 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4246 (asoc->stream_queue_cnt == 0)) {
4247 struct sctp_nets *netp;
4248
4249 if (asoc->alternate) {
4250 netp = asoc->alternate;
4251 } else {
4252 netp = asoc->primary_destination;
4253 }
4254 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4255 goto abort_out_now;
4256 }
4257 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4258 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4259 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4260 sctp_send_shutdown_ack(stcb, netp);
4261 sctp_stop_timers_for_shutdown(stcb);
4262 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4263 stcb->sctp_ep, stcb, netp);
4264 }
4265 }
4266 /*********************************************/
4267 /* Here we perform PR-SCTP procedures */
4268 /* (section 4.2) */
4269 /*********************************************/
4270 /* C1. update advancedPeerAckPoint */
4271 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4272 asoc->advanced_peer_ack_point = cumack;
4273 }
4274 /* PR-Sctp issues need to be addressed too */
4275 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
4276 struct sctp_tmit_chunk *lchk;
4277 uint32_t old_adv_peer_ack_point;
4278
4279 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4280 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4281 /* C3. See if we need to send a Fwd-TSN */
4282 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4283 /*
4284 * ISSUE with ECN, see FWD-TSN processing.
4285 */
4286 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4287 send_forward_tsn(stcb, asoc);
4288 } else if (lchk) {
4289 /* try to FR fwd-tsn's that get lost too */
4290 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4291 send_forward_tsn(stcb, asoc);
4292 }
4293 }
4294 }
4295 if (lchk) {
4296 /* Assure a timer is up */
4297 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4298 stcb->sctp_ep, stcb, lchk->whoTo);
4299 }
4300 }
4301 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4302 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4303 rwnd,
4304 stcb->asoc.peers_rwnd,
4305 stcb->asoc.total_flight,
4306 stcb->asoc.total_output_queue_size);
4307 }
4308 }
4309
4310 void
4311 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4312 struct sctp_tcb *stcb,
4313 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4314 int *abort_now, uint8_t flags,
4315 uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4316 {
4317 struct sctp_association *asoc;
4318 struct sctp_tmit_chunk *tp1, *tp2;
4319 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4320 uint16_t wake_him = 0;
4321 uint32_t send_s = 0;
4322 long j;
4323 int accum_moved = 0;
4324 int will_exit_fast_recovery = 0;
4325 uint32_t a_rwnd, old_rwnd;
4326 int win_probe_recovery = 0;
4327 int win_probe_recovered = 0;
4328 struct sctp_nets *net = NULL;
4329 int done_once;
4330 int rto_ok = 1;
4331 uint8_t reneged_all = 0;
4332 uint8_t cmt_dac_flag;
4333
4334 /*
4335 * we take any chance we can to service our queues since we cannot
4336 * get awoken when the socket is read from :<
4337 */
4338 /*
4339 * Now perform the actual SACK handling: 1) Verify that it is not an
4340 * old sack, if so discard. 2) If there is nothing left in the send
4341 * queue (cum-ack is equal to last acked) then you have a duplicate
4342 * too, update any rwnd change and verify no timers are running.
	 * then return. 3) Process any new consecutive data i.e. cum-ack
4344 * moved process these first and note that it moved. 4) Process any
4345 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4346 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4347 * sync up flightsizes and things, stop all timers and also check
4348 * for shutdown_pending state. If so then go ahead and send off the
4349 * shutdown. If in shutdown recv, send off the shutdown-ack and
4350 * start that timer, Ret. 9) Strike any non-acked things and do FR
4351 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4352 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4353 * if in shutdown_recv state.
4354 */
4355 SCTP_TCB_LOCK_ASSERT(stcb);
4356 /* CMT DAC algo */
4357 this_sack_lowest_newack = 0;
4358 SCTP_STAT_INCR(sctps_slowpath_sack);
4359 last_tsn = cum_ack;
4360 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4361 #ifdef SCTP_ASOCLOG_OF_TSNS
4362 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4363 stcb->asoc.cumack_log_at++;
4364 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4365 stcb->asoc.cumack_log_at = 0;
4366 }
4367 #endif
4368 a_rwnd = rwnd;
4369
4370 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4371 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4372 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4373 }
4374 old_rwnd = stcb->asoc.peers_rwnd;
4375 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4376 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4377 stcb->asoc.overall_error_count,
4378 0,
4379 SCTP_FROM_SCTP_INDATA,
4380 __LINE__);
4381 }
4382 stcb->asoc.overall_error_count = 0;
4383 asoc = &stcb->asoc;
4384 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4385 sctp_log_sack(asoc->last_acked_seq,
4386 cum_ack,
4387 0,
4388 num_seg,
4389 num_dup,
4390 SCTP_LOG_NEW_SACK);
4391 }
4392 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4393 uint16_t i;
4394 uint32_t *dupdata, dblock;
4395
4396 for (i = 0; i < num_dup; i++) {
4397 dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4398 sizeof(uint32_t), (uint8_t *) & dblock);
4399 if (dupdata == NULL) {
4400 break;
4401 }
4402 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4403 }
4404 }
4405 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4406 /* reality check */
4407 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4408 tp1 = TAILQ_LAST(&asoc->sent_queue,
4409 sctpchunk_listhead);
4410 send_s = tp1->rec.data.TSN_seq + 1;
4411 } else {
4412 tp1 = NULL;
4413 send_s = asoc->sending_seq;
4414 }
4415 if (SCTP_TSN_GE(cum_ack, send_s)) {
4416 struct mbuf *oper;
4417
4418 /*
4419 * no way, we have not even sent this TSN out yet.
4420 * Peer is hopelessly messed up with us.
4421 */
4422 SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4423 cum_ack, send_s);
4424 if (tp1) {
4425 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1:%p\n",
4426 tp1->rec.data.TSN_seq, tp1);
4427 }
4428 hopeless_peer:
4429 *abort_now = 1;
4430 /* XXX */
4431 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4432 0, M_DONTWAIT, 1, MT_DATA);
4433 if (oper) {
4434 struct sctp_paramhdr *ph;
4435 uint32_t *ippp;
4436
4437 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4438 sizeof(uint32_t);
4439 ph = mtod(oper, struct sctp_paramhdr *);
4440 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4441 ph->param_length = htons(SCTP_BUF_LEN(oper));
4442 ippp = (uint32_t *) (ph + 1);
4443 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4444 }
4445 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4446 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
4447 return;
4448 }
4449 }
4450 /**********************/
4451 /* 1) check the range */
4452 /**********************/
4453 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4454 /* acking something behind */
4455 return;
4456 }
4457 /* update the Rwnd of the peer */
4458 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4459 TAILQ_EMPTY(&asoc->send_queue) &&
4460 (asoc->stream_queue_cnt == 0)) {
4461 /* nothing left on send/sent and strmq */
4462 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4463 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4464 asoc->peers_rwnd, 0, 0, a_rwnd);
4465 }
4466 asoc->peers_rwnd = a_rwnd;
4467 if (asoc->sent_queue_retran_cnt) {
4468 asoc->sent_queue_retran_cnt = 0;
4469 }
4470 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4471 /* SWS sender side engages */
4472 asoc->peers_rwnd = 0;
4473 }
4474 /* stop any timers */
4475 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4476 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4477 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4478 net->partial_bytes_acked = 0;
4479 net->flight_size = 0;
4480 }
4481 asoc->total_flight = 0;
4482 asoc->total_flight_count = 0;
4483 return;
4484 }
4485 /*
4486 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4487 * things. The total byte count acked is tracked in netAckSz AND
	 * netAck2 is used to track the total bytes acked that are
	 * unambiguous and were never retransmitted. We track these on a per
4490 * destination address basis.
4491 */
4492 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4493 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4494 /* Drag along the window_tsn for cwr's */
4495 net->cwr_window_tsn = cum_ack;
4496 }
4497 net->prev_cwnd = net->cwnd;
4498 net->net_ack = 0;
4499 net->net_ack2 = 0;
4500
4501 /*
4502 * CMT: Reset CUC and Fast recovery algo variables before
4503 * SACK processing
4504 */
4505 net->new_pseudo_cumack = 0;
4506 net->will_exit_fast_recovery = 0;
4507 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4508 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4509 }
4510 }
4511 /* process the new consecutive TSN first */
4512 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4513 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
4514 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4515 accum_moved = 1;
4516 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4517 /*
4518 * If it is less than ACKED, it is
4519 * now no-longer in flight. Higher
4520 * values may occur during marking
4521 */
4522 if ((tp1->whoTo->dest_state &
4523 SCTP_ADDR_UNCONFIRMED) &&
4524 (tp1->snd_count < 2)) {
4525 /*
4526 * If there was no retran
4527 * and the address is
4528 * un-confirmed and we sent
4529 * there and are now
4530 * sacked.. its confirmed,
4531 * mark it so.
4532 */
4533 tp1->whoTo->dest_state &=
4534 ~SCTP_ADDR_UNCONFIRMED;
4535 }
4536 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4537 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4538 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4539 tp1->whoTo->flight_size,
4540 tp1->book_size,
4541 (uintptr_t) tp1->whoTo,
4542 tp1->rec.data.TSN_seq);
4543 }
4544 sctp_flight_size_decrease(tp1);
4545 sctp_total_flight_decrease(stcb, tp1);
4546 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4547 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4548 tp1);
4549 }
4550 }
4551 tp1->whoTo->net_ack += tp1->send_size;
4552
4553 /* CMT SFR and DAC algos */
4554 this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4555 tp1->whoTo->saw_newack = 1;
4556
4557 if (tp1->snd_count < 2) {
4558 /*
					 * True non-retransmitted
4560 * chunk
4561 */
4562 tp1->whoTo->net_ack2 +=
4563 tp1->send_size;
4564
4565 /* update RTO too? */
4566 if (tp1->do_rtt) {
4567 if (rto_ok) {
4568 tp1->whoTo->RTO =
4569 sctp_calculate_rto(stcb,
4570 asoc, tp1->whoTo,
4571 &tp1->sent_rcv_time,
4572 sctp_align_safe_nocopy,
4573 SCTP_RTT_FROM_DATA);
4574 rto_ok = 0;
4575 }
4576 if (tp1->whoTo->rto_needed == 0) {
4577 tp1->whoTo->rto_needed = 1;
4578 }
4579 tp1->do_rtt = 0;
4580 }
4581 }
4582 /*
4583 * CMT: CUCv2 algorithm. From the
4584 * cumack'd TSNs, for each TSN being
4585 * acked for the first time, set the
4586 * following variables for the
4587 * corresp destination.
4588 * new_pseudo_cumack will trigger a
4589 * cwnd update.
4590 * find_(rtx_)pseudo_cumack will
4591 * trigger search for the next
4592 * expected (rtx-)pseudo-cumack.
4593 */
4594 tp1->whoTo->new_pseudo_cumack = 1;
4595 tp1->whoTo->find_pseudo_cumack = 1;
4596 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4597
4598
4599 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4600 sctp_log_sack(asoc->last_acked_seq,
4601 cum_ack,
4602 tp1->rec.data.TSN_seq,
4603 0,
4604 0,
4605 SCTP_LOG_TSN_ACKED);
4606 }
4607 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4608 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4609 }
4610 }
4611 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4612 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4613 #ifdef SCTP_AUDITING_ENABLED
4614 sctp_audit_log(0xB3,
4615 (asoc->sent_queue_retran_cnt & 0x000000ff));
4616 #endif
4617 }
4618 if (tp1->rec.data.chunk_was_revoked) {
4619 /* deflate the cwnd */
4620 tp1->whoTo->cwnd -= tp1->book_size;
4621 tp1->rec.data.chunk_was_revoked = 0;
4622 }
4623 tp1->sent = SCTP_DATAGRAM_ACKED;
4624 }
4625 } else {
4626 break;
4627 }
4628 }
4629 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4630 /* always set this up to cum-ack */
4631 asoc->this_sack_highest_gap = last_tsn;
4632
4633 if ((num_seg > 0) || (num_nr_seg > 0)) {
4634
4635 /*
4636 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4637 * to be greater than the cumack. Also reset saw_newack to 0
4638 * for all dests.
4639 */
4640 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4641 net->saw_newack = 0;
4642 net->this_sack_highest_newack = last_tsn;
4643 }
4644
4645 /*
4646 * thisSackHighestGap will increase while handling NEW
4647 * segments this_sack_highest_newack will increase while
4648 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4649 * used for CMT DAC algo. saw_newack will also change.
4650 */
4651 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4652 &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4653 num_seg, num_nr_seg, &rto_ok)) {
4654 wake_him++;
4655 }
4656 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4657 /*
4658 * validate the biggest_tsn_acked in the gap acks if
4659 * strict adherence is wanted.
4660 */
4661 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4662 /*
4663 * peer is either confused or we are under
4664 * attack. We must abort.
4665 */
4666 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4667 biggest_tsn_acked, send_s);
4668 goto hopeless_peer;
4669 }
4670 }
4671 }
4672 /*******************************************/
4673 /* cancel ALL T3-send timer if accum moved */
4674 /*******************************************/
4675 if (asoc->sctp_cmt_on_off > 0) {
4676 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4677 if (net->new_pseudo_cumack)
4678 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4679 stcb, net,
4680 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4681
4682 }
4683 } else {
4684 if (accum_moved) {
4685 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4686 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4687 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4688 }
4689 }
4690 }
4691 /********************************************/
4692 /* drop the acked chunks from the sentqueue */
4693 /********************************************/
4694 asoc->last_acked_seq = cum_ack;
4695
4696 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4697 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
4698 break;
4699 }
4700 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4701 /* no more sent on list */
4702 SCTP_PRINTF("Warning, tp1->sent == %d and its now acked?\n",
4703 tp1->sent);
4704 }
4705 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4706 if (tp1->pr_sctp_on) {
4707 if (asoc->pr_sctp_cnt != 0)
4708 asoc->pr_sctp_cnt--;
4709 }
4710 asoc->sent_queue_cnt--;
4711 if (tp1->data) {
4712 /* sa_ignore NO_NULL_CHK */
4713 sctp_free_bufspace(stcb, asoc, tp1, 1);
4714 sctp_m_freem(tp1->data);
4715 tp1->data = NULL;
4716 if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4717 asoc->sent_queue_cnt_removeable--;
4718 }
4719 }
4720 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4721 sctp_log_sack(asoc->last_acked_seq,
4722 cum_ack,
4723 tp1->rec.data.TSN_seq,
4724 0,
4725 0,
4726 SCTP_LOG_FREE_SENT);
4727 }
4728 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4729 wake_him++;
4730 }
4731 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4732 #ifdef INVARIANTS
4733 panic("Warning flight size is postive and should be 0");
4734 #else
4735 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4736 asoc->total_flight);
4737 #endif
4738 asoc->total_flight = 0;
4739 }
4740 /* sa_ignore NO_NULL_CHK */
4741 if ((wake_him) && (stcb->sctp_socket)) {
4742 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4743 struct socket *so;
4744
4745 #endif
4746 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4747 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4748 sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4749 }
4750 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4751 so = SCTP_INP_SO(stcb->sctp_ep);
4752 atomic_add_int(&stcb->asoc.refcnt, 1);
4753 SCTP_TCB_UNLOCK(stcb);
4754 SCTP_SOCKET_LOCK(so, 1);
4755 SCTP_TCB_LOCK(stcb);
4756 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4757 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4758 /* assoc was freed while we were unlocked */
4759 SCTP_SOCKET_UNLOCK(so, 1);
4760 return;
4761 }
4762 #endif
4763 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4764 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4765 SCTP_SOCKET_UNLOCK(so, 1);
4766 #endif
4767 } else {
4768 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4769 sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4770 }
4771 }
4772
4773 if (asoc->fast_retran_loss_recovery && accum_moved) {
4774 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4775 /* Setup so we will exit RFC2582 fast recovery */
4776 will_exit_fast_recovery = 1;
4777 }
4778 }
4779 /*
4780 * Check for revoked fragments:
4781 *
4782 * if Previous sack - Had no frags then we can't have any revoked if
4783 * Previous sack - Had frag's then - If we now have frags aka
4784 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4785 * some of them. else - The peer revoked all ACKED fragments, since
4786 * we had some before and now we have NONE.
4787 */
4788
4789 if (num_seg) {
4790 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4791 asoc->saw_sack_with_frags = 1;
4792 } else if (asoc->saw_sack_with_frags) {
4793 int cnt_revoked = 0;
4794
4795 /* Peer revoked all dg's marked or acked */
4796 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4797 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4798 tp1->sent = SCTP_DATAGRAM_SENT;
4799 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4800 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4801 tp1->whoTo->flight_size,
4802 tp1->book_size,
4803 (uintptr_t) tp1->whoTo,
4804 tp1->rec.data.TSN_seq);
4805 }
4806 sctp_flight_size_increase(tp1);
4807 sctp_total_flight_increase(stcb, tp1);
4808 tp1->rec.data.chunk_was_revoked = 1;
4809 /*
4810 * To ensure that this increase in
4811 * flightsize, which is artificial, does not
4812 * throttle the sender, we also increase the
4813 * cwnd artificially.
4814 */
4815 tp1->whoTo->cwnd += tp1->book_size;
4816 cnt_revoked++;
4817 }
4818 }
4819 if (cnt_revoked) {
4820 reneged_all = 1;
4821 }
4822 asoc->saw_sack_with_frags = 0;
4823 }
4824 if (num_nr_seg > 0)
4825 asoc->saw_sack_with_nr_frags = 1;
4826 else
4827 asoc->saw_sack_with_nr_frags = 0;
4828
4829 /* JRS - Use the congestion control given in the CC module */
4830 if (ecne_seen == 0) {
4831 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4832 if (net->net_ack2 > 0) {
4833 /*
4834 * Karn's rule applies to clearing error
4835 * count, this is optional.
4836 */
4837 net->error_count = 0;
4838 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4839 /* addr came good */
4840 net->dest_state |= SCTP_ADDR_REACHABLE;
4841 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4842 0, (void *)net, SCTP_SO_NOT_LOCKED);
4843 }
4844 if (net == stcb->asoc.primary_destination) {
4845 if (stcb->asoc.alternate) {
4846 /*
4847 * release the alternate,
4848 * primary is good
4849 */
4850 sctp_free_remote_addr(stcb->asoc.alternate);
4851 stcb->asoc.alternate = NULL;
4852 }
4853 }
4854 if (net->dest_state & SCTP_ADDR_PF) {
4855 net->dest_state &= ~SCTP_ADDR_PF;
4856 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
4857 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4858 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4859 /* Done with this net */
4860 net->net_ack = 0;
4861 }
4862 /* restore any doubled timers */
4863 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4864 if (net->RTO < stcb->asoc.minrto) {
4865 net->RTO = stcb->asoc.minrto;
4866 }
4867 if (net->RTO > stcb->asoc.maxrto) {
4868 net->RTO = stcb->asoc.maxrto;
4869 }
4870 }
4871 }
4872 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4873 }
4874 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4875 /* nothing left in-flight */
4876 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4877 /* stop all timers */
4878 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4879 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4880 net->flight_size = 0;
4881 net->partial_bytes_acked = 0;
4882 }
4883 asoc->total_flight = 0;
4884 asoc->total_flight_count = 0;
4885 }
4886 /**********************************/
4887 /* Now what about shutdown issues */
4888 /**********************************/
4889 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4890 /* nothing left on sendqueue.. consider done */
4891 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4892 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4893 asoc->peers_rwnd, 0, 0, a_rwnd);
4894 }
4895 asoc->peers_rwnd = a_rwnd;
4896 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4897 /* SWS sender side engages */
4898 asoc->peers_rwnd = 0;
4899 }
4900 /* clean up */
4901 if ((asoc->stream_queue_cnt == 1) &&
4902 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4903 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4904 (asoc->locked_on_sending)
4905 ) {
4906 struct sctp_stream_queue_pending *sp;
4907
4908 /*
4909 * I may be in a state where we got all across.. but
4910 * cannot write more due to a shutdown... we abort
4911 * since the user did not indicate EOR in this case.
4912 */
4913 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4914 sctp_streamhead);
4915 if ((sp) && (sp->length == 0)) {
4916 asoc->locked_on_sending = NULL;
4917 if (sp->msg_is_complete) {
4918 asoc->stream_queue_cnt--;
4919 } else {
4920 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4921 asoc->stream_queue_cnt--;
4922 }
4923 }
4924 }
4925 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4926 (asoc->stream_queue_cnt == 0)) {
4927 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4928 /* Need to abort here */
4929 struct mbuf *oper;
4930
4931 abort_out_now:
4932 *abort_now = 1;
4933 /* XXX */
4934 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4935 0, M_DONTWAIT, 1, MT_DATA);
4936 if (oper) {
4937 struct sctp_paramhdr *ph;
4938 uint32_t *ippp;
4939
4940 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4941 sizeof(uint32_t);
4942 ph = mtod(oper, struct sctp_paramhdr *);
4943 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4944 ph->param_length = htons(SCTP_BUF_LEN(oper));
4945 ippp = (uint32_t *) (ph + 1);
4946 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
4947 }
4948 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
4949 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
4950 return;
4951 } else {
4952 struct sctp_nets *netp;
4953
4954 if (asoc->alternate) {
4955 netp = asoc->alternate;
4956 } else {
4957 netp = asoc->primary_destination;
4958 }
4959 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4960 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4961 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4962 }
4963 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4964 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4965 sctp_stop_timers_for_shutdown(stcb);
4966 sctp_send_shutdown(stcb, netp);
4967 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4968 stcb->sctp_ep, stcb, netp);
4969 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4970 stcb->sctp_ep, stcb, netp);
4971 }
4972 return;
4973 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4974 (asoc->stream_queue_cnt == 0)) {
4975 struct sctp_nets *netp;
4976
4977 if (asoc->alternate) {
4978 netp = asoc->alternate;
4979 } else {
4980 netp = asoc->primary_destination;
4981 }
4982 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4983 goto abort_out_now;
4984 }
4985 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4986 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4987 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4988 sctp_send_shutdown_ack(stcb, netp);
4989 sctp_stop_timers_for_shutdown(stcb);
4990 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4991 stcb->sctp_ep, stcb, netp);
4992 return;
4993 }
4994 }
4995 /*
4996 * Now here we are going to recycle net_ack for a different use...
4997 * HEADS UP.
4998 */
4999 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5000 net->net_ack = 0;
5001 }
5002
5003 /*
5004 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5005 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5006 * automatically ensure that.
5007 */
5008 if ((asoc->sctp_cmt_on_off > 0) &&
5009 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5010 (cmt_dac_flag == 0)) {
5011 this_sack_lowest_newack = cum_ack;
5012 }
5013 if ((num_seg > 0) || (num_nr_seg > 0)) {
5014 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5015 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5016 }
5017 /* JRS - Use the congestion control given in the CC module */
5018 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5019
5020 /* Now are we exiting loss recovery ? */
5021 if (will_exit_fast_recovery) {
5022 /* Ok, we must exit fast recovery */
5023 asoc->fast_retran_loss_recovery = 0;
5024 }
5025 if ((asoc->sat_t3_loss_recovery) &&
5026 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5027 /* end satellite t3 loss recovery */
5028 asoc->sat_t3_loss_recovery = 0;
5029 }
5030 /*
5031 * CMT Fast recovery
5032 */
5033 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5034 if (net->will_exit_fast_recovery) {
5035 /* Ok, we must exit fast recovery */
5036 net->fast_retran_loss_recovery = 0;
5037 }
5038 }
5039
5040 /* Adjust and set the new rwnd value */
5041 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5042 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5043 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5044 }
5045 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5046 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5047 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5048 /* SWS sender side engages */
5049 asoc->peers_rwnd = 0;
5050 }
5051 if (asoc->peers_rwnd > old_rwnd) {
5052 win_probe_recovery = 1;
5053 }
5054 /*
5055 * Now we must setup so we have a timer up for anyone with
5056 * outstanding data.
5057 */
5058 done_once = 0;
5059 again:
5060 j = 0;
5061 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5062 if (win_probe_recovery && (net->window_probe)) {
5063 win_probe_recovered = 1;
5064 /*-
5065 * Find first chunk that was used with
5066 * window probe and clear the event. Put
5067 * it back into the send queue as if has
5068 * not been sent.
5069 */
5070 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5071 if (tp1->window_probe) {
5072 sctp_window_probe_recovery(stcb, asoc, tp1);
5073 break;
5074 }
5075 }
5076 }
5077 if (net->flight_size) {
5078 j++;
5079 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5080 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5081 stcb->sctp_ep, stcb, net);
5082 }
5083 if (net->window_probe) {
5084 net->window_probe = 0;
5085 }
5086 } else {
5087 if (net->window_probe) {
5088 /*
5089 * In window probes we must assure a timer
5090 * is still running there
5091 */
5092 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5093 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5094 stcb->sctp_ep, stcb, net);
5095
5096 }
5097 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5098 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5099 stcb, net,
5100 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
5101 }
5102 }
5103 }
5104 if ((j == 0) &&
5105 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5106 (asoc->sent_queue_retran_cnt == 0) &&
5107 (win_probe_recovered == 0) &&
5108 (done_once == 0)) {
5109 /*
5110 * huh, this should not happen unless all packets are
5111 * PR-SCTP and marked to skip of course.
5112 */
5113 if (sctp_fs_audit(asoc)) {
5114 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5115 net->flight_size = 0;
5116 }
5117 asoc->total_flight = 0;
5118 asoc->total_flight_count = 0;
5119 asoc->sent_queue_retran_cnt = 0;
5120 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5121 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5122 sctp_flight_size_increase(tp1);
5123 sctp_total_flight_increase(stcb, tp1);
5124 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5125 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5126 }
5127 }
5128 }
5129 done_once = 1;
5130 goto again;
5131 }
5132 /*********************************************/
5133 /* Here we perform PR-SCTP procedures */
5134 /* (section 4.2) */
5135 /*********************************************/
5136 /* C1. update advancedPeerAckPoint */
5137 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5138 asoc->advanced_peer_ack_point = cum_ack;
5139 }
5140 /* C2. try to further move advancedPeerAckPoint ahead */
5141 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
5142 struct sctp_tmit_chunk *lchk;
5143 uint32_t old_adv_peer_ack_point;
5144
5145 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5146 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5147 /* C3. See if we need to send a Fwd-TSN */
5148 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5149 /*
5150 * ISSUE with ECN, see FWD-TSN processing.
5151 */
5152 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5153 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5154 0xee, cum_ack, asoc->advanced_peer_ack_point,
5155 old_adv_peer_ack_point);
5156 }
5157 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5158 send_forward_tsn(stcb, asoc);
5159 } else if (lchk) {
5160 /* try to FR fwd-tsn's that get lost too */
5161 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5162 send_forward_tsn(stcb, asoc);
5163 }
5164 }
5165 }
5166 if (lchk) {
5167 /* Assure a timer is up */
5168 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5169 stcb->sctp_ep, stcb, lchk->whoTo);
5170 }
5171 }
5172 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5173 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5174 a_rwnd,
5175 stcb->asoc.peers_rwnd,
5176 stcb->asoc.total_flight,
5177 stcb->asoc.total_output_queue_size);
5178 }
5179 }
5180
5181 void
5182 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5183 {
5184 /* Copy cum-ack */
5185 uint32_t cum_ack, a_rwnd;
5186
5187 cum_ack = ntohl(cp->cumulative_tsn_ack);
5188 /* Arrange so a_rwnd does NOT change */
5189 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5190
5191 /* Now call the express sack handling */
5192 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5193 }
5194
5195 static void
5196 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5197 struct sctp_stream_in *strmin)
5198 {
5199 struct sctp_queued_to_read *ctl, *nctl;
5200 struct sctp_association *asoc;
5201 uint16_t tt;
5202
5203 asoc = &stcb->asoc;
5204 tt = strmin->last_sequence_delivered;
5205 /*
5206 * First deliver anything prior to and including the stream no that
5207 * came in
5208 */
5209 TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
5210 if (SCTP_SSN_GE(tt, ctl->sinfo_ssn)) {
5211 /* this is deliverable now */
5212 TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5213 /* subtract pending on streams */
5214 asoc->size_on_all_streams -= ctl->length;
5215 sctp_ucount_decr(asoc->cnt_on_all_streams);
5216 /* deliver it to at least the delivery-q */
5217 if (stcb->sctp_socket) {
5218 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5219 sctp_add_to_readq(stcb->sctp_ep, stcb,
5220 ctl,
5221 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5222 }
5223 } else {
5224 /* no more delivery now. */
5225 break;
5226 }
5227 }
5228 /*
5229 * now we must deliver things in queue the normal way if any are
5230 * now ready.
5231 */
5232 tt = strmin->last_sequence_delivered + 1;
5233 TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
5234 if (tt == ctl->sinfo_ssn) {
5235 /* this is deliverable now */
5236 TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5237 /* subtract pending on streams */
5238 asoc->size_on_all_streams -= ctl->length;
5239 sctp_ucount_decr(asoc->cnt_on_all_streams);
5240 /* deliver it to at least the delivery-q */
5241 strmin->last_sequence_delivered = ctl->sinfo_ssn;
5242 if (stcb->sctp_socket) {
5243 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5244 sctp_add_to_readq(stcb->sctp_ep, stcb,
5245 ctl,
5246 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5247
5248 }
5249 tt = strmin->last_sequence_delivered + 1;
5250 } else {
5251 break;
5252 }
5253 }
5254 }
5255
/*
 * Purge from the reassembly queue every fragment belonging to the ordered
 * message identified by (stream, seq).  Called while processing a FWD-TSN
 * per-stream entry: the peer has abandoned that message, so its partial
 * data will never complete and must be discarded.  Also advances the
 * stream's last_sequence_delivered when the discarded message is ahead of
 * it, so in-order delivery does not stall waiting on the abandoned SSN.
 */
static void
sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    uint16_t stream, uint16_t seq)
{
	struct sctp_tmit_chunk *chk, *nchk;

	/* For each one on here see if we need to toss it */
	/*
	 * For now large messages held on the reasmqueue that are complete
	 * will be tossed too. We could in theory do more work to spin
	 * through and stop after dumping one msg aka seeing the start of a
	 * new msg at the head, and call the delivery function... to see if
	 * it can be delivered... But for now we just dump everything on the
	 * queue.
	 */
	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
		/*
		 * Do not toss it if on a different stream or marked for
		 * unordered delivery in which case the stream sequence
		 * number has no meaning.
		 */
		if ((chk->rec.data.stream_number != stream) ||
		    ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
			continue;
		}
		if (chk->rec.data.stream_seq == seq) {
			/* It needs to be tossed */
			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
			/*
			 * Record the highest TSN discarded and the
			 * stream/SSN/flags it carried; partial-delivery
			 * tracking state keys off these fields.
			 */
			if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
				asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
				asoc->str_of_pdapi = chk->rec.data.stream_number;
				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
				asoc->fragment_flags = chk->rec.data.rcv_flags;
			}
			asoc->size_on_reasm_queue -= chk->send_size;
			sctp_ucount_decr(asoc->cnt_on_reasm_queue);

			/* Clear up any stream problem */
			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
			    SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
				/*
				 * We must dump forward this streams
				 * sequence number if the chunk is not
				 * unordered that is being skipped. There is
				 * a chance that if the peer does not
				 * include the last fragment in its FWD-TSN
				 * we WILL have a problem here since you
				 * would have a partial chunk in queue that
				 * may not be deliverable. Also if a Partial
				 * delivery API as started the user may get
				 * a partial chunk. The next read returning
				 * a new chunk... really ugly but I see no
				 * way around it! Maybe a notify??
				 */
				asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
			}
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		} else if (SCTP_SSN_GT(chk->rec.data.stream_seq, seq)) {
			/*
			 * If the stream_seq is > than the purging one, we
			 * are done
			 */
			break;
		}
	}
}
5327
5328
/*
 * Receiver-side processing of a FORWARD-TSN chunk (PR-SCTP, RFC 3758).
 * The peer declares it has abandoned all TSNs up to new_cumulative_tsn;
 * we advance our cumulative TSN / mapping arrays, discard now-dead
 * reassembly fragments, abort any partial delivery of a skipped message,
 * and kick the per-stream re-order queues forward.  On a protocol
 * violation (TSN jump larger than the window we advertised) the
 * association is aborted and *abort_flag is set.  m/offset locate the
 * chunk in the received mbuf chain so the optional per-stream
 * (stream, sequence) entries can be pulled out.
 */
void
sctp_handle_forward_tsn(struct sctp_tcb *stcb,
    struct sctp_forward_tsn_chunk *fwd,
    int *abort_flag, struct mbuf *m, int offset)
{
	/* The pr-sctp fwd tsn */
	/*
	 * here we will perform all the data receiver side steps for
	 * processing FwdTSN, as required in by pr-sctp draft:
	 *
	 * Assume we get FwdTSN(x):
	 *
	 * 1) update local cumTSN to x 2) try to further advance cumTSN to x +
	 * others we have 3) examine and update re-ordering queue on
	 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
	 * report where we are.
	 */
	struct sctp_association *asoc;
	uint32_t new_cum_tsn, gap;
	unsigned int i, fwd_sz, m_size;
	uint32_t str_seq;
	struct sctp_stream_in *strm;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_queued_to_read *ctl, *sv;

	asoc = &stcb->asoc;
	/* Validate the wire length before trusting any chunk fields. */
	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
		SCTPDBG(SCTP_DEBUG_INDATA1,
		    "Bad size too small/big fwd-tsn\n");
		return;
	}
	/* Capacity of the mapping array in TSNs (bytes * 8). */
	m_size = (stcb->asoc.mapping_array_size << 3);
	/*************************************************************/
	/* 1. Here we update local cumTSN and shift the bitmap array */
	/*************************************************************/
	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);

	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
		/* Already got there ... */
		return;
	}
	/*
	 * now we know the new TSN is more advanced, let's find the actual
	 * gap
	 */
	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
	asoc->cumulative_tsn = new_cum_tsn;
	if (gap >= m_size) {
		/* The jump lands beyond what the mapping array can cover. */
		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
			struct mbuf *oper;

			/*
			 * out of range (of single byte chunks in the rwnd I
			 * give out). This must be an attacker.
			 */
			*abort_flag = 1;
			/* Build a PROTOCOL_VIOLATION cause carrying the
			 * location code, our highest mapped TSN and the
			 * offending new cum-TSN for diagnostics. */
			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
			    0, M_DONTWAIT, 1, MT_DATA);
			if (oper) {
				struct sctp_paramhdr *ph;
				uint32_t *ippp;

				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
				    (sizeof(uint32_t) * 3);
				ph = mtod(oper, struct sctp_paramhdr *);
				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
				ph->param_length = htons(SCTP_BUF_LEN(oper));
				ippp = (uint32_t *) (ph + 1);
				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
				ippp++;
				*ippp = asoc->highest_tsn_inside_map;
				ippp++;
				*ippp = new_cum_tsn;
			}
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
			sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
			return;
		}
		SCTP_STAT_INCR(sctps_fwdtsn_map_over);

		/* The whole map is now obsolete: clear both arrays and
		 * rebase them just past the new cumulative TSN. */
		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
		asoc->highest_tsn_inside_map = new_cum_tsn;

		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
		asoc->highest_tsn_inside_nr_map = new_cum_tsn;

		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
		}
	} else {
		SCTP_TCB_LOCK_ASSERT(stcb);
		/*
		 * Mark every skipped TSN as present in the NR map unless
		 * it is already recorded in either map, tracking the new
		 * highest NR-mapped TSN as we go.
		 */
		for (i = 0; i <= gap; i++) {
			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
				}
			}
		}
	}
	/*************************************************************/
	/* 2. Clear up re-assembly queue                             */
	/*************************************************************/
	/*
	 * First service it if pd-api is up, just in case we can progress it
	 * forward
	 */
	if (asoc->fragmented_delivery_inprogress) {
		sctp_service_reassembly(stcb, asoc);
	}
	/* For each one on here see if we need to toss it */
	/*
	 * For now large messages held on the reasmqueue that are complete
	 * will be tossed too. We could in theory do more work to spin
	 * through and stop after dumping one msg aka seeing the start of a
	 * new msg at the head, and call the delivery function... to see if
	 * it can be delivered... But for now we just dump everything on the
	 * queue.
	 */
	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
		/* Toss everything at or below the new cumulative TSN. */
		if (SCTP_TSN_GE(new_cum_tsn, chk->rec.data.TSN_seq)) {
			/* It needs to be tossed */
			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
			/* Track highest discarded TSN and its stream state
			 * for partial-delivery bookkeeping. */
			if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
				asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
				asoc->str_of_pdapi = chk->rec.data.stream_number;
				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
				asoc->fragment_flags = chk->rec.data.rcv_flags;
			}
			asoc->size_on_reasm_queue -= chk->send_size;
			sctp_ucount_decr(asoc->cnt_on_reasm_queue);

			/* Clear up any stream problem */
			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
			    SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
				/*
				 * We must dump forward this streams
				 * sequence number if the chunk is not
				 * unordered that is being skipped. There is
				 * a chance that if the peer does not
				 * include the last fragment in its FWD-TSN
				 * we WILL have a problem here since you
				 * would have a partial chunk in queue that
				 * may not be deliverable. Also if a Partial
				 * delivery API as started the user may get
				 * a partial chunk. The next read returning
				 * a new chunk... really ugly but I see no
				 * way around it! Maybe a notify??
				 */
				asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
			}
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		} else {
			/*
			 * Ok we have gone beyond the end of the fwd-tsn's
			 * mark.
			 */
			break;
		}
	}
	/*******************************************************/
	/* 3. Update the PR-stream re-ordering queues and fix  */
	/* delivery issues as needed.                          */
	/*******************************************************/
	fwd_sz -= sizeof(*fwd);
	if (m && fwd_sz) {
		/* New method. */
		unsigned int num_str;
		struct sctp_strseq *stseq, strseqbuf;

		offset += sizeof(*fwd);

		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		num_str = fwd_sz / sizeof(struct sctp_strseq);
		for (i = 0; i < num_str; i++) {
			uint16_t st;

			/* Copy the next (stream, sequence) entry out of the
			 * mbuf chain (may point into strseqbuf). */
			stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
			    sizeof(struct sctp_strseq),
			    (uint8_t *) & strseqbuf);
			offset += sizeof(struct sctp_strseq);
			if (stseq == NULL) {
				/* Ran off the end of the chunk data. */
				break;
			}
			/* Convert */
			st = ntohs(stseq->stream);
			stseq->stream = st;
			st = ntohs(stseq->sequence);
			stseq->sequence = st;

			/* now process */

			/*
			 * Ok we now look for the stream/seq on the read
			 * queue where its not all delivered. If we find it
			 * we transmute the read entry into a PDI_ABORTED.
			 */
			if (stseq->stream >= asoc->streamincnt) {
				/* screwed up streams, stop! */
				break;
			}
			if ((asoc->str_of_pdapi == stseq->stream) &&
			    (asoc->ssn_of_pdapi == stseq->sequence)) {
				/*
				 * If this is the one we were partially
				 * delivering now then we no longer are.
				 * Note this will change with the reassembly
				 * re-write.
				 */
				asoc->fragmented_delivery_inprogress = 0;
			}
			sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
			TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
				if ((ctl->sinfo_stream == stseq->stream) &&
				    (ctl->sinfo_ssn == stseq->sequence)) {
					str_seq = (stseq->stream << 16) | stseq->sequence;
					ctl->end_added = 1;
					ctl->pdapi_aborted = 1;
					/*
					 * Temporarily point control_pdapi at
					 * this entry so the notification
					 * code targets it, then restore the
					 * saved value.
					 */
					sv = stcb->asoc.control_pdapi;
					stcb->asoc.control_pdapi = ctl;
					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
					    stcb,
					    SCTP_PARTIAL_DELIVERY_ABORTED,
					    (void *)&str_seq,
					    SCTP_SO_NOT_LOCKED);
					stcb->asoc.control_pdapi = sv;
					break;
				} else if ((ctl->sinfo_stream == stseq->stream) &&
				    SCTP_SSN_GT(ctl->sinfo_ssn, stseq->sequence)) {
					/* We are past our victim SSN */
					break;
				}
			}
			strm = &asoc->strmin[stseq->stream];
			if (SCTP_SSN_GT(stseq->sequence, strm->last_sequence_delivered)) {
				/* Update the sequence number */
				strm->last_sequence_delivered = stseq->sequence;
			}
			/* now kick the stream the new way */
			/* sa_ignore NO_NULL_CHK */
			sctp_kick_prsctp_reorder_queue(stcb, strm);
		}
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
	}
	/*
	 * Now slide thing forward.
	 */
	sctp_slide_mapping_arrays(stcb);

	if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
		/* now lets kick out and check for more fragmented delivery */
		/* sa_ignore NO_NULL_CHK */
		sctp_deliver_reasm_check(stcb, &stcb->asoc);
	}
}
Cache object: b2cc1b4fd292a1da3841fdf59771196b
|