FreeBSD/Linux Kernel Cross Reference
sys/netinet/tcp_lro.c
1 /*-
2 * Copyright (c) 2007, Myricom Inc.
3 * Copyright (c) 2008, Intel Corporation.
4 * Copyright (c) 2012 The FreeBSD Foundation
5 * Copyright (c) 2016 Mellanox Technologies.
6 * All rights reserved.
7 *
8 * Portions of this software were developed by Bjoern Zeeb
9 * under sponsorship from the FreeBSD Foundation.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 */
32
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD: releng/11.1/sys/netinet/tcp_lro.c 318772 2017-05-24 03:56:48Z np $");
35
36 #include "opt_inet.h"
37 #include "opt_inet6.h"
38
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/malloc.h>
43 #include <sys/mbuf.h>
44 #include <sys/socket.h>
45 #include <sys/sysctl.h>
46
47 #include <net/if.h>
48 #include <net/if_var.h>
49 #include <net/ethernet.h>
50 #include <net/vnet.h>
51
52 #include <netinet/in_systm.h>
53 #include <netinet/in.h>
54 #include <netinet/ip6.h>
55 #include <netinet/ip.h>
56 #include <netinet/ip_var.h>
57 #include <netinet/tcp.h>
58 #include <netinet/tcp_lro.h>
59 #include <netinet/tcp_var.h>
60
61 #include <netinet6/ip6_var.h>
62
63 #include <machine/in_cksum.h>
64
65 static MALLOC_DEFINE(M_LRO, "LRO", "LRO control structures");
66
67 #define TCP_LRO_UPDATE_CSUM 1
68 #ifndef TCP_LRO_UPDATE_CSUM
69 #define TCP_LRO_INVALID_CSUM 0x0000
70 #endif
71
72 static void tcp_lro_rx_done(struct lro_ctrl *lc);
73
74 SYSCTL_NODE(_net_inet_tcp, OID_AUTO, lro, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
75 "TCP LRO");
76
77 static unsigned tcp_lro_entries = TCP_LRO_ENTRIES;
78 SYSCTL_UINT(_net_inet_tcp_lro, OID_AUTO, entries,
79 CTLFLAG_RDTUN | CTLFLAG_MPSAFE, &tcp_lro_entries, 0,
80 "default number of LRO entries");
81
82 static __inline void
83 tcp_lro_active_insert(struct lro_ctrl *lc, struct lro_entry *le)
84 {
85
86 LIST_INSERT_HEAD(&lc->lro_active, le, next);
87 }
88
89 static __inline void
90 tcp_lro_active_remove(struct lro_entry *le)
91 {
92
93 LIST_REMOVE(le, next);
94 }
95
96 int
97 tcp_lro_init(struct lro_ctrl *lc)
98 {
99 return (tcp_lro_init_args(lc, NULL, tcp_lro_entries, 0));
100 }
101
102 int
103 tcp_lro_init_args(struct lro_ctrl *lc, struct ifnet *ifp,
104 unsigned lro_entries, unsigned lro_mbufs)
105 {
106 struct lro_entry *le;
107 size_t size;
108 unsigned i;
109
110 lc->lro_bad_csum = 0;
111 lc->lro_queued = 0;
112 lc->lro_flushed = 0;
113 lc->lro_mbuf_count = 0;
114 lc->lro_mbuf_max = lro_mbufs;
115 lc->lro_cnt = lro_entries;
116 lc->lro_ackcnt_lim = TCP_LRO_ACKCNT_MAX;
117 lc->lro_length_lim = TCP_LRO_LENGTH_MAX;
118 lc->ifp = ifp;
119 LIST_INIT(&lc->lro_free);
120 LIST_INIT(&lc->lro_active);
121
122 /* compute size to allocate */
123 size = (lro_mbufs * sizeof(struct lro_mbuf_sort)) +
124 (lro_entries * sizeof(*le));
125 lc->lro_mbuf_data = (struct lro_mbuf_sort *)
126 malloc(size, M_LRO, M_NOWAIT | M_ZERO);
127
128 /* check for out of memory */
129 if (lc->lro_mbuf_data == NULL) {
130 memset(lc, 0, sizeof(*lc));
131 return (ENOMEM);
132 }
133 /* compute offset for LRO entries */
134 le = (struct lro_entry *)
135 (lc->lro_mbuf_data + lro_mbufs);
136
137 /* setup linked list */
138 for (i = 0; i != lro_entries; i++)
139 LIST_INSERT_HEAD(&lc->lro_free, le + i, next);
140
141 return (0);
142 }
143
144 void
145 tcp_lro_free(struct lro_ctrl *lc)
146 {
147 struct lro_entry *le;
148 unsigned x;
149
150 /* reset LRO free list */
151 LIST_INIT(&lc->lro_free);
152
153 /* free active mbufs, if any */
154 while ((le = LIST_FIRST(&lc->lro_active)) != NULL) {
155 tcp_lro_active_remove(le);
156 m_freem(le->m_head);
157 }
158
159 /* free mbuf array, if any */
160 for (x = 0; x != lc->lro_mbuf_count; x++)
161 m_freem(lc->lro_mbuf_data[x].mb);
162 lc->lro_mbuf_count = 0;
163
164 /* free allocated memory, if any */
165 free(lc->lro_mbuf_data, M_LRO);
166 lc->lro_mbuf_data = NULL;
167 }
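
/*
 * Illustrative sketch, not part of the original file: a typical driver
 * RX path using this API.  tcp_lro_init() is called at attach time and
 * tcp_lro_free() at detach; for each batch of received frames the driver
 * feeds mbufs to tcp_lro_rx(), falls back to if_input() when a frame
 * cannot be aggregated, and flushes at the end of the batch.
 * example_rx_batch() and its parameters are hypothetical names.
 */
#if 0
static void
example_rx_batch(struct ifnet *ifp, struct lro_ctrl *lro, struct mbuf **pkts,
    int npkts)
{
	int i;

	for (i = 0; i < npkts; i++) {
		/* A zero csum means the driver did not pre-compute one. */
		if (tcp_lro_rx(lro, pkts[i], 0) != 0)
			(*ifp->if_input)(ifp, pkts[i]);
	}
	/* Hand any partially assembled packets to the stack. */
	tcp_lro_flush_all(lro);
}
#endif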
168
169 #ifdef TCP_LRO_UPDATE_CSUM
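/*
 * Compute the 16-bit one's-complement sum over the TCP header (th_off
 * 32-bit words, including options), folding carries back into the low
 * 16 bits.  The checksum field itself is zeroed first so it does not
 * contribute to the sum.
 */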
170 static uint16_t
171 tcp_lro_csum_th(struct tcphdr *th)
172 {
173 uint32_t ch;
174 uint16_t *p, l;
175
176 ch = th->th_sum = 0x0000;
177 l = th->th_off;
178 p = (uint16_t *)th;
179 while (l > 0) {
180 ch += *p;
181 p++;
182 ch += *p;
183 p++;
184 l--;
185 }
186 while (ch > 0xffff)
187 ch = (ch >> 16) + (ch & 0xffff);
188
189 return (ch & 0xffff);
190 }
191
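/*
 * Strip the length-dependent pseudo-header term and the TCP header's own
 * contribution out of a segment's checksum, leaving the sum over the
 * payload and the constant pseudo-header fields; tcp_lro_flush() re-adds
 * the final length and header when the aggregated packet is completed.
 */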
192 static uint16_t
193 tcp_lro_rx_csum_fixup(struct lro_entry *le, void *l3hdr, struct tcphdr *th,
194 uint16_t tcp_data_len, uint16_t csum)
195 {
196 uint32_t c;
197 uint16_t cs;
198
199 c = csum;
200
201 /* Remove length from checksum. */
202 switch (le->eh_type) {
203 #ifdef INET6
204 case ETHERTYPE_IPV6:
205 {
206 struct ip6_hdr *ip6;
207
208 ip6 = (struct ip6_hdr *)l3hdr;
209 if (le->append_cnt == 0)
210 cs = ip6->ip6_plen;
211 else {
212 uint32_t cx;
213
214 cx = ntohs(ip6->ip6_plen);
215 cs = in6_cksum_pseudo(ip6, cx, ip6->ip6_nxt, 0);
216 }
217 break;
218 }
219 #endif
220 #ifdef INET
221 case ETHERTYPE_IP:
222 {
223 struct ip *ip4;
224
225 ip4 = (struct ip *)l3hdr;
226 if (le->append_cnt == 0)
227 cs = ip4->ip_len;
228 else {
229 cs = in_addword(ntohs(ip4->ip_len) - sizeof(*ip4),
230 IPPROTO_TCP);
231 cs = in_pseudo(ip4->ip_src.s_addr, ip4->ip_dst.s_addr,
232 htons(cs));
233 }
234 break;
235 }
236 #endif
237 default:
238 cs = 0; /* Keep compiler happy. */
239 }
240
241 cs = ~cs;
242 c += cs;
243
244 /* Remove TCP header csum. */
245 cs = ~tcp_lro_csum_th(th);
246 c += cs;
247 while (c > 0xffff)
248 c = (c >> 16) + (c & 0xffff);
249
250 return (c & 0xffff);
251 }
252 #endif
253
254 static void
255 tcp_lro_rx_done(struct lro_ctrl *lc)
256 {
257 struct lro_entry *le;
258
259 while ((le = LIST_FIRST(&lc->lro_active)) != NULL) {
260 tcp_lro_active_remove(le);
261 tcp_lro_flush(lc, le);
262 }
263 }
264
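/*
 * Flush active entries that have not been appended to within the given
 * timeout, so partially built packets are not held indefinitely.
 */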
265 void
266 tcp_lro_flush_inactive(struct lro_ctrl *lc, const struct timeval *timeout)
267 {
268 struct lro_entry *le, *le_tmp;
269 struct timeval tv;
270
271 if (LIST_EMPTY(&lc->lro_active))
272 return;
273
274 getmicrotime(&tv);
275 timevalsub(&tv, timeout);
276 LIST_FOREACH_SAFE(le, &lc->lro_active, next, le_tmp) {
277 if (timevalcmp(&tv, &le->mtime, >=)) {
278 tcp_lro_active_remove(le);
279 tcp_lro_flush(lc, le);
280 }
281 }
282 }
283
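/*
 * Hand an entry to the stack.  For aggregated entries (append_cnt > 0)
 * the IP length, checksums and the latest ACK/window/timestamp values
 * are rewritten into the first segment's headers; in all cases the mbuf
 * chain is passed to if_input() and the entry returns to the free list.
 */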
284 void
285 tcp_lro_flush(struct lro_ctrl *lc, struct lro_entry *le)
286 {
287
288 if (le->append_cnt > 0) {
289 struct tcphdr *th;
290 uint16_t p_len;
291
292 p_len = htons(le->p_len);
293 switch (le->eh_type) {
294 #ifdef INET6
295 case ETHERTYPE_IPV6:
296 {
297 struct ip6_hdr *ip6;
298
299 ip6 = le->le_ip6;
300 ip6->ip6_plen = p_len;
301 th = (struct tcphdr *)(ip6 + 1);
302 le->m_head->m_pkthdr.csum_flags = CSUM_DATA_VALID |
303 CSUM_PSEUDO_HDR;
304 le->p_len += ETHER_HDR_LEN + sizeof(*ip6);
305 break;
306 }
307 #endif
308 #ifdef INET
309 case ETHERTYPE_IP:
310 {
311 struct ip *ip4;
312 #ifdef TCP_LRO_UPDATE_CSUM
313 uint32_t cl;
314 uint16_t c;
315 #endif
316
317 ip4 = le->le_ip4;
318 #ifdef TCP_LRO_UPDATE_CSUM
319 /* Fix IP header checksum for new length. */
320 c = ~ip4->ip_sum;
321 cl = c;
322 c = ~ip4->ip_len;
323 cl += c + p_len;
324 while (cl > 0xffff)
325 cl = (cl >> 16) + (cl & 0xffff);
326 c = cl;
327 ip4->ip_sum = ~c;
328 #else
329 ip4->ip_sum = TCP_LRO_INVALID_CSUM;
330 #endif
331 ip4->ip_len = p_len;
332 th = (struct tcphdr *)(ip4 + 1);
333 le->m_head->m_pkthdr.csum_flags = CSUM_DATA_VALID |
334 CSUM_PSEUDO_HDR | CSUM_IP_CHECKED | CSUM_IP_VALID;
335 le->p_len += ETHER_HDR_LEN;
336 break;
337 }
338 #endif
339 default:
340 th = NULL; /* Keep compiler happy. */
341 }
342 le->m_head->m_pkthdr.csum_data = 0xffff;
343 le->m_head->m_pkthdr.len = le->p_len;
344
345 /* Incorporate the latest ACK into the TCP header. */
346 th->th_ack = le->ack_seq;
347 th->th_win = le->window;
348 /* Incorporate latest timestamp into the TCP header. */
349 if (le->timestamp != 0) {
350 uint32_t *ts_ptr;
351
352 ts_ptr = (uint32_t *)(th + 1);
353 ts_ptr[1] = htonl(le->tsval);
354 ts_ptr[2] = le->tsecr;
355 }
356 #ifdef TCP_LRO_UPDATE_CSUM
357 /* Update the TCP header checksum. */
358 le->ulp_csum += p_len;
359 le->ulp_csum += tcp_lro_csum_th(th);
360 while (le->ulp_csum > 0xffff)
361 le->ulp_csum = (le->ulp_csum >> 16) +
362 (le->ulp_csum & 0xffff);
363 th->th_sum = (le->ulp_csum & 0xffff);
364 th->th_sum = ~th->th_sum;
365 #else
366 th->th_sum = TCP_LRO_INVALID_CSUM;
367 #endif
368 }
369
370 (*lc->ifp->if_input)(lc->ifp, le->m_head);
371 lc->lro_queued += le->append_cnt + 1;
372 lc->lro_flushed++;
373 bzero(le, sizeof(*le));
374 LIST_INSERT_HEAD(&lc->lro_free, le, next);
375 }
376
377 #ifdef HAVE_INLINE_FLSLL
378 #define tcp_lro_msb_64(x) (1ULL << (flsll(x) - 1))
379 #else
380 static inline uint64_t
381 tcp_lro_msb_64(uint64_t x)
382 {
383 x |= (x >> 1);
384 x |= (x >> 2);
385 x |= (x >> 4);
386 x |= (x >> 8);
387 x |= (x >> 16);
388 x |= (x >> 32);
389 return (x & ~(x >> 1));
390 }
391 #endif
392
393 /*
394 * The tcp_lro_sort() routine is comparable to qsort(), except it has
395 * a worst case complexity limit of O(MIN(N,64)*N), where N is the
396 * number of elements to sort and 64 is the number of sequence bits
397 * available. The algorithm is bit-slicing the 64-bit sequence number,
398 * sorting one bit at a time from the most significant bit until the
399 * least significant one, skipping the constant bits. This is
400 * typically called a radix sort.
401 */
402 static void
403 tcp_lro_sort(struct lro_mbuf_sort *parray, uint32_t size)
404 {
405 struct lro_mbuf_sort temp;
406 uint64_t ones;
407 uint64_t zeros;
408 uint32_t x;
409 uint32_t y;
410
411 repeat:
412 /* for small arrays insertion sort is faster */
413 if (size <= 12) {
414 for (x = 1; x < size; x++) {
415 temp = parray[x];
416 for (y = x; y > 0 && temp.seq < parray[y - 1].seq; y--)
417 parray[y] = parray[y - 1];
418 parray[y] = temp;
419 }
420 return;
421 }
422
423 /* compute sequence bits which are constant */
424 ones = 0;
425 zeros = 0;
426 for (x = 0; x != size; x++) {
427 ones |= parray[x].seq;
428 zeros |= ~parray[x].seq;
429 }
430
431 /* compute bits which are not constant into "ones" */
432 ones &= zeros;
433 if (ones == 0)
434 return;
435
436 /* pick the most significant bit which is not constant */
437 ones = tcp_lro_msb_64(ones);
438
439 /*
440 * Move entries having cleared sequence bits to the beginning
441 * of the array:
442 */
443 for (x = y = 0; y != size; y++) {
444 /* skip set bits */
445 if (parray[y].seq & ones)
446 continue;
447 /* swap entries */
448 temp = parray[x];
449 parray[x] = parray[y];
450 parray[y] = temp;
451 x++;
452 }
453
454 KASSERT(x != 0 && x != size, ("Memory is corrupted\n"));
455
456 /* sort zeros */
457 tcp_lro_sort(parray, x);
458
459 /* sort ones */
460 parray += x;
461 size -= x;
462 goto repeat;
463 }
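
/*
 * Worked example (illustrative, not from the original file): ignoring the
 * small-array insertion-sort cutoff, the keys {6, 2, 7, 3} = {110b, 010b,
 * 111b, 011b} have bit 1 constant, so the first pass partitions on bit 2
 * into {2, 3 | 6, 7}; each half is then sorted on the remaining
 * non-constant bit (bit 0).
 */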
464
465 void
466 tcp_lro_flush_all(struct lro_ctrl *lc)
467 {
468 uint64_t seq;
469 uint64_t nseq;
470 unsigned x;
471
472 /* check if no mbufs to flush */
473 if (lc->lro_mbuf_count == 0)
474 goto done;
475
476 /* sort all mbufs according to stream */
477 tcp_lro_sort(lc->lro_mbuf_data, lc->lro_mbuf_count);
478
479 /* input data into LRO engine, stream by stream */
480 seq = 0;
481 for (x = 0; x != lc->lro_mbuf_count; x++) {
482 struct mbuf *mb;
483
484 /* get mbuf */
485 mb = lc->lro_mbuf_data[x].mb;
486
487 /* get sequence number, masking away the packet index */
488 nseq = lc->lro_mbuf_data[x].seq & (-1ULL << 24);
489
490 /* check for new stream */
491 if (seq != nseq) {
492 seq = nseq;
493
494 /* flush active streams */
495 tcp_lro_rx_done(lc);
496 }
497
498 /* add packet to LRO engine */
499 if (tcp_lro_rx(lc, mb, 0) != 0) {
500 /* input packet to network layer */
501 (*lc->ifp->if_input)(lc->ifp, mb);
502 lc->lro_queued++;
503 lc->lro_flushed++;
504 }
505 }
506 done:
507 /* flush active streams */
508 tcp_lro_rx_done(lc);
509
510 lc->lro_mbuf_count = 0;
511 }
512
513 #ifdef INET6
514 static int
515 tcp_lro_rx_ipv6(struct lro_ctrl *lc, struct mbuf *m, struct ip6_hdr *ip6,
516 struct tcphdr **th)
517 {
518
519 /* XXX-BZ we should check the flow-label. */
520
521 /* XXX-BZ We do not yet support ext. hdrs. */
522 if (ip6->ip6_nxt != IPPROTO_TCP)
523 return (TCP_LRO_NOT_SUPPORTED);
524
525 /* Find the TCP header. */
526 *th = (struct tcphdr *)(ip6 + 1);
527
528 return (0);
529 }
530 #endif
531
532 #ifdef INET
533 static int
534 tcp_lro_rx_ipv4(struct lro_ctrl *lc, struct mbuf *m, struct ip *ip4,
535 struct tcphdr **th)
536 {
537 int csum_flags;
538 uint16_t csum;
539
540 if (ip4->ip_p != IPPROTO_TCP)
541 return (TCP_LRO_NOT_SUPPORTED);
542
543 /* Ensure there are no options. */
544 if ((ip4->ip_hl << 2) != sizeof (*ip4))
545 return (TCP_LRO_CANNOT);
546
547 /* .. and the packet is not fragmented. */
548 if (ip4->ip_off & htons(IP_MF|IP_OFFMASK))
549 return (TCP_LRO_CANNOT);
550
551 /* Legacy IP has a header checksum that needs to be correct. */
552 csum_flags = m->m_pkthdr.csum_flags;
553 if (csum_flags & CSUM_IP_CHECKED) {
554 if (__predict_false((csum_flags & CSUM_IP_VALID) == 0)) {
555 lc->lro_bad_csum++;
556 return (TCP_LRO_CANNOT);
557 }
558 } else {
559 csum = in_cksum_hdr(ip4);
560 if (__predict_false((csum) != 0)) {
561 lc->lro_bad_csum++;
562 return (TCP_LRO_CANNOT);
563 }
564 }
565
566 /* Find the TCP header (we assured there are no IP options). */
567 *th = (struct tcphdr *)(ip4 + 1);
568
569 return (0);
570 }
571 #endif
572
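/*
 * Main per-packet entry point: parse the Ethernet/IP/TCP headers and
 * either append the segment to a matching active entry, start a new
 * entry, or return non-zero so the caller passes the mbuf to the stack
 * unmodified.
 */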
573 int
574 tcp_lro_rx(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum)
575 {
576 struct lro_entry *le;
577 struct ether_header *eh;
578 #ifdef INET6
579 struct ip6_hdr *ip6 = NULL; /* Keep compiler happy. */
580 #endif
581 #ifdef INET
582 struct ip *ip4 = NULL; /* Keep compiler happy. */
583 #endif
584 struct tcphdr *th;
585 void *l3hdr = NULL; /* Keep compiler happy. */
586 uint32_t *ts_ptr;
587 tcp_seq seq;
588 int error, ip_len, l;
589 uint16_t eh_type, tcp_data_len;
590 int force_flush = 0;
591
592 /* We expect a contiguous header [eh, ip, tcp]. */
593
594 eh = mtod(m, struct ether_header *);
595 eh_type = ntohs(eh->ether_type);
596 switch (eh_type) {
597 #ifdef INET6
598 case ETHERTYPE_IPV6:
599 {
600 CURVNET_SET(lc->ifp->if_vnet);
601 if (V_ip6_forwarding != 0) {
602 /* XXX-BZ stats but changing lro_ctrl is a problem. */
603 CURVNET_RESTORE();
604 return (TCP_LRO_CANNOT);
605 }
606 CURVNET_RESTORE();
607 l3hdr = ip6 = (struct ip6_hdr *)(eh + 1);
608 error = tcp_lro_rx_ipv6(lc, m, ip6, &th);
609 if (error != 0)
610 return (error);
611 tcp_data_len = ntohs(ip6->ip6_plen);
612 ip_len = sizeof(*ip6) + tcp_data_len;
613 break;
614 }
615 #endif
616 #ifdef INET
617 case ETHERTYPE_IP:
618 {
619 CURVNET_SET(lc->ifp->if_vnet);
620 if (V_ipforwarding != 0) {
621 /* XXX-BZ stats but changing lro_ctrl is a problem. */
622 CURVNET_RESTORE();
623 return (TCP_LRO_CANNOT);
624 }
625 CURVNET_RESTORE();
626 l3hdr = ip4 = (struct ip *)(eh + 1);
627 error = tcp_lro_rx_ipv4(lc, m, ip4, &th);
628 if (error != 0)
629 return (error);
630 ip_len = ntohs(ip4->ip_len);
631 tcp_data_len = ip_len - sizeof(*ip4);
632 break;
633 }
634 #endif
635 /* XXX-BZ what happens in case of VLAN(s)? */
636 default:
637 return (TCP_LRO_NOT_SUPPORTED);
638 }
639
640 /*
641 * If the frame is padded beyond the end of the IP packet, then we must
642 * trim the extra bytes off.
643 */
644 l = m->m_pkthdr.len - (ETHER_HDR_LEN + ip_len);
645 if (l != 0) {
646 if (l < 0)
647 /* Truncated packet. */
648 return (TCP_LRO_CANNOT);
649
650 m_adj(m, -l);
651 }
652
653 /*
654 * Check TCP header constraints.
655 */
656 /* Ensure no bits set besides ACK or PSH. */
657 if ((th->th_flags & ~(TH_ACK | TH_PUSH)) != 0) {
658 if (th->th_flags & TH_SYN)
659 return (TCP_LRO_CANNOT);
660 /*
661 * Make sure that previously seen segments/ACKs are delivered
662 * before this segment, e.g. FIN.
663 */
664 force_flush = 1;
665 }
666
667 /* XXX-BZ We lose an ACK|PUSH flag concatenating multiple segments. */
668 /* XXX-BZ Ideally we'd flush on PUSH? */
669
670 /*
671 * Check for timestamps.
672 * Since the only option we handle is timestamps, we only have to
673 * handle the simple case of aligned timestamps.
674 */
675 l = (th->th_off << 2);
676 tcp_data_len -= l;
677 l -= sizeof(*th);
678 ts_ptr = (uint32_t *)(th + 1);
679 if (l != 0 && (__predict_false(l != TCPOLEN_TSTAMP_APPA) ||
680 (*ts_ptr != ntohl(TCPOPT_NOP<<24|TCPOPT_NOP<<16|
681 TCPOPT_TIMESTAMP<<8|TCPOLEN_TIMESTAMP)))) {
682 /*
683 * Make sure that previously seen segments/ACKs are delivered
684 * before this segment.
685 */
686 force_flush = 1;
687 }
688
689 /* If the driver did not pass in the checksum, set it now. */
690 if (csum == 0x0000)
691 csum = th->th_sum;
692
693 seq = ntohl(th->th_seq);
694
695 /* Try to find a matching previous segment. */
696 LIST_FOREACH(le, &lc->lro_active, next) {
697 if (le->eh_type != eh_type)
698 continue;
699 if (le->source_port != th->th_sport ||
700 le->dest_port != th->th_dport)
701 continue;
702 switch (eh_type) {
703 #ifdef INET6
704 case ETHERTYPE_IPV6:
705 if (bcmp(&le->source_ip6, &ip6->ip6_src,
706 sizeof(struct in6_addr)) != 0 ||
707 bcmp(&le->dest_ip6, &ip6->ip6_dst,
708 sizeof(struct in6_addr)) != 0)
709 continue;
710 break;
711 #endif
712 #ifdef INET
713 case ETHERTYPE_IP:
714 if (le->source_ip4 != ip4->ip_src.s_addr ||
715 le->dest_ip4 != ip4->ip_dst.s_addr)
716 continue;
717 break;
718 #endif
719 }
720
721 if (force_flush) {
722 /* Timestamp mismatch or a FIN, etc.; flush what we have and punt this segment. */
723 tcp_lro_active_remove(le);
724 tcp_lro_flush(lc, le);
725 return (TCP_LRO_CANNOT);
726 }
727
728 /* Flush now if appending will result in overflow. */
729 if (le->p_len > (lc->lro_length_lim - tcp_data_len)) {
730 tcp_lro_active_remove(le);
731 tcp_lro_flush(lc, le);
732 break;
733 }
734
735 /* Try to append the new segment. */
736 if (__predict_false(seq != le->next_seq ||
737 (tcp_data_len == 0 && le->ack_seq == th->th_ack))) {
738 /* Out of order packet or duplicate ACK. */
739 tcp_lro_active_remove(le);
740 tcp_lro_flush(lc, le);
741 return (TCP_LRO_CANNOT);
742 }
743
744 if (l != 0) {
745 uint32_t tsval = ntohl(*(ts_ptr + 1));
746 /* Make sure timestamp values are increasing. */
747 /* XXX-BZ flip and use TSTMP_GEQ macro for this? */
748 if (__predict_false(le->tsval > tsval ||
749 *(ts_ptr + 2) == 0))
750 return (TCP_LRO_CANNOT);
751 le->tsval = tsval;
752 le->tsecr = *(ts_ptr + 2);
753 }
754
755 le->next_seq += tcp_data_len;
756 le->ack_seq = th->th_ack;
757 le->window = th->th_win;
758 le->append_cnt++;
759
760 #ifdef TCP_LRO_UPDATE_CSUM
761 le->ulp_csum += tcp_lro_rx_csum_fixup(le, l3hdr, th,
762 tcp_data_len, ~csum);
763 #endif
764
765 if (tcp_data_len == 0) {
766 m_freem(m);
767 /*
768 * Flush this LRO entry, if this ACK should not
769 * be further delayed.
770 */
771 if (le->append_cnt >= lc->lro_ackcnt_lim) {
772 tcp_lro_active_remove(le);
773 tcp_lro_flush(lc, le);
774 }
775 return (0);
776 }
777
778 le->p_len += tcp_data_len;
779
780 /*
781 * Adjust the mbuf so that m_data points to the first byte of
782 * the ULP payload, demote its packet header, and append the
783 * new segment to the existing mbuf chain.
784 */
785 m_adj(m, m->m_pkthdr.len - tcp_data_len);
786 m_demote_pkthdr(m);
787
788 le->m_tail->m_next = m;
789 le->m_tail = m_last(m);
790
791 /*
792 * If a possible next full length packet would cause an
793 * overflow, pro-actively flush now.
794 */
795 if (le->p_len > (lc->lro_length_lim - lc->ifp->if_mtu)) {
796 tcp_lro_active_remove(le);
797 tcp_lro_flush(lc, le);
798 } else
799 getmicrotime(&le->mtime);
800
801 return (0);
802 }
803
804 if (force_flush) {
805 /*
806 * Nothing to flush, but this segment cannot be further
807 * aggregated/delayed.
808 */
809 return (TCP_LRO_CANNOT);
810 }
811
812 /* Try to find an empty slot. */
813 if (LIST_EMPTY(&lc->lro_free))
814 return (TCP_LRO_NO_ENTRIES);
815
816 /* Start a new segment chain. */
817 le = LIST_FIRST(&lc->lro_free);
818 LIST_REMOVE(le, next);
819 tcp_lro_active_insert(lc, le);
820 getmicrotime(&le->mtime);
821
822 /* Start filling in details. */
823 switch (eh_type) {
824 #ifdef INET6
825 case ETHERTYPE_IPV6:
826 le->le_ip6 = ip6;
827 le->source_ip6 = ip6->ip6_src;
828 le->dest_ip6 = ip6->ip6_dst;
829 le->eh_type = eh_type;
830 le->p_len = m->m_pkthdr.len - ETHER_HDR_LEN - sizeof(*ip6);
831 break;
832 #endif
833 #ifdef INET
834 case ETHERTYPE_IP:
835 le->le_ip4 = ip4;
836 le->source_ip4 = ip4->ip_src.s_addr;
837 le->dest_ip4 = ip4->ip_dst.s_addr;
838 le->eh_type = eh_type;
839 le->p_len = m->m_pkthdr.len - ETHER_HDR_LEN;
840 break;
841 #endif
842 }
843 le->source_port = th->th_sport;
844 le->dest_port = th->th_dport;
845
846 le->next_seq = seq + tcp_data_len;
847 le->ack_seq = th->th_ack;
848 le->window = th->th_win;
849 if (l != 0) {
850 le->timestamp = 1;
851 le->tsval = ntohl(*(ts_ptr + 1));
852 le->tsecr = *(ts_ptr + 2);
853 }
854
855 #ifdef TCP_LRO_UPDATE_CSUM
856 /*
857 * Do not touch the csum of the first packet. However, save the
858 * "adjusted" checksum of just the source and destination addresses,
859 * the next header and the TCP payload. The length and TCP header
860 * parts may change, so we remove those from the saved checksum and
861 * re-add with final values on tcp_lro_flush() if needed.
862 */
863 KASSERT(le->ulp_csum == 0, ("%s: le=%p le->ulp_csum=0x%04x\n",
864 __func__, le, le->ulp_csum));
865
866 le->ulp_csum = tcp_lro_rx_csum_fixup(le, l3hdr, th, tcp_data_len,
867 ~csum);
868 th->th_sum = csum; /* Restore checksum on first packet. */
869 #endif
870
871 le->m_head = m;
872 le->m_tail = m_last(m);
873
874 return (0);
875 }
876
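/*
 * Alternative, deferred-input path: drivers may queue mbufs here instead
 * of calling tcp_lro_rx() directly.  Queued mbufs are sorted by flow and
 * run through the LRO engine when the array fills up or when the driver
 * calls tcp_lro_flush_all().
 */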
877 void
878 tcp_lro_queue_mbuf(struct lro_ctrl *lc, struct mbuf *mb)
879 {
880 /* sanity checks */
881 if (__predict_false(lc->ifp == NULL || lc->lro_mbuf_data == NULL ||
882 lc->lro_mbuf_max == 0)) {
883 /* packet drop */
884 m_freem(mb);
885 return;
886 }
887
888 /* check if packet is not LRO capable */
889 if (__predict_false(mb->m_pkthdr.csum_flags == 0 ||
890 (lc->ifp->if_capenable & IFCAP_LRO) == 0)) {
891
892 /* input packet to network layer */
893 (*lc->ifp->if_input) (lc->ifp, mb);
894 return;
895 }
896
897 /* create sequence number */
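	/*
	 * 64-bit sort key: hash type in the top 8 bits, RSS flow id in
	 * the next 32 bits, and the queue index in the low 24 bits.
	 * Sorting on this key groups mbufs by flow while preserving
	 * arrival order within a flow; tcp_lro_flush_all() masks off the
	 * low 24 bits to detect stream boundaries.
	 */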
898 lc->lro_mbuf_data[lc->lro_mbuf_count].seq =
899 (((uint64_t)M_HASHTYPE_GET(mb)) << 56) |
900 (((uint64_t)mb->m_pkthdr.flowid) << 24) |
901 ((uint64_t)lc->lro_mbuf_count);
902
903 /* enter mbuf */
904 lc->lro_mbuf_data[lc->lro_mbuf_count].mb = mb;
905
906 /* flush if array is full */
907 if (__predict_false(++lc->lro_mbuf_count == lc->lro_mbuf_max))
908 tcp_lro_flush_all(lc);
909 }
910
911 /* end */