FreeBSD/Linux Kernel Cross Reference
sys/dev/kttcp.c
1 /* $NetBSD: kttcp.c,v 1.21.2.1 2007/02/18 00:05:38 tron Exp $ */
2
3 /*
4 * Copyright (c) 2002 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Frank van der Linden and Jason R. Thorpe for
8 * Wasabi Systems, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed for the NetBSD Project by
21 * Wasabi Systems, Inc.
22 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
23 * or promote products derived from this software without specific prior
24 * written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
 * kttcp.c -- provides kernel support for network throughput testing,
41 * see kttcp(4)
42 */
43
44 #include <sys/cdefs.h>
45 __KERNEL_RCSID(0, "$NetBSD: kttcp.c,v 1.21.2.1 2007/02/18 00:05:38 tron Exp $");
46
47 #include <sys/param.h>
48 #include <sys/types.h>
49 #include <sys/ioctl.h>
50 #include <sys/file.h>
51 #include <sys/filedesc.h>
52 #include <sys/conf.h>
53 #include <sys/systm.h>
54 #include <sys/protosw.h>
55 #include <sys/proc.h>
56 #include <sys/resourcevar.h>
57 #include <sys/signal.h>
58 #include <sys/socketvar.h>
59 #include <sys/socket.h>
60 #include <sys/mbuf.h>
61 #include <sys/sa.h>
62 #include <sys/mount.h>
63 #include <sys/syscallargs.h>
64
65 #include <dev/kttcpio.h>
66
67 static int kttcp_send(struct lwp *l, struct kttcp_io_args *);
68 static int kttcp_recv(struct lwp *l, struct kttcp_io_args *);
69 static int kttcp_sosend(struct socket *, unsigned long long,
70 unsigned long long *, struct lwp *, int);
71 static int kttcp_soreceive(struct socket *, unsigned long long,
72 unsigned long long *, struct lwp *, int *);
73
74 void kttcpattach(int);
75
76 dev_type_ioctl(kttcpioctl);
77
/*
 * Character device switch for /dev/kttcp.  Only ioctl is implemented;
 * every other entry point is the corresponding null/enodev stub.
 */
const struct cdevsw kttcp_cdevsw = {
	nullopen, nullclose, noread, nowrite, kttcpioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_OTHER
};
82
/*
 * kttcpattach:
 *	Pseudo-device attach routine.  The driver keeps no global state,
 *	so there is nothing to initialize.
 */
void
kttcpattach(int count)
{
	/* Do nothing. */
}
88
89 int
90 kttcpioctl(dev_t dev, u_long cmd, caddr_t data, int flag,
91 struct lwp *l)
92 {
93 int error;
94
95 if ((flag & FWRITE) == 0)
96 return EPERM;
97
98 switch (cmd) {
99 case KTTCP_IO_SEND:
100 error = kttcp_send(l, (struct kttcp_io_args *) data);
101 break;
102
103 case KTTCP_IO_RECV:
104 error = kttcp_recv(l, (struct kttcp_io_args *) data);
105 break;
106
107 default:
108 return EINVAL;
109 }
110
111 return error;
112 }
113
114 static int
115 kttcp_send(struct lwp *l, struct kttcp_io_args *kio)
116 {
117 struct file *fp;
118 int error;
119 struct timeval t0, t1;
120 unsigned long long len, done;
121
122 if (kio->kio_totalsize >= KTTCP_MAX_XMIT)
123 return EINVAL;
124
125 fp = fd_getfile(l->l_proc->p_fd, kio->kio_socket);
126 if (fp == NULL)
127 return EBADF;
128 FILE_USE(fp);
129 if (fp->f_type != DTYPE_SOCKET) {
130 FILE_UNUSE(fp, l);
131 return EFTYPE;
132 }
133
134 len = kio->kio_totalsize;
135 microtime(&t0);
136 do {
137 error = kttcp_sosend((struct socket *)fp->f_data, len,
138 &done, l, 0);
139 len -= done;
140 } while (error == 0 && len > 0);
141
142 FILE_UNUSE(fp, l);
143
144 microtime(&t1);
145 if (error != 0)
146 return error;
147 timersub(&t1, &t0, &kio->kio_elapsed);
148
149 kio->kio_bytesdone = kio->kio_totalsize - len;
150
151 return 0;
152 }
153
154 static int
155 kttcp_recv(struct lwp *l, struct kttcp_io_args *kio)
156 {
157 struct file *fp;
158 int error;
159 struct timeval t0, t1;
160 unsigned long long len, done;
161
162 done = 0; /* XXX gcc */
163
164 if (kio->kio_totalsize > KTTCP_MAX_XMIT)
165 return EINVAL;
166
167 fp = fd_getfile(l->l_proc->p_fd, kio->kio_socket);
168 if (fp == NULL)
169 return EBADF;
170 FILE_USE(fp);
171 if (fp->f_type != DTYPE_SOCKET) {
172 FILE_UNUSE(fp, l);
173 return EBADF;
174 }
175 len = kio->kio_totalsize;
176 microtime(&t0);
177 do {
178 error = kttcp_soreceive((struct socket *)fp->f_data,
179 len, &done, l, NULL);
180 len -= done;
181 } while (error == 0 && len > 0 && done > 0);
182
183 FILE_UNUSE(fp, l);
184
185 microtime(&t1);
186 if (error == EPIPE)
187 error = 0;
188 if (error != 0)
189 return error;
190 timersub(&t1, &t0, &kio->kio_elapsed);
191
192 kio->kio_bytesdone = kio->kio_totalsize - len;
193
194 return 0;
195 }
196
/* Map MSG_DONTWAIT to the non-blocking flavor of sblock(). */
#define SBLOCKWAIT(f) (((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
198
199 /*
200 * Slightly changed version of sosend()
201 */
202 static int
203 kttcp_sosend(struct socket *so, unsigned long long slen,
204 unsigned long long *done, struct lwp *l, int flags)
205 {
206 struct mbuf **mp, *m, *top;
207 long space, len, mlen;
208 int error, s, dontroute, atomic;
209 long long resid;
210
211 atomic = sosendallatonce(so);
212 resid = slen;
213 top = NULL;
214 /*
215 * In theory resid should be unsigned.
216 * However, space must be signed, as it might be less than 0
217 * if we over-committed, and we must use a signed comparison
218 * of space and resid. On the other hand, a negative resid
219 * causes us to loop sending 0-length segments to the protocol.
220 */
221 if (resid < 0) {
222 error = EINVAL;
223 goto out;
224 }
225 dontroute =
226 (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
227 (so->so_proto->pr_flags & PR_ATOMIC);
228 /* WRS XXX - are we doing per-lwp or per-proc stats? */
229 l->l_proc->p_stats->p_ru.ru_msgsnd++;
230 #define snderr(errno) { error = errno; splx(s); goto release; }
231
232 restart:
233 if ((error = sblock(&so->so_snd, SBLOCKWAIT(flags))) != 0)
234 goto out;
235 do {
236 s = splsoftnet();
237 if (so->so_state & SS_CANTSENDMORE)
238 snderr(EPIPE);
239 if (so->so_error) {
240 error = so->so_error;
241 so->so_error = 0;
242 splx(s);
243 goto release;
244 }
245 if ((so->so_state & SS_ISCONNECTED) == 0) {
246 if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
247 if ((so->so_state & SS_ISCONFIRMING) == 0)
248 snderr(ENOTCONN);
249 } else
250 snderr(EDESTADDRREQ);
251 }
252 space = sbspace(&so->so_snd);
253 if (flags & MSG_OOB)
254 space += 1024;
255 if ((atomic && resid > so->so_snd.sb_hiwat))
256 snderr(EMSGSIZE);
257 if (space < resid && (atomic || space < so->so_snd.sb_lowat)) {
258 if (so->so_state & SS_NBIO)
259 snderr(EWOULDBLOCK);
260 SBLASTRECORDCHK(&so->so_rcv,
261 "kttcp_soreceive sbwait 1");
262 SBLASTMBUFCHK(&so->so_rcv,
263 "kttcp_soreceive sbwait 1");
264 sbunlock(&so->so_snd);
265 error = sbwait(&so->so_snd);
266 splx(s);
267 if (error)
268 goto out;
269 goto restart;
270 }
271 splx(s);
272 mp = ⊤
273 do {
274 do {
275 if (top == 0) {
276 m = m_gethdr(M_WAIT, MT_DATA);
277 mlen = MHLEN;
278 m->m_pkthdr.len = 0;
279 m->m_pkthdr.rcvif = NULL;
280 } else {
281 m = m_get(M_WAIT, MT_DATA);
282 mlen = MLEN;
283 }
284 if (resid >= MINCLSIZE && space >= MCLBYTES) {
285 m_clget(m, M_WAIT);
286 if ((m->m_flags & M_EXT) == 0)
287 goto nopages;
288 mlen = MCLBYTES;
289 #ifdef MAPPED_MBUFS
290 len = lmin(MCLBYTES, resid);
291 #else
292 if (atomic && top == 0) {
293 len = lmin(MCLBYTES - max_hdr,
294 resid);
295 m->m_data += max_hdr;
296 } else
297 len = lmin(MCLBYTES, resid);
298 #endif
299 space -= len;
300 } else {
301 nopages:
302 len = lmin(lmin(mlen, resid), space);
303 space -= len;
304 /*
305 * For datagram protocols, leave room
306 * for protocol headers in first mbuf.
307 */
308 if (atomic && top == 0 && len < mlen)
309 MH_ALIGN(m, len);
310 }
311 resid -= len;
312 m->m_len = len;
313 *mp = m;
314 top->m_pkthdr.len += len;
315 if (error)
316 goto release;
317 mp = &m->m_next;
318 if (resid <= 0) {
319 if (flags & MSG_EOR)
320 top->m_flags |= M_EOR;
321 break;
322 }
323 } while (space > 0 && atomic);
324
325 s = splsoftnet();
326
327 if (so->so_state & SS_CANTSENDMORE)
328 snderr(EPIPE);
329
330 if (dontroute)
331 so->so_options |= SO_DONTROUTE;
332 if (resid > 0)
333 so->so_state |= SS_MORETOCOME;
334 error = (*so->so_proto->pr_usrreq)(so,
335 (flags & MSG_OOB) ? PRU_SENDOOB : PRU_SEND,
336 top, NULL, NULL, l);
337 if (dontroute)
338 so->so_options &= ~SO_DONTROUTE;
339 if (resid > 0)
340 so->so_state &= ~SS_MORETOCOME;
341 splx(s);
342
343 top = 0;
344 mp = ⊤
345 if (error)
346 goto release;
347 } while (resid && space > 0);
348 } while (resid);
349
350 release:
351 sbunlock(&so->so_snd);
352 out:
353 if (top)
354 m_freem(top);
355 *done = slen - resid;
356 #if 0
357 printf("sosend: error %d slen %llu resid %lld\n", error, slen, resid);
358 #endif
359 return (error);
360 }
361
/*
 * kttcp_soreceive:
 *	Slightly changed version of soreceive(): data is discarded as it
 *	arrives instead of being copied out via a uio -- note that 'mp'
 *	is set to NULL up front and never changed, so the mbuf-passback
 *	branches below are dead in this variant and data mbufs are simply
 *	freed.  On return, *done holds the number of bytes consumed.
 */
static int
kttcp_soreceive(struct socket *so, unsigned long long slen,
    unsigned long long *done, struct lwp *l, int *flagsp)
{
	struct mbuf *m, **mp;
	int flags, len, error, s, offset, moff, type;
	long long orig_resid, resid;
	const struct protosw *pr;
	struct mbuf *nextrecord;

	pr = so->so_proto;
	mp = NULL;
	type = 0;
	resid = orig_resid = slen;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		/* Out-of-band data: fetch it via the protocol and discard. */
		m = m_get(M_WAIT, MT_DATA);
		error = (*pr->pr_usrreq)(so, PRU_RCVOOB, m,
		    (struct mbuf *)(long)(flags & MSG_PEEK), NULL, NULL);
		if (error)
			goto bad;
		do {
			resid -= min(resid, m->m_len);
			m = m_free(m);
		} while (resid && error == 0 && m);
 bad:
		if (m)
			m_freem(m);
		return (error);
	}
	if (mp)
		*mp = NULL;
	if (so->so_state & SS_ISCONFIRMING && resid)
		(*pr->pr_usrreq)(so, PRU_RCVD, NULL, NULL, NULL, NULL);

 restart:
	/* Lock the receive buffer and block software network interrupts. */
	if ((error = sblock(&so->so_rcv, SBLOCKWAIT(flags))) != 0)
		return (error);
	s = splsoftnet();

	m = so->so_rcv.sb_mb;
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark,
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat), or
	 *   3. MSG_DONTWAIT is not set.
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    so->so_rcv.sb_cc < resid) &&
	    (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
	    ((flags & MSG_WAITALL) && resid <= so->so_rcv.sb_hiwat)) &&
	    m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) {
#ifdef DIAGNOSTIC
		if (m == NULL && so->so_rcv.sb_cc)
			panic("receive 1");
#endif
		if (so->so_error) {
			/* Deliver queued data before reporting the error. */
			if (m)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			else
				goto release;
		}
		/* Queued OOB data or a record end lets us proceed now. */
		for (; m; m = m->m_next)
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.sb_mb;
				goto dontblock;
			}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (resid == 0)
			goto release;
		if ((so->so_state & SS_NBIO) || (flags & MSG_DONTWAIT)) {
			error = EWOULDBLOCK;
			goto release;
		}
		/* Sleep until data arrives, then re-evaluate from the top. */
		sbunlock(&so->so_rcv);
		error = sbwait(&so->so_rcv);
		splx(s);
		if (error)
			return (error);
		goto restart;
	}
 dontblock:
	/*
	 * On entry here, m points to the first record of the socket buffer.
	 * While we process the initial mbufs containing address and control
	 * info, we save a copy of m->m_nextpkt into nextrecord.
	 */
#ifdef notyet /* XXXX */
	if (uio->uio_lwp)
		uio->uio_lwp->l_proc->p_stats->p_ru.ru_msgrcv++;
#endif
	KASSERT(m == so->so_rcv.sb_mb);
	SBLASTRECORDCHK(&so->so_rcv, "kttcp_soreceive 1");
	SBLASTMBUFCHK(&so->so_rcv, "kttcp_soreceive 1");
	nextrecord = m->m_nextpkt;
	if (pr->pr_flags & PR_ADDR) {
		/* Strip the leading address mbuf of an addressed protocol. */
#ifdef DIAGNOSTIC
		if (m->m_type != MT_SONAME)
			panic("receive 1a");
#endif
		orig_resid = 0;
		if (flags & MSG_PEEK) {
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			MFREE(m, so->so_rcv.sb_mb);
			m = so->so_rcv.sb_mb;
		}
	}
	/* Strip (or skip over, when peeking) any control mbufs. */
	while (m && m->m_type == MT_CONTROL && error == 0) {
		if (flags & MSG_PEEK) {
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			MFREE(m, so->so_rcv.sb_mb);
			m = so->so_rcv.sb_mb;
		}
	}

	/*
	 * If m is non-NULL, we have some data to read.  From now on,
	 * make sure to keep sb_lastrecord consistent when working on
	 * the last packet on the chain (nextrecord == NULL) and we
	 * change m->m_nextpkt.
	 */
	if (m) {
		if ((flags & MSG_PEEK) == 0) {
			m->m_nextpkt = nextrecord;
			/*
			 * If nextrecord == NULL (this is a single chain),
			 * then sb_lastrecord may not be valid here if m
			 * was changed earlier.
			 */
			if (nextrecord == NULL) {
				KASSERT(so->so_rcv.sb_mb == m);
				so->so_rcv.sb_lastrecord = m;
			}
		}
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	} else {
		if ((flags & MSG_PEEK) == 0) {
			KASSERT(so->so_rcv.sb_mb == m);
			so->so_rcv.sb_mb = nextrecord;
			SB_EMPTY_FIXUP(&so->so_rcv);
		}
	}
	SBLASTRECORDCHK(&so->so_rcv, "kttcp_soreceive 2");
	SBLASTMBUFCHK(&so->so_rcv, "kttcp_soreceive 2");

	/* Main consume loop: walk the record, discarding data mbufs. */
	moff = 0;
	offset = 0;
	while (m && resid > 0 && error == 0) {
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA)
			break;
#ifdef DIAGNOSTIC
		else if (m->m_type != MT_DATA && m->m_type != MT_HEADER)
			panic("receive 3");
#endif
		so->so_state &= ~SS_RCVATMARK;
		len = resid;
		/* Do not consume past the out-of-band mark in one go. */
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;
		/*
		 * If mp is set, just pass back the mbufs.
		 * Otherwise copy them out via the uio, then free.
		 * Sockbuf must be consistent here (points to current mbuf,
		 * it points to next record) when we drop priority;
		 * we must note any additions to the sockbuf when we
		 * block interrupts again.
		 * (NOTE(review): in this kttcp variant no uio copy-out
		 * happens; the data is simply accounted for and freed.)
		 */
		resid -= len;
		if (len == m->m_len - moff) {
			/* Entire mbuf consumed: unlink and free it. */
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				nextrecord = m->m_nextpkt;
				sbfree(&so->so_rcv, m);
				if (mp) {
					*mp = m;
					mp = &m->m_next;
					so->so_rcv.sb_mb = m = m->m_next;
					*mp = NULL;
				} else {
					MFREE(m, so->so_rcv.sb_mb);
					m = so->so_rcv.sb_mb;
				}
				/*
				 * If m != NULL, we also know that
				 * so->so_rcv.sb_mb != NULL.
				 */
				KASSERT(so->so_rcv.sb_mb == m);
				if (m) {
					m->m_nextpkt = nextrecord;
					if (nextrecord == NULL)
						so->so_rcv.sb_lastrecord = m;
				} else {
					so->so_rcv.sb_mb = nextrecord;
					SB_EMPTY_FIXUP(&so->so_rcv);
				}
				SBLASTRECORDCHK(&so->so_rcv,
				    "kttcp_soreceive 3");
				SBLASTMBUFCHK(&so->so_rcv,
				    "kttcp_soreceive 3");
			}
		} else {
			/* Partial mbuf: trim the consumed prefix in place. */
			if (flags & MSG_PEEK)
				moff += len;
			else {
				if (mp)
					*mp = m_copym(m, 0, len, M_WAIT);
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.sb_cc -= len;
			}
		}
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					so->so_state |= SS_RCVATMARK;
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for non-atomic socket),
		 * we must not quit until "uio->uio_resid == 0" or an error
		 * termination.  If a signal/timeout occurs, return
		 * with a short count but without error.
		 * Keep sockbuf locked against other readers.
		 */
		while (flags & MSG_WAITALL && m == NULL && resid > 0 &&
		    !sosendallatonce(so) && !nextrecord) {
			if (so->so_error || so->so_state & SS_CANTRCVMORE)
				break;
			/*
			 * If we are peeking and the socket receive buffer is
			 * full, stop since we can't get more data to peek at.
			 */
			if ((flags & MSG_PEEK) && sbspace(&so->so_rcv) <= 0)
				break;
			/*
			 * If we've drained the socket buffer, tell the
			 * protocol in case it needs to do something to
			 * get it filled again.
			 */
			if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb)
				(*pr->pr_usrreq)(so, PRU_RCVD, NULL,
				    (struct mbuf *)(long)flags, NULL, NULL);
			SBLASTRECORDCHK(&so->so_rcv,
			    "kttcp_soreceive sbwait 2");
			SBLASTMBUFCHK(&so->so_rcv,
			    "kttcp_soreceive sbwait 2");
			error = sbwait(&so->so_rcv);
			if (error) {
				/*
				 * Signal/timeout: per the MSG_WAITALL
				 * comment above, return a short count
				 * without error.
				 */
				sbunlock(&so->so_rcv);
				splx(s);
				return (0);
			}
			if ((m = so->so_rcv.sb_mb) != NULL)
				nextrecord = m->m_nextpkt;
		}
	}

	if (m && pr->pr_flags & PR_ATOMIC) {
		/* Atomic protocol: drop the unread remainder of the record. */
		flags |= MSG_TRUNC;
		if ((flags & MSG_PEEK) == 0)
			(void) sbdroprecord(&so->so_rcv);
	}
	if ((flags & MSG_PEEK) == 0) {
		if (m == NULL) {
			/*
			 * First part is an SB_EMPTY_FIXUP().  Second part
			 * makes sure sb_lastrecord is up-to-date if
			 * there is still data in the socket buffer.
			 */
			so->so_rcv.sb_mb = nextrecord;
			if (so->so_rcv.sb_mb == NULL) {
				so->so_rcv.sb_mbtail = NULL;
				so->so_rcv.sb_lastrecord = NULL;
			} else if (nextrecord->m_nextpkt == NULL)
				so->so_rcv.sb_lastrecord = nextrecord;
		}
		SBLASTRECORDCHK(&so->so_rcv, "kttcp_soreceive 4");
		SBLASTMBUFCHK(&so->so_rcv, "kttcp_soreceive 4");
		if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
			(*pr->pr_usrreq)(so, PRU_RCVD, NULL,
			    (struct mbuf *)(long)flags, NULL, NULL);
	}
	/* Nothing consumed and no terminal condition: try again. */
	if (orig_resid == resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
		sbunlock(&so->so_rcv);
		splx(s);
		goto restart;
	}

	if (flagsp)
		*flagsp |= flags;
 release:
	sbunlock(&so->so_rcv);
	splx(s);
	*done = slen - resid;
#if 0
	printf("soreceive: error %d slen %llu resid %lld\n", error, slen, resid);
#endif
	return (error);
}
Cache object: 72d03a8b26f04fb83b6bc86144754c74
|