1 /*-
2 * Copyright (c) 1982, 1986, 1988, 1990, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 4. Neither the name of the University nor the names of its contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * @(#)uipc_socket2.c 8.1 (Berkeley) 6/10/93
30 */
31
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34
35 #include "opt_mac.h"
36 #include "opt_param.h"
37
38 #include <sys/param.h>
39 #include <sys/aio.h> /* for aio_swake proto */
40 #include <sys/domain.h>
41 #include <sys/event.h>
42 #include <sys/file.h> /* for maxfiles */
43 #include <sys/kernel.h>
44 #include <sys/lock.h>
45 #include <sys/mac.h>
46 #include <sys/malloc.h>
47 #include <sys/mbuf.h>
48 #include <sys/mutex.h>
49 #include <sys/proc.h>
50 #include <sys/protosw.h>
51 #include <sys/resourcevar.h>
52 #include <sys/signalvar.h>
53 #include <sys/socket.h>
54 #include <sys/socketvar.h>
55 #include <sys/stat.h>
56 #include <sys/sysctl.h>
57 #include <sys/systm.h>
58
59 int maxsockets;
60
61 void (*aio_swake)(struct socket *, struct sockbuf *);
62
63 /*
64 * Primitive routines for operating on sockets and socket buffers
65 */
66
67 u_long sb_max = SB_MAX;
68 static u_long sb_max_adj =
69 SB_MAX * MCLBYTES / (MSIZE + MCLBYTES); /* adjusted sb_max */
70
71 static u_long sb_efficiency = 8; /* parameter for sbreserve() */
72
73 /*
74 * Procedures to manipulate state flags of socket
75 * and do appropriate wakeups. Normal sequence from the
76 * active (originating) side is that soisconnecting() is
77 * called during processing of connect() call,
78 * resulting in an eventual call to soisconnected() if/when the
79 * connection is established. When the connection is torn down
80 * soisdisconnecting() is called during processing of disconnect() call,
81 * and soisdisconnected() is called when the connection to the peer
82 * is totally severed. The semantics of these routines are such that
83 * connectionless protocols can call soisconnected() and soisdisconnected()
84 * only, bypassing the in-progress calls when setting up a ``connection''
85 * takes no time.
86 *
87 * From the passive side, a socket is created with
88 * two queues of sockets: so_incomp for connections in progress
89 * and so_comp for connections already made and awaiting user acceptance.
90 * As a protocol is preparing incoming connections, it creates a socket
91 * structure queued on so_incomp by calling sonewconn(). When the connection
92 * is established, soisconnected() is called, and transfers the
93 * socket structure to so_comp, making it available to accept().
94 *
95 * If a socket is closed with sockets on either
96 * so_incomp or so_comp, these sockets are dropped.
97 *
98 * If higher level protocols are implemented in
99 * the kernel, the wakeups done here will sometimes
100 * cause software-interrupt process scheduling.
101 */
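
/*
 * Illustrative call sequence (a sketch assembled from the description
 * above, not a verbatim caller): a connection-oriented protocol drives
 * these transitions from its connect, input and disconnect paths, while
 * a connectionless protocol may skip the in-progress states entirely:
 *
 *	soisconnecting(so);	from the protocol's connect() processing
 *	...handshake completes...
 *	soisconnected(so);
 *	...disconnect() is processed...
 *	soisdisconnecting(so);
 *	...connection to the peer is fully severed...
 *	soisdisconnected(so);
 *
 * A connectionless protocol may simply call soisconnected() and later
 * soisdisconnected(), as noted above.
 */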
102
103 void
104 soisconnecting(so)
105 register struct socket *so;
106 {
107
108 SOCK_LOCK(so);
109 so->so_state &= ~(SS_ISCONNECTED|SS_ISDISCONNECTING);
110 so->so_state |= SS_ISCONNECTING;
111 SOCK_UNLOCK(so);
112 }
113
114 void
115 soisconnected(so)
116 struct socket *so;
117 {
118 struct socket *head;
119
120 SOCK_LOCK(so);
121 so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING|SS_ISCONFIRMING);
122 so->so_state |= SS_ISCONNECTED;
123 SOCK_UNLOCK(so);
124 ACCEPT_LOCK();
125 head = so->so_head;
126 if (head != NULL && (so->so_qstate & SQ_INCOMP)) {
127 if ((so->so_options & SO_ACCEPTFILTER) == 0) {
128 TAILQ_REMOVE(&head->so_incomp, so, so_list);
129 head->so_incqlen--;
130 so->so_qstate &= ~SQ_INCOMP;
131 TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
132 head->so_qlen++;
133 so->so_qstate |= SQ_COMP;
134 ACCEPT_UNLOCK();
135 sorwakeup(head);
136 wakeup_one(&head->so_timeo);
137 } else {
138 ACCEPT_UNLOCK();
139 SOCK_LOCK(so);
140 so->so_upcall =
141 head->so_accf->so_accept_filter->accf_callback;
142 so->so_upcallarg = head->so_accf->so_accept_filter_arg;
143 so->so_rcv.sb_flags |= SB_UPCALL;
144 so->so_options &= ~SO_ACCEPTFILTER;
145 SOCK_UNLOCK(so);
146 so->so_upcall(so, so->so_upcallarg, M_DONTWAIT);
147 }
148 return;
149 }
150 ACCEPT_UNLOCK();
151 wakeup(&so->so_timeo);
152 sorwakeup(so);
153 sowwakeup(so);
154 }
155
156 void
157 soisdisconnecting(so)
158 register struct socket *so;
159 {
160
161 /*
162 * XXXRW: This code assumes that SOCK_LOCK(so) and
163 * SOCKBUF_LOCK(&so->so_rcv) are the same.
164 */
165 SOCKBUF_LOCK(&so->so_rcv);
166 so->so_state &= ~SS_ISCONNECTING;
167 so->so_state |= SS_ISDISCONNECTING;
168 so->so_rcv.sb_state |= SBS_CANTRCVMORE;
169 sorwakeup_locked(so);
170 SOCKBUF_LOCK(&so->so_snd);
171 so->so_snd.sb_state |= SBS_CANTSENDMORE;
172 sowwakeup_locked(so);
173 wakeup(&so->so_timeo);
174 }
175
176 void
177 soisdisconnected(so)
178 register struct socket *so;
179 {
180
181 /*
182 * XXXRW: This code assumes that SOCK_LOCK(so) and
183 * SOCKBUF_LOCK(&so->so_rcv) are the same.
184 */
185 SOCKBUF_LOCK(&so->so_rcv);
186 so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING);
187 so->so_state |= SS_ISDISCONNECTED;
188 so->so_rcv.sb_state |= SBS_CANTRCVMORE;
189 sorwakeup_locked(so);
190 SOCKBUF_LOCK(&so->so_snd);
191 so->so_snd.sb_state |= SBS_CANTSENDMORE;
192 sbdrop_locked(&so->so_snd, so->so_snd.sb_cc);
193 sowwakeup_locked(so);
194 wakeup(&so->so_timeo);
195 }
196
197 /*
198 * When an attempt at a new connection is noted on a socket
199 * which accepts connections, sonewconn is called. If the
200 * connection is possible (subject to space constraints, etc.)
201 * then we allocate a new structure, properly linked into the
202 * data structure of the original socket, and return this.
203 * Connstatus may be 0, or SS_ISCONFIRMING, or SS_ISCONNECTED.
204 *
205 * note: the ref count on the socket is 0 on return
206 */
207 struct socket *
208 sonewconn(head, connstatus)
209 register struct socket *head;
210 int connstatus;
211 {
212 register struct socket *so;
213 int over;
214
215 ACCEPT_LOCK();
216 over = (head->so_qlen > 3 * head->so_qlimit / 2);
217 ACCEPT_UNLOCK();
218 if (over)
219 return (NULL);
220 so = soalloc(M_NOWAIT);
221 if (so == NULL)
222 return (NULL);
223 if ((head->so_options & SO_ACCEPTFILTER) != 0)
224 connstatus = 0;
225 so->so_head = head;
226 so->so_type = head->so_type;
227 so->so_options = head->so_options &~ SO_ACCEPTCONN;
228 so->so_linger = head->so_linger;
229 so->so_state = head->so_state | SS_NOFDREF;
230 so->so_proto = head->so_proto;
231 so->so_timeo = head->so_timeo;
232 so->so_cred = crhold(head->so_cred);
233 #ifdef MAC
234 SOCK_LOCK(head);
235 mac_create_socket_from_socket(head, so);
236 SOCK_UNLOCK(head);
237 #endif
238 knlist_init(&so->so_rcv.sb_sel.si_note, SOCKBUF_MTX(&so->so_rcv));
239 knlist_init(&so->so_snd.sb_sel.si_note, SOCKBUF_MTX(&so->so_snd));
240 if (soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat) ||
241 (*so->so_proto->pr_usrreqs->pru_attach)(so, 0, NULL)) {
242 sodealloc(so);
243 return (NULL);
244 }
245 so->so_state |= connstatus;
246 ACCEPT_LOCK();
247 if (connstatus) {
248 TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
249 so->so_qstate |= SQ_COMP;
250 head->so_qlen++;
251 } else {
252 /*
253 * Keep removing sockets from the head until there's room for
254 * us to insert on the tail. In pre-locking revisions, this
255 * was a simple if(), but as we could be racing with other
256 * threads and soabort() requires dropping locks, we must
257 * loop waiting for the condition to be true.
258 */
259 while (head->so_incqlen > head->so_qlimit) {
260 struct socket *sp;
261 sp = TAILQ_FIRST(&head->so_incomp);
262 TAILQ_REMOVE(&head->so_incomp, sp, so_list);
263 head->so_incqlen--;
264 sp->so_qstate &= ~SQ_INCOMP;
265 sp->so_head = NULL;
266 ACCEPT_UNLOCK();
267 (void) soabort(sp);
268 ACCEPT_LOCK();
269 }
270 TAILQ_INSERT_TAIL(&head->so_incomp, so, so_list);
271 so->so_qstate |= SQ_INCOMP;
272 head->so_incqlen++;
273 }
274 ACCEPT_UNLOCK();
275 if (connstatus) {
276 sorwakeup(head);
277 wakeup_one(&head->so_timeo);
278 }
279 return (so);
280 }
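
/*
 * Minimal usage sketch (the caller shown is hypothetical): a protocol
 * noticing a new connection attempt against a listening socket creates
 * the new socket with sonewconn() and attaches its own state to it:
 *
 *	so = sonewconn(head, 0);
 *	if (so == NULL)
 *		goto drop;	queue overflow or allocation failure
 *	...attach protocol state, continue the handshake...
 *
 * Passing SS_ISCONNECTED instead of 0 places the new socket directly on
 * so_comp, ready for accept().
 */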
281
282 /*
283 * Socantsendmore indicates that no more data will be sent on the
284 * socket; it is normally applied to a socket by the protocol code
285 * (in the case of PRU_SHUTDOWN) when the user informs the system that
286 * no more data is to be sent. Socantrcvmore indicates that no more data
287 * will be received, and will normally be applied to the socket by a
288 * protocol when it detects that the peer will send no more data.
289 * Data queued for reading in the socket may yet be read.
290 */
291 void
292 socantsendmore_locked(so)
293 struct socket *so;
294 {
295
296 SOCKBUF_LOCK_ASSERT(&so->so_snd);
297
298 so->so_snd.sb_state |= SBS_CANTSENDMORE;
299 sowwakeup_locked(so);
300 mtx_assert(SOCKBUF_MTX(&so->so_snd), MA_NOTOWNED);
301 }
302
303 void
304 socantsendmore(so)
305 struct socket *so;
306 {
307
308 SOCKBUF_LOCK(&so->so_snd);
309 socantsendmore_locked(so);
310 mtx_assert(SOCKBUF_MTX(&so->so_snd), MA_NOTOWNED);
311 }
312
313 void
314 socantrcvmore_locked(so)
315 struct socket *so;
316 {
317
318 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
319
320 so->so_rcv.sb_state |= SBS_CANTRCVMORE;
321 sorwakeup_locked(so);
322 mtx_assert(SOCKBUF_MTX(&so->so_rcv), MA_NOTOWNED);
323 }
324
325 void
326 socantrcvmore(so)
327 struct socket *so;
328 {
329
330 SOCKBUF_LOCK(&so->so_rcv);
331 socantrcvmore_locked(so);
332 mtx_assert(SOCKBUF_MTX(&so->so_rcv), MA_NOTOWNED);
333 }
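
/*
 * Sketch of the typical callers described above (the call sites are
 * assumptions, not taken from this file):
 *
 *	socantsendmore(so);	user has shut down the send side
 *	socantrcvmore(so);	protocol learned the peer will send no more
 *
 * Data already queued for reading remains readable, but blocked readers
 * and writers are woken.
 */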
334
335 /*
336 * Wait for data to arrive at/drain from a socket buffer.
337 */
338 int
339 sbwait(sb)
340 struct sockbuf *sb;
341 {
342
343 SOCKBUF_LOCK_ASSERT(sb);
344
345 sb->sb_flags |= SB_WAIT;
346 return (msleep(&sb->sb_cc, &sb->sb_mtx,
347 (sb->sb_flags & SB_NOINTR) ? PSOCK : PSOCK | PCATCH, "sbwait",
348 sb->sb_timeo));
349 }
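
/*
 * Typical use, as a sketch (the surrounding receive logic is assumed,
 * not shown in this file): the caller holds the sockbuf lock, re-checks
 * its condition and sleeps in sbwait(), which drops and reacquires the
 * sockbuf mutex around the sleep via msleep():
 *
 *	SOCKBUF_LOCK(&so->so_rcv);
 *	while (so->so_rcv.sb_cc == 0) {
 *		error = sbwait(&so->so_rcv);
 *		if (error)
 *			break;
 *	}
 *	SOCKBUF_UNLOCK(&so->so_rcv);
 */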
350
351 /*
352 * Acquire the SB_LOCK lock on a sockbuf, sleeping if another thread
353 * holds it; return any error returned from the sleep (EINTR).
354 */
355 int
356 sb_lock(sb)
357 register struct sockbuf *sb;
358 {
359 int error;
360
361 SOCKBUF_LOCK_ASSERT(sb);
362
363 while (sb->sb_flags & SB_LOCK) {
364 sb->sb_flags |= SB_WANT;
365 error = msleep(&sb->sb_flags, &sb->sb_mtx,
366 (sb->sb_flags & SB_NOINTR) ? PSOCK : PSOCK|PCATCH,
367 "sblock", 0);
368 if (error)
369 return (error);
370 }
371 sb->sb_flags |= SB_LOCK;
372 return (0);
373 }
374
375 /*
376 * Wakeup processes waiting on a socket buffer. Do asynchronous
377 * notification via SIGIO if the socket has the SS_ASYNC flag set.
378 *
379 * Called with the socket buffer lock held; will release the lock by the end
380 * of the function. This allows the caller to acquire the socket buffer lock
381 * while testing for the need for various sorts of wakeup and hold it through
382 * to the point where it's no longer required. We currently hold the lock
383 * through calls out to other subsystems (with the exception of kqueue), and
384 * then release it to avoid lock order issues. It's not clear that's
385 * correct.
386 */
387 void
388 sowakeup(so, sb)
389 register struct socket *so;
390 register struct sockbuf *sb;
391 {
392
393 SOCKBUF_LOCK_ASSERT(sb);
394
395 selwakeuppri(&sb->sb_sel, PSOCK);
396 sb->sb_flags &= ~SB_SEL;
397 if (sb->sb_flags & SB_WAIT) {
398 sb->sb_flags &= ~SB_WAIT;
399 wakeup(&sb->sb_cc);
400 }
401 KNOTE_LOCKED(&sb->sb_sel.si_note, 0);
402 SOCKBUF_UNLOCK(sb);
403 if ((so->so_state & SS_ASYNC) && so->so_sigio != NULL)
404 pgsigio(&so->so_sigio, SIGIO, 0);
405 if (sb->sb_flags & SB_UPCALL)
406 (*so->so_upcall)(so, so->so_upcallarg, M_DONTWAIT);
407 if (sb->sb_flags & SB_AIO)
408 aio_swake(so, sb);
409 mtx_assert(SOCKBUF_MTX(sb), MA_NOTOWNED);
410 }
411
412 /*
413 * Socket buffer (struct sockbuf) utility routines.
414 *
415 * Each socket contains two socket buffers: one for sending data and
416 * one for receiving data. Each buffer contains a queue of mbufs,
417 * information about the number of mbufs and amount of data in the
418 * queue, and other fields allowing select() statements and notification
419 * on data availability to be implemented.
420 *
421 * Data stored in a socket buffer is maintained as a list of records.
422 * Each record is a list of mbufs chained together with the m_next
423 * field. Records are chained together with the m_nextpkt field. The upper
424 * level routine soreceive() expects the following conventions to be
425 * observed when placing information in the receive buffer:
426 *
427 * 1. If the protocol requires each message be preceded by the sender's
428 * name, then a record containing that name must be present before
429 * any associated data (mbuf's must be of type MT_SONAME).
430 * 2. If the protocol supports the exchange of ``access rights'' (really
431 * just additional data associated with the message), and there are
432 * ``rights'' to be received, then a record containing this data
433 * should be present (mbuf's must be of type MT_RIGHTS).
434 * 3. If a name or rights record exists, then it must be followed by
435 * a data record, perhaps of zero length.
436 *
437 * Before using a new socket structure it is first necessary to reserve
438 * buffer space to the socket, by calling sbreserve(). This should commit
439 * some of the available buffer space in the system buffer pool for the
440 * socket (currently, it does nothing but enforce limits). The space
441 * should be released by calling sbrelease() when the socket is destroyed.
442 */
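
/*
 * As a concrete picture of the layout described above (mbuf counts are
 * arbitrary), a buffer holding two records looks like:
 *
 *	sb_mb -> m1 -> m2 -> m3		(record 1; mbufs linked by m_next)
 *	          |
 *	      m_nextpkt
 *	          |
 *	          v
 *	         m4 -> m5		(record 2; also the last record)
 *
 * sb_lastrecord points at the first mbuf of the last record (m4) and
 * sb_mbtail at the last mbuf in the buffer (m5); the debug routines
 * sblastrecordchk() and sblastmbufchk() below verify exactly this.
 */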
443
444 int
445 soreserve(so, sndcc, rcvcc)
446 register struct socket *so;
447 u_long sndcc, rcvcc;
448 {
449 struct thread *td = curthread;
450
451 SOCKBUF_LOCK(&so->so_snd);
452 SOCKBUF_LOCK(&so->so_rcv);
453 if (sbreserve_locked(&so->so_snd, sndcc, so, td) == 0)
454 goto bad;
455 if (sbreserve_locked(&so->so_rcv, rcvcc, so, td) == 0)
456 goto bad2;
457 if (so->so_rcv.sb_lowat == 0)
458 so->so_rcv.sb_lowat = 1;
459 if (so->so_snd.sb_lowat == 0)
460 so->so_snd.sb_lowat = MCLBYTES;
461 if (so->so_snd.sb_lowat > so->so_snd.sb_hiwat)
462 so->so_snd.sb_lowat = so->so_snd.sb_hiwat;
463 SOCKBUF_UNLOCK(&so->so_rcv);
464 SOCKBUF_UNLOCK(&so->so_snd);
465 return (0);
466 bad2:
467 sbrelease_locked(&so->so_snd, so);
468 bad:
469 SOCKBUF_UNLOCK(&so->so_rcv);
470 SOCKBUF_UNLOCK(&so->so_snd);
471 return (ENOBUFS);
472 }
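
/*
 * Sketch of a typical caller (a hypothetical protocol attach routine;
 * the space values are illustrative only):
 *
 *	static u_long my_sendspace = 8192;
 *	static u_long my_recvspace = 8192;
 *
 *	error = soreserve(so, my_sendspace, my_recvspace);
 *	if (error)
 *		return (error);
 *
 * The reservation fails with ENOBUFS when a request exceeds sb_max_adj
 * or the per-uid accounting against RLIMIT_SBSIZE in sbreserve_locked()
 * refuses the charge.
 */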
473
474 static int
475 sysctl_handle_sb_max(SYSCTL_HANDLER_ARGS)
476 {
477 int error = 0;
478 u_long old_sb_max = sb_max;
479
480 error = SYSCTL_OUT(req, arg1, sizeof(u_long));
481 if (error || !req->newptr)
482 return (error);
483 error = SYSCTL_IN(req, arg1, sizeof(u_long));
484 if (error)
485 return (error);
486 if (sb_max < MSIZE + MCLBYTES) {
487 sb_max = old_sb_max;
488 return (EINVAL);
489 }
490 sb_max_adj = (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES);
491 return (0);
492 }
493
494 /*
495 * Allot mbufs to a sockbuf.
496 * Attempt to scale mbmax so that mbcnt doesn't become limiting
497 * if buffering efficiency is near the normal case.
498 */
499 int
500 sbreserve_locked(sb, cc, so, td)
501 struct sockbuf *sb;
502 u_long cc;
503 struct socket *so;
504 struct thread *td;
505 {
506 rlim_t sbsize_limit;
507
508 SOCKBUF_LOCK_ASSERT(sb);
509
510 /*
511 * td will only be NULL when we're in an interrupt
512 * (e.g. in tcp_input())
513 */
514 if (cc > sb_max_adj)
515 return (0);
516 if (td != NULL) {
517 PROC_LOCK(td->td_proc);
518 sbsize_limit = lim_cur(td->td_proc, RLIMIT_SBSIZE);
519 PROC_UNLOCK(td->td_proc);
520 } else
521 sbsize_limit = RLIM_INFINITY;
522 if (!chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, cc,
523 sbsize_limit))
524 return (0);
525 sb->sb_mbmax = min(cc * sb_efficiency, sb_max);
526 if (sb->sb_lowat > sb->sb_hiwat)
527 sb->sb_lowat = sb->sb_hiwat;
528 return (1);
529 }
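
/*
 * Worked example of the scaling above (numbers illustrative, using the
 * default sb_efficiency of 8): a reservation of cc = 64 kB sets
 * sb_mbmax = min(64 kB * 8, sb_max) = 512 kB of mbuf space (or sb_max,
 * if that is smaller), so that fragmentation of the data across many
 * partly filled mbufs does not normally make sb_mbcnt the limiting
 * factor.
 */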
530
531 int
532 sbreserve(sb, cc, so, td)
533 struct sockbuf *sb;
534 u_long cc;
535 struct socket *so;
536 struct thread *td;
537 {
538 int error;
539
540 SOCKBUF_LOCK(sb);
541 error = sbreserve_locked(sb, cc, so, td);
542 SOCKBUF_UNLOCK(sb);
543 return (error);
544 }
545
546 /*
547 * Free mbufs held by a socket, and reserved mbuf space.
548 */
549 void
550 sbrelease_locked(sb, so)
551 struct sockbuf *sb;
552 struct socket *so;
553 {
554
555 SOCKBUF_LOCK_ASSERT(sb);
556
557 sbflush_locked(sb);
558 (void)chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, 0,
559 RLIM_INFINITY);
560 sb->sb_mbmax = 0;
561 }
562
563 void
564 sbrelease(sb, so)
565 struct sockbuf *sb;
566 struct socket *so;
567 {
568
569 SOCKBUF_LOCK(sb);
570 sbrelease_locked(sb, so);
571 SOCKBUF_UNLOCK(sb);
572 }
573 /*
574 * Routines to add and remove
575 * data from an mbuf queue.
576 *
577 * The routines sbappend() or sbappendrecord() are normally called to
578 * append new mbufs to a socket buffer, after checking that adequate
579 * space is available, comparing the function sbspace() with the amount
580 * of data to be added. sbappendrecord() differs from sbappend() in
581 * that data supplied is treated as the beginning of a new record.
582 * To place a sender's address, optional access rights, and data in a
583 * socket receive buffer, sbappendaddr() should be used. To place
584 * access rights and data in a socket receive buffer, sbappendcontrol()
585 * should be used. In either case, the new data begins a new record.
586 * Note that unlike sbappend() and sbappendrecord(), these routines check
587 * for the caller that there will be enough space to store the data.
588 * Each fails if there is not enough space, or if it cannot find mbufs
589 * to store additional information in.
590 *
591 * Reliable protocols may use the socket send buffer to hold data
592 * awaiting acknowledgement. Data is normally copied from a socket
593 * send buffer in a protocol with m_copy for output to a peer, and is
594 * then removed from the socket buffer with sbdrop() or sbdroprecord()
595 * when the data is acknowledged by the peer.
596 */
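
/*
 * Sketch of the conventions just described (callers and variables are
 * hypothetical): a stream-style sender checks sbspace() itself before
 * appending, while a datagram receive path relies on sbappendaddr() to
 * perform the space check and report failure:
 *
 *	if (sbspace(&so->so_snd) >= m_length(m, NULL))
 *		sbappend(&so->so_snd, m);
 *
 *	if (sbappendaddr(&so->so_rcv, (struct sockaddr *)&from, m, opts))
 *		sorwakeup(so);
 *	else
 *		m_freem(m);	no space or no mbufs; drop it
 */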
597
598 #ifdef SOCKBUF_DEBUG
599 void
600 sblastrecordchk(struct sockbuf *sb, const char *file, int line)
601 {
602 struct mbuf *m = sb->sb_mb;
603
604 SOCKBUF_LOCK_ASSERT(sb);
605
606 while (m && m->m_nextpkt)
607 m = m->m_nextpkt;
608
609 if (m != sb->sb_lastrecord) {
610 printf("%s: sb_mb %p sb_lastrecord %p last %p\n",
611 __func__, sb->sb_mb, sb->sb_lastrecord, m);
612 printf("packet chain:\n");
613 for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt)
614 printf("\t%p\n", m);
615 panic("%s from %s:%u", __func__, file, line);
616 }
617 }
618
619 void
620 sblastmbufchk(struct sockbuf *sb, const char *file, int line)
621 {
622 struct mbuf *m = sb->sb_mb;
623 struct mbuf *n;
624
625 SOCKBUF_LOCK_ASSERT(sb);
626
627 while (m && m->m_nextpkt)
628 m = m->m_nextpkt;
629
630 while (m && m->m_next)
631 m = m->m_next;
632
633 if (m != sb->sb_mbtail) {
634 printf("%s: sb_mb %p sb_mbtail %p last %p\n",
635 __func__, sb->sb_mb, sb->sb_mbtail, m);
636 printf("packet tree:\n");
637 for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt) {
638 printf("\t");
639 for (n = m; n != NULL; n = n->m_next)
640 printf("%p ", n);
641 printf("\n");
642 }
643 panic("%s from %s:%u", __func__, file, line);
644 }
645 }
646 #endif /* SOCKBUF_DEBUG */
647
648 #define SBLINKRECORD(sb, m0) do { \
649 SOCKBUF_LOCK_ASSERT(sb); \
650 if ((sb)->sb_lastrecord != NULL) \
651 (sb)->sb_lastrecord->m_nextpkt = (m0); \
652 else \
653 (sb)->sb_mb = (m0); \
654 (sb)->sb_lastrecord = (m0); \
655 } while (/*CONSTCOND*/0)
656
657 /*
658 * Append mbuf chain m to the last record in the
659 * socket buffer sb. The additional space associated with
660 * the mbuf chain is recorded in sb. Empty mbufs are
661 * discarded and mbufs are compacted where possible.
662 */
663 void
664 sbappend_locked(sb, m)
665 struct sockbuf *sb;
666 struct mbuf *m;
667 {
668 register struct mbuf *n;
669
670 SOCKBUF_LOCK_ASSERT(sb);
671
672 if (m == 0)
673 return;
674
675 SBLASTRECORDCHK(sb);
676 n = sb->sb_mb;
677 if (n) {
678 while (n->m_nextpkt)
679 n = n->m_nextpkt;
680 do {
681 if (n->m_flags & M_EOR) {
682 sbappendrecord_locked(sb, m); /* XXXXXX!!!! */
683 return;
684 }
685 } while (n->m_next && (n = n->m_next));
686 } else {
687 /*
688 * XXX Would like to simply use sb_mbtail here, but
689 * XXX I need to verify that I won't miss an EOR that
690 * XXX way.
691 */
692 if ((n = sb->sb_lastrecord) != NULL) {
693 do {
694 if (n->m_flags & M_EOR) {
695 sbappendrecord_locked(sb, m); /* XXXXXX!!!! */
696 return;
697 }
698 } while (n->m_next && (n = n->m_next));
699 } else {
700 /*
701 * If this is the first record in the socket buffer,
702 * it's also the last record.
703 */
704 sb->sb_lastrecord = m;
705 }
706 }
707 sbcompress(sb, m, n);
708 SBLASTRECORDCHK(sb);
709 }
710
711 /*
712 * Append mbuf chain m to the last record in the
713 * socket buffer sb. The additional space associated with
714 * the mbuf chain is recorded in sb. Empty mbufs are
715 * discarded and mbufs are compacted where possible.
716 */
717 void
718 sbappend(sb, m)
719 struct sockbuf *sb;
720 struct mbuf *m;
721 {
722
723 SOCKBUF_LOCK(sb);
724 sbappend_locked(sb, m);
725 SOCKBUF_UNLOCK(sb);
726 }
727
728 /*
729 * This version of sbappend() should only be used when the caller
730 * absolutely knows that there will never be more than one record
731 * in the socket buffer, that is, a stream protocol (such as TCP).
732 */
733 void
734 sbappendstream_locked(struct sockbuf *sb, struct mbuf *m)
735 {
736 SOCKBUF_LOCK_ASSERT(sb);
737
738 KASSERT(m->m_nextpkt == NULL,("sbappendstream 0"));
739 KASSERT(sb->sb_mb == sb->sb_lastrecord,("sbappendstream 1"));
740
741 SBLASTMBUFCHK(sb);
742
743 sbcompress(sb, m, sb->sb_mbtail);
744
745 sb->sb_lastrecord = sb->sb_mb;
746 SBLASTRECORDCHK(sb);
747 }
748
749 /*
750 * This version of sbappend() should only be used when the caller
751 * absolutely knows that there will never be more than one record
752 * in the socket buffer, that is, a stream protocol (such as TCP).
753 */
754 void
755 sbappendstream(struct sockbuf *sb, struct mbuf *m)
756 {
757
758 SOCKBUF_LOCK(sb);
759 sbappendstream_locked(sb, m);
760 SOCKBUF_UNLOCK(sb);
761 }
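
/*
 * Usage sketch (modeled on a stream protocol's input path; not a
 * verbatim caller): in-order data is appended under the sockbuf lock
 * and the reader is then woken, with sorwakeup_locked() releasing the
 * lock as described for sowakeup() above:
 *
 *	SOCKBUF_LOCK(&so->so_rcv);
 *	sbappendstream_locked(&so->so_rcv, m);
 *	sorwakeup_locked(so);
 */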
762
763 #ifdef SOCKBUF_DEBUG
764 void
765 sbcheck(sb)
766 struct sockbuf *sb;
767 {
768 struct mbuf *m;
769 struct mbuf *n = 0;
770 u_long len = 0, mbcnt = 0;
771
772 SOCKBUF_LOCK_ASSERT(sb);
773
774 for (m = sb->sb_mb; m; m = n) {
775 n = m->m_nextpkt;
776 for (; m; m = m->m_next) {
777 len += m->m_len;
778 mbcnt += MSIZE;
779 if (m->m_flags & M_EXT) /*XXX*/ /* pretty sure this is bogus */
780 mbcnt += m->m_ext.ext_size;
781 }
782 }
783 if (len != sb->sb_cc || mbcnt != sb->sb_mbcnt) {
784 printf("cc %ld != %u || mbcnt %ld != %u\n", len, sb->sb_cc,
785 mbcnt, sb->sb_mbcnt);
786 panic("sbcheck");
787 }
788 }
789 #endif
790
791 /*
792 * As above, except the mbuf chain
793 * begins a new record.
794 */
795 void
796 sbappendrecord_locked(sb, m0)
797 register struct sockbuf *sb;
798 register struct mbuf *m0;
799 {
800 register struct mbuf *m;
801
802 SOCKBUF_LOCK_ASSERT(sb);
803
804 if (m0 == 0)
805 return;
806 m = sb->sb_mb;
807 if (m)
808 while (m->m_nextpkt)
809 m = m->m_nextpkt;
810 /*
811 * Put the first mbuf on the queue.
812 * Note this permits zero length records.
813 */
814 sballoc(sb, m0);
815 SBLASTRECORDCHK(sb);
816 SBLINKRECORD(sb, m0);
817 if (m)
818 m->m_nextpkt = m0;
819 else
820 sb->sb_mb = m0;
821 m = m0->m_next;
822 m0->m_next = 0;
823 if (m && (m0->m_flags & M_EOR)) {
824 m0->m_flags &= ~M_EOR;
825 m->m_flags |= M_EOR;
826 }
827 sbcompress(sb, m, m0);
828 }
829
830 /*
831 * As above, except the mbuf chain
832 * begins a new record.
833 */
834 void
835 sbappendrecord(sb, m0)
836 register struct sockbuf *sb;
837 register struct mbuf *m0;
838 {
839
840 SOCKBUF_LOCK(sb);
841 sbappendrecord_locked(sb, m0);
842 SOCKBUF_UNLOCK(sb);
843 }
844
845 /*
846 * As above except that OOB data
847 * is inserted at the beginning of the sockbuf,
848 * but after any other OOB data.
849 */
850 void
851 sbinsertoob_locked(sb, m0)
852 register struct sockbuf *sb;
853 register struct mbuf *m0;
854 {
855 register struct mbuf *m;
856 register struct mbuf **mp;
857
858 SOCKBUF_LOCK_ASSERT(sb);
859
860 if (m0 == 0)
861 return;
862 for (mp = &sb->sb_mb; *mp ; mp = &((*mp)->m_nextpkt)) {
863 m = *mp;
864 again:
865 switch (m->m_type) {
866
867 case MT_OOBDATA:
868 continue; /* WANT next train */
869
870 case MT_CONTROL:
871 m = m->m_next;
872 if (m)
873 goto again; /* inspect THIS train further */
874 }
875 break;
876 }
877 /*
878 * Put the first mbuf on the queue.
879 * Note this permits zero length records.
880 */
881 sballoc(sb, m0);
882 m0->m_nextpkt = *mp;
883 *mp = m0;
884 m = m0->m_next;
885 m0->m_next = 0;
886 if (m && (m0->m_flags & M_EOR)) {
887 m0->m_flags &= ~M_EOR;
888 m->m_flags |= M_EOR;
889 }
890 sbcompress(sb, m, m0);
891 }
892
893 /*
894 * As above except that OOB data
895 * is inserted at the beginning of the sockbuf,
896 * but after any other OOB data.
897 */
898 void
899 sbinsertoob(sb, m0)
900 register struct sockbuf *sb;
901 register struct mbuf *m0;
902 {
903
904 SOCKBUF_LOCK(sb);
905 sbinsertoob_locked(sb, m0);
906 SOCKBUF_UNLOCK(sb);
907 }
908
909 /*
910 * Append address and data, and optionally, control (ancillary) data
911 * to the receive queue of a socket. If present,
912 * m0 must include a packet header with total length.
913 * Returns 0 if no space in sockbuf or insufficient mbufs.
914 */
915 int
916 sbappendaddr_locked(sb, asa, m0, control)
917 struct sockbuf *sb;
918 const struct sockaddr *asa;
919 struct mbuf *m0, *control;
920 {
921 struct mbuf *m, *n, *nlast;
922 int space = asa->sa_len;
923
924 SOCKBUF_LOCK_ASSERT(sb);
925
926 if (m0 && (m0->m_flags & M_PKTHDR) == 0)
927 panic("sbappendaddr_locked");
928 if (m0)
929 space += m0->m_pkthdr.len;
930 space += m_length(control, &n);
931
932 if (space > sbspace(sb))
933 return (0);
934 #if MSIZE <= 256
935 if (asa->sa_len > MLEN)
936 return (0);
937 #endif
938 MGET(m, M_DONTWAIT, MT_SONAME);
939 if (m == 0)
940 return (0);
941 m->m_len = asa->sa_len;
942 bcopy(asa, mtod(m, caddr_t), asa->sa_len);
943 if (n)
944 n->m_next = m0; /* concatenate data to control */
945 else
946 control = m0;
947 m->m_next = control;
948 for (n = m; n->m_next != NULL; n = n->m_next)
949 sballoc(sb, n);
950 sballoc(sb, n);
951 nlast = n;
952 SBLINKRECORD(sb, m);
953
954 sb->sb_mbtail = nlast;
955 SBLASTMBUFCHK(sb);
956
957 SBLASTRECORDCHK(sb);
958 return (1);
959 }
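
/*
 * As built by the routine above, a single appended datagram becomes one
 * record whose mbufs are chained with m_next (a sketch; the control
 * chain is optional):
 *
 *	MT_SONAME (sender) -> control chain -> data chain (m0)
 *
 * Successive datagrams are then linked through m_nextpkt by
 * SBLINKRECORD().
 */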
960
961 /*
962 * Append address and data, and optionally, control (ancillary) data
963 * to the receive queue of a socket. If present,
964 * m0 must include a packet header with total length.
965 * Returns 0 if no space in sockbuf or insufficient mbufs.
966 */
967 int
968 sbappendaddr(sb, asa, m0, control)
969 struct sockbuf *sb;
970 const struct sockaddr *asa;
971 struct mbuf *m0, *control;
972 {
973 int retval;
974
975 SOCKBUF_LOCK(sb);
976 retval = sbappendaddr_locked(sb, asa, m0, control);
977 SOCKBUF_UNLOCK(sb);
978 return (retval);
979 }
980
981 int
982 sbappendcontrol_locked(sb, m0, control)
983 struct sockbuf *sb;
984 struct mbuf *control, *m0;
985 {
986 struct mbuf *m, *n, *mlast;
987 int space;
988
989 SOCKBUF_LOCK_ASSERT(sb);
990
991 if (control == 0)
992 panic("sbappendcontrol_locked");
993 space = m_length(control, &n) + m_length(m0, NULL);
994
995 if (space > sbspace(sb))
996 return (0);
997 n->m_next = m0; /* concatenate data to control */
998
999 SBLASTRECORDCHK(sb);
1000
1001 for (m = control; m->m_next; m = m->m_next)
1002 sballoc(sb, m);
1003 sballoc(sb, m);
1004 mlast = m;
1005 SBLINKRECORD(sb, control);
1006
1007 sb->sb_mbtail = mlast;
1008 SBLASTMBUFCHK(sb);
1009
1010 SBLASTRECORDCHK(sb);
1011 return (1);
1012 }
1013
1014 int
1015 sbappendcontrol(sb, m0, control)
1016 struct sockbuf *sb;
1017 struct mbuf *control, *m0;
1018 {
1019 int retval;
1020
1021 SOCKBUF_LOCK(sb);
1022 retval = sbappendcontrol_locked(sb, m0, control);
1023 SOCKBUF_UNLOCK(sb);
1024 return (retval);
1025 }
1026
1027 /*
1028 * Append the data in mbuf chain (m) into the socket buffer sb following mbuf
1029 * (n). If (n) is NULL, the buffer is presumed empty.
1030 *
1031 * When the data is compressed, mbufs in the chain may be handled in one of
1032 * three ways:
1033 *
1034 * (1) The mbuf may simply be dropped, if it contributes nothing (no data, no
1035 * record boundary, and no change in data type).
1036 *
1037 * (2) The mbuf may be coalesced -- i.e., data in the mbuf may be copied into
1038 * an mbuf already in the socket buffer. This can occur if an
1039 * appropriate mbuf exists, there is room, and no merging of data types
1040 * will occur.
1041 *
1042 * (3) The mbuf may be appended to the end of the existing mbuf chain.
1043 *
1044 * If any of the new mbufs is marked as M_EOR, mark the last mbuf appended as
1045 * end-of-record.
1046 */
1047 void
1048 sbcompress(sb, m, n)
1049 register struct sockbuf *sb;
1050 register struct mbuf *m, *n;
1051 {
1052 register int eor = 0;
1053 register struct mbuf *o;
1054
1055 SOCKBUF_LOCK_ASSERT(sb);
1056
1057 while (m) {
1058 eor |= m->m_flags & M_EOR;
1059 if (m->m_len == 0 &&
1060 (eor == 0 ||
1061 (((o = m->m_next) || (o = n)) &&
1062 o->m_type == m->m_type))) {
1063 if (sb->sb_lastrecord == m)
1064 sb->sb_lastrecord = m->m_next;
1065 m = m_free(m);
1066 continue;
1067 }
1068 if (n && (n->m_flags & M_EOR) == 0 &&
1069 M_WRITABLE(n) &&
1070 m->m_len <= MCLBYTES / 4 && /* XXX: Don't copy too much */
1071 m->m_len <= M_TRAILINGSPACE(n) &&
1072 n->m_type == m->m_type) {
1073 bcopy(mtod(m, caddr_t), mtod(n, caddr_t) + n->m_len,
1074 (unsigned)m->m_len);
1075 n->m_len += m->m_len;
1076 sb->sb_cc += m->m_len;
1077 if (m->m_type != MT_DATA && m->m_type != MT_HEADER &&
1078 m->m_type != MT_OOBDATA)
1079 /* XXX: Probably don't need.*/
1080 sb->sb_ctl += m->m_len;
1081 m = m_free(m);
1082 continue;
1083 }
1084 if (n)
1085 n->m_next = m;
1086 else
1087 sb->sb_mb = m;
1088 sb->sb_mbtail = m;
1089 sballoc(sb, m);
1090 n = m;
1091 m->m_flags &= ~M_EOR;
1092 m = m->m_next;
1093 n->m_next = 0;
1094 }
1095 if (eor) {
1096 KASSERT(n != NULL, ("sbcompress: eor && n == NULL"));
1097 n->m_flags |= eor;
1098 }
1099 SBLASTMBUFCHK(sb);
1100 }
1101
1102 /*
1103 * Free all mbufs in a sockbuf.
1104 * Check that all resources are reclaimed.
1105 */
1106 void
1107 sbflush_locked(sb)
1108 register struct sockbuf *sb;
1109 {
1110
1111 SOCKBUF_LOCK_ASSERT(sb);
1112
1113 if (sb->sb_flags & SB_LOCK)
1114 panic("sbflush_locked: locked");
1115 while (sb->sb_mbcnt) {
1116 /*
1117 * Don't call sbdrop(sb, 0) if the leading mbuf is non-empty:
1118 * we would loop forever. Panic instead.
1119 */
1120 if (!sb->sb_cc && (sb->sb_mb == NULL || sb->sb_mb->m_len))
1121 break;
1122 sbdrop_locked(sb, (int)sb->sb_cc);
1123 }
1124 if (sb->sb_cc || sb->sb_mb || sb->sb_mbcnt)
1125 panic("sbflush_locked: cc %u || mb %p || mbcnt %u", sb->sb_cc, (void *)sb->sb_mb, sb->sb_mbcnt);
1126 }
1127
1128 void
1129 sbflush(sb)
1130 register struct sockbuf *sb;
1131 {
1132
1133 SOCKBUF_LOCK(sb);
1134 sbflush_locked(sb);
1135 SOCKBUF_UNLOCK(sb);
1136 }
1137
1138 /*
1139 * Drop data from (the front of) a sockbuf.
1140 */
1141 void
1142 sbdrop_locked(sb, len)
1143 register struct sockbuf *sb;
1144 register int len;
1145 {
1146 register struct mbuf *m;
1147 struct mbuf *next;
1148
1149 SOCKBUF_LOCK_ASSERT(sb);
1150
1151 next = (m = sb->sb_mb) ? m->m_nextpkt : 0;
1152 while (len > 0) {
1153 if (m == 0) {
1154 if (next == 0)
1155 panic("sbdrop");
1156 m = next;
1157 next = m->m_nextpkt;
1158 continue;
1159 }
1160 if (m->m_len > len) {
1161 m->m_len -= len;
1162 m->m_data += len;
1163 sb->sb_cc -= len;
1164 if (m->m_type != MT_DATA && m->m_type != MT_HEADER &&
1165 m->m_type != MT_OOBDATA)
1166 sb->sb_ctl -= len;
1167 break;
1168 }
1169 len -= m->m_len;
1170 sbfree(sb, m);
1171 m = m_free(m);
1172 }
1173 while (m && m->m_len == 0) {
1174 sbfree(sb, m);
1175 m = m_free(m);
1176 }
1177 if (m) {
1178 sb->sb_mb = m;
1179 m->m_nextpkt = next;
1180 } else
1181 sb->sb_mb = next;
1182 /*
1183 * First part is an inline SB_EMPTY_FIXUP(). Second part
1184 * makes sure sb_lastrecord is up-to-date if we dropped
1185 * part of the last record.
1186 */
1187 m = sb->sb_mb;
1188 if (m == NULL) {
1189 sb->sb_mbtail = NULL;
1190 sb->sb_lastrecord = NULL;
1191 } else if (m->m_nextpkt == NULL) {
1192 sb->sb_lastrecord = m;
1193 }
1194 }
1195
1196 /*
1197 * Drop data from (the front of) a sockbuf.
1198 */
1199 void
1200 sbdrop(sb, len)
1201 register struct sockbuf *sb;
1202 register int len;
1203 {
1204
1205 SOCKBUF_LOCK(sb);
1206 sbdrop_locked(sb, len);
1207 SOCKBUF_UNLOCK(sb);
1208 }
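
/*
 * Sketch of the pattern described in the sbappend/sbdrop overview above
 * (the variable name is illustrative): a reliable protocol removes data
 * from the front of the send buffer once the peer acknowledges it and
 * then wakes any writer blocked on buffer space:
 *
 *	sbdrop(&so->so_snd, acked);
 *	sowwakeup(so);
 */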
1209
1210 /*
1211 * Drop a record off the front of a sockbuf
1212 * and move the next record to the front.
1213 */
1214 void
1215 sbdroprecord_locked(sb)
1216 register struct sockbuf *sb;
1217 {
1218 register struct mbuf *m;
1219
1220 SOCKBUF_LOCK_ASSERT(sb);
1221
1222 m = sb->sb_mb;
1223 if (m) {
1224 sb->sb_mb = m->m_nextpkt;
1225 do {
1226 sbfree(sb, m);
1227 m = m_free(m);
1228 } while (m);
1229 }
1230 SB_EMPTY_FIXUP(sb);
1231 }
1232
1233 /*
1234 * Drop a record off the front of a sockbuf
1235 * and move the next record to the front.
1236 */
1237 void
1238 sbdroprecord(sb)
1239 register struct sockbuf *sb;
1240 {
1241
1242 SOCKBUF_LOCK(sb);
1243 sbdroprecord_locked(sb);
1244 SOCKBUF_UNLOCK(sb);
1245 }
1246
1247 /*
1248 * Create a "control" mbuf containing the specified data
1249 * with the specified type for presentation on a socket buffer.
1250 */
1251 struct mbuf *
1252 sbcreatecontrol(p, size, type, level)
1253 caddr_t p;
1254 register int size;
1255 int type, level;
1256 {
1257 register struct cmsghdr *cp;
1258 struct mbuf *m;
1259
1260 if (CMSG_SPACE((u_int)size) > MCLBYTES)
1261 return ((struct mbuf *) NULL);
1262 if (CMSG_SPACE((u_int)size) > MLEN)
1263 m = m_getcl(M_DONTWAIT, MT_CONTROL, 0);
1264 else
1265 m = m_get(M_DONTWAIT, MT_CONTROL);
1266 if (m == NULL)
1267 return ((struct mbuf *) NULL);
1268 cp = mtod(m, struct cmsghdr *);
1269 m->m_len = 0;
1270 KASSERT(CMSG_SPACE((u_int)size) <= M_TRAILINGSPACE(m),
1271 ("sbcreatecontrol: short mbuf"));
1272 if (p != NULL)
1273 (void)memcpy(CMSG_DATA(cp), p, size);
1274 m->m_len = CMSG_SPACE(size);
1275 cp->cmsg_len = CMSG_LEN(size);
1276 cp->cmsg_level = level;
1277 cp->cmsg_type = type;
1278 return (m);
1279 }
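
/*
 * Usage sketch (mirrors a common caller pattern, e.g. attaching a
 * receive timestamp as ancillary data; the surrounding code is
 * hypothetical):
 *
 *	struct timeval tv;
 *	struct mbuf *opts;
 *
 *	microtime(&tv);
 *	opts = sbcreatecontrol((caddr_t)&tv, sizeof(tv),
 *	    SCM_TIMESTAMP, SOL_SOCKET);
 *	if (opts != NULL)
 *		...pass opts as the control chain to sbappendaddr()...
 */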
1280
1281 /*
1282 * Some routines that return EOPNOTSUPP for entry points that are not
1283 * supported by a protocol. Fill in as needed.
1284 */
1285 int
1286 pru_accept_notsupp(struct socket *so, struct sockaddr **nam)
1287 {
1288 return EOPNOTSUPP;
1289 }
1290
1291 int
1292 pru_connect_notsupp(struct socket *so, struct sockaddr *nam, struct thread *td)
1293 {
1294 return EOPNOTSUPP;
1295 }
1296
1297 int
1298 pru_connect2_notsupp(struct socket *so1, struct socket *so2)
1299 {
1300 return EOPNOTSUPP;
1301 }
1302
1303 int
1304 pru_control_notsupp(struct socket *so, u_long cmd, caddr_t data,
1305 struct ifnet *ifp, struct thread *td)
1306 {
1307 return EOPNOTSUPP;
1308 }
1309
1310 int
1311 pru_listen_notsupp(struct socket *so, struct thread *td)
1312 {
1313 return EOPNOTSUPP;
1314 }
1315
1316 int
1317 pru_rcvd_notsupp(struct socket *so, int flags)
1318 {
1319 return EOPNOTSUPP;
1320 }
1321
1322 int
1323 pru_rcvoob_notsupp(struct socket *so, struct mbuf *m, int flags)
1324 {
1325 return EOPNOTSUPP;
1326 }
1327
1328 /*
1329 * This isn't really a ``null'' operation, but it's the default one
1330 * and doesn't do anything destructive.
1331 */
1332 int
1333 pru_sense_null(struct socket *so, struct stat *sb)
1334 {
1335 sb->st_blksize = so->so_snd.sb_hiwat;
1336 return 0;
1337 }
1338
1339 /*
1340 * For protocol types that don't keep cached copies of labels in their
1341 * pcbs, provide a null sosetlabel that does a NOOP.
1342 */
1343 void
1344 pru_sosetlabel_null(struct socket *so)
1345 {
1346
1347 }
1348
1349 /*
1350 * Make a copy of a sockaddr in a malloced buffer of type M_SONAME.
1351 */
1352 struct sockaddr *
1353 sodupsockaddr(const struct sockaddr *sa, int mflags)
1354 {
1355 struct sockaddr *sa2;
1356
1357 sa2 = malloc(sa->sa_len, M_SONAME, mflags);
1358 if (sa2)
1359 bcopy(sa, sa2, sa->sa_len);
1360 return sa2;
1361 }
1362
1363 /*
1364 * Create an external-format (``xsocket'') structure using the information
1365 * in the kernel-format socket structure pointed to by so. This is done
1366 * to reduce the spew of irrelevant information over this interface,
1367 * to isolate user code from changes in the kernel structure, and
1368 * potentially to provide information-hiding if we decide that
1369 * some of this information should be hidden from users.
1370 */
1371 void
1372 sotoxsocket(struct socket *so, struct xsocket *xso)
1373 {
1374 xso->xso_len = sizeof *xso;
1375 xso->xso_so = so;
1376 xso->so_type = so->so_type;
1377 xso->so_options = so->so_options;
1378 xso->so_linger = so->so_linger;
1379 xso->so_state = so->so_state;
1380 xso->so_pcb = so->so_pcb;
1381 xso->xso_protocol = so->so_proto->pr_protocol;
1382 xso->xso_family = so->so_proto->pr_domain->dom_family;
1383 xso->so_qlen = so->so_qlen;
1384 xso->so_incqlen = so->so_incqlen;
1385 xso->so_qlimit = so->so_qlimit;
1386 xso->so_timeo = so->so_timeo;
1387 xso->so_error = so->so_error;
1388 xso->so_pgid = so->so_sigio ? so->so_sigio->sio_pgid : 0;
1389 xso->so_oobmark = so->so_oobmark;
1390 sbtoxsockbuf(&so->so_snd, &xso->so_snd);
1391 sbtoxsockbuf(&so->so_rcv, &xso->so_rcv);
1392 xso->so_uid = so->so_cred->cr_uid;
1393 }
1394
1395 /*
1396 * This does the same for sockbufs. Note that the xsockbuf structure,
1397 * since it is always embedded in a socket, does not include a self
1398 * pointer or a length. We make this entry point public in case
1399 * some other mechanism needs it.
1400 */
1401 void
1402 sbtoxsockbuf(struct sockbuf *sb, struct xsockbuf *xsb)
1403 {
1404 xsb->sb_cc = sb->sb_cc;
1405 xsb->sb_hiwat = sb->sb_hiwat;
1406 xsb->sb_mbcnt = sb->sb_mbcnt;
1407 xsb->sb_mbmax = sb->sb_mbmax;
1408 xsb->sb_lowat = sb->sb_lowat;
1409 xsb->sb_flags = sb->sb_flags;
1410 xsb->sb_timeo = sb->sb_timeo;
1411 }
1412
1413 /*
1414 * Here is the definition of some of the basic objects in the kern.ipc
1415 * branch of the MIB.
1416 */
1417 SYSCTL_NODE(_kern, KERN_IPC, ipc, CTLFLAG_RW, 0, "IPC");
1418
1419 /* This takes the place of kern.maxsockbuf, which moved to kern.ipc. */
1420 static int dummy;
1421 SYSCTL_INT(_kern, KERN_DUMMY, dummy, CTLFLAG_RW, &dummy, 0, "");
1422 SYSCTL_OID(_kern_ipc, KIPC_MAXSOCKBUF, maxsockbuf, CTLTYPE_ULONG|CTLFLAG_RW,
1423 &sb_max, 0, sysctl_handle_sb_max, "LU", "Maximum socket buffer size");
1424 SYSCTL_INT(_kern_ipc, OID_AUTO, maxsockets, CTLFLAG_RDTUN,
1425 &maxsockets, 0, "Maximum number of sockets available");
1426 SYSCTL_ULONG(_kern_ipc, KIPC_SOCKBUF_WASTE, sockbuf_waste_factor, CTLFLAG_RW,
1427 &sb_efficiency, 0, "");
1428
1429 /*
1430 * Initialise maxsockets
1431 */
1432 static void init_maxsockets(void *ignored)
1433 {
1434 TUNABLE_INT_FETCH("kern.ipc.maxsockets", &maxsockets);
1435 maxsockets = imax(maxsockets, imax(maxfiles, nmbclusters));
1436 }
1437 SYSINIT(param, SI_SUB_TUNABLES, SI_ORDER_ANY, init_maxsockets, NULL);