/*-
 * Copyright (c) 1989, 1991, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_socket.c	8.5 (Berkeley) 3/30/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Socket operations for use by nfs
 */

#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/refcount.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vnode.h>

#include <netinet/in.h>
#include <netinet/tcp.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfsserver/nfs.h>
#include <nfs/xdr_subs.h>
#include <nfsserver/nfsm_subs.h>

#include <security/mac/mac_framework.h>

#define TRUE	1
#define FALSE	0

static int nfs_realign_test;
static int nfs_realign_count;

SYSCTL_DECL(_vfs_nfsrv);

SYSCTL_INT(_vfs_nfsrv, OID_AUTO, realign_test, CTLFLAG_RW,
    &nfs_realign_test, 0, "");
SYSCTL_INT(_vfs_nfsrv, OID_AUTO, realign_count, CTLFLAG_RW,
    &nfs_realign_count, 0, "");
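
/*
 * Both counters can be read from userland, e.g.:
 *
 *	sysctl vfs.nfsrv.realign_test vfs.nfsrv.realign_count
 *
 * realign_test counts mbuf chains checked by nfs_realign();
 * realign_count counts chains that actually had to be copied.
 */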

/*
 * There is a congestion window for outstanding rpcs maintained per mount
 * point.  The cwnd size is adjusted in roughly the way that:
 * Van Jacobson, Congestion Avoidance and Control, in "Proceedings of
 * SIGCOMM '88".  ACM, August 1988.
 * describes for TCP.  The cwnd size is chopped in half on a retransmit
 * timeout and incremented by 1/cwnd when each rpc reply is received and
 * a full cwnd of rpcs is in progress.
 * (The sent count and cwnd are scaled for integer arith.)
 * Variants of "slow start" were tried and were found to be too much of a
 * performance hit (average rtt 3 times larger), I suspect due to the
 * large rtt that nfs rpcs have.
 */
#define NFS_CWNDSCALE	256
#define NFS_MAXCWND	(NFS_CWNDSCALE * 32)
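
/*
 * Illustrative sketch (not code from this file; the adjustment itself is
 * done by the client-side RPC code): with the scaling above, the scheme
 * described in the comment works out to roughly:
 *
 *	on a retransmit timeout:
 *		cwnd = max(cwnd / 2, NFS_CWNDSCALE);
 *	on each reply while a full window is outstanding:
 *		cwnd = min(cwnd + (NFS_CWNDSCALE * NFS_CWNDSCALE +
 *		    cwnd / 2) / cwnd, NFS_MAXCWND);
 *
 * i.e. multiplicative decrease on loss and an additive increase of about
 * 1/cwnd (in NFS_CWNDSCALE units) per reply.
 */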
struct callout nfsrv_callout;

static void	nfs_realign(struct mbuf **pm, int hsiz);	/* XXX SHARED */
static int	nfsrv_getstream(struct nfssvc_sock *, int);

int32_t (*nfsrv3_procs[NFS_NPROCS])(struct nfsrv_descript *nd,
    struct nfssvc_sock *slp,
    struct thread *td,
    struct mbuf **mreqp) = {
	nfsrv_null,
	nfsrv_getattr,
	nfsrv_setattr,
	nfsrv_lookup,
	nfsrv3_access,
	nfsrv_readlink,
	nfsrv_read,
	nfsrv_write,
	nfsrv_create,
	nfsrv_mkdir,
	nfsrv_symlink,
	nfsrv_mknod,
	nfsrv_remove,
	nfsrv_rmdir,
	nfsrv_rename,
	nfsrv_link,
	nfsrv_readdir,
	nfsrv_readdirplus,
	nfsrv_statfs,
	nfsrv_fsinfo,
	nfsrv_pathconf,
	nfsrv_commit,
	nfsrv_noop
};
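
/*
 * Sketch of how the table above is consumed (the actual dispatch loop
 * lives in the nfssvc(2) code, not in this file): nfs_getreq() maps v2
 * procedure numbers onto their v3 equivalents, so one indexed call
 * serves both protocol versions:
 *
 *	error = (*(nfsrv3_procs[nd->nd_procnum]))(nd, slp, td, &mreq);
 */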

/*
 * Generate the rpc reply header
 * siz arg. is used to decide if adding a cluster is worthwhile
 */
struct mbuf *
nfs_rephead(int siz, struct nfsrv_descript *nd, int err,
    struct mbuf **mbp, caddr_t *bposp)
{
	u_int32_t *tl;
	struct mbuf *mreq;
	caddr_t bpos;
	struct mbuf *mb;

	nd->nd_repstat = err;
	if (err && (nd->nd_flag & ND_NFSV3) == 0)	/* XXX recheck */
		siz = 0;
	MGETHDR(mreq, M_TRYWAIT, MT_DATA);
	mb = mreq;
	/*
	 * If this is a big reply, use a cluster else
	 * try and leave leading space for the lower level headers.
	 */
	mreq->m_len = 6 * NFSX_UNSIGNED;
	siz += RPC_REPLYSIZ;
	if ((max_hdr + siz) >= MINCLSIZE) {
		MCLGET(mreq, M_TRYWAIT);
	} else
		mreq->m_data += min(max_hdr, M_TRAILINGSPACE(mreq));
	tl = mtod(mreq, u_int32_t *);
	bpos = ((caddr_t)tl) + mreq->m_len;
	*tl++ = txdr_unsigned(nd->nd_retxid);
	*tl++ = nfsrv_rpc_reply;
	if (err == ERPCMISMATCH || (err & NFSERR_AUTHERR)) {
		*tl++ = nfsrv_rpc_msgdenied;
		if (err & NFSERR_AUTHERR) {
			*tl++ = nfsrv_rpc_autherr;
			*tl = txdr_unsigned(err & ~NFSERR_AUTHERR);
			mreq->m_len -= NFSX_UNSIGNED;
			bpos -= NFSX_UNSIGNED;
		} else {
			*tl++ = nfsrv_rpc_mismatch;
			*tl++ = txdr_unsigned(RPC_VER2);
			*tl = txdr_unsigned(RPC_VER2);
		}
	} else {
		*tl++ = nfsrv_rpc_msgaccepted;
		/*
		 * Send a RPCAUTH_NULL verifier - no Kerberos.
		 */
		*tl++ = 0;
		*tl++ = 0;
		switch (err) {
		case EPROGUNAVAIL:
			*tl = txdr_unsigned(RPC_PROGUNAVAIL);
			break;
		case EPROGMISMATCH:
			*tl = txdr_unsigned(RPC_PROGMISMATCH);
			tl = nfsm_build(u_int32_t *, 2 * NFSX_UNSIGNED);
			/* Lowest and highest NFS versions we support. */
			*tl++ = txdr_unsigned(2);
			*tl = txdr_unsigned(3);
			break;
		case EPROCUNAVAIL:
			*tl = txdr_unsigned(RPC_PROCUNAVAIL);
			break;
		case EBADRPC:
			*tl = txdr_unsigned(RPC_GARBAGE);
			break;
		default:
			*tl = 0;
			if (err != NFSERR_RETVOID) {
				tl = nfsm_build(u_int32_t *, NFSX_UNSIGNED);
				if (err)
					*tl = txdr_unsigned(nfsrv_errmap(nd, err));
				else
					*tl = 0;
			}
			break;
		}
	}
	*mbp = mb;
	*bposp = bpos;
	if (err != 0 && err != NFSERR_RETVOID)
		nfsrvstats.srvrpc_errs++;
	return (mreq);
}
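
/*
 * For reference, a successfully accepted reply built above occupies the
 * six XDR words pre-reserved via "mreq->m_len = 6 * NFSX_UNSIGNED":
 *
 *	word 0:	xid (copied from the request)
 *	word 1:	RPC reply
 *	word 2:	message accepted
 *	word 3:	verifier flavor (RPCAUTH_NULL)
 *	word 4:	verifier length (0)
 *	word 5:	accept status
 *
 * The denied branches above shrink this by one word (auth errors) or end
 * with the low/high supported RPC versions instead (version mismatch).
 */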

/*
 * nfs_realign:
 *
 * Check for badly aligned mbuf data and realign by copying the unaligned
 * portion of the data into a new mbuf chain and freeing the portions
 * of the old chain that were replaced.
 *
 * We cannot simply realign the data within the existing mbuf chain
 * because the underlying buffers may contain other rpc commands and
 * we cannot afford to overwrite them.
 *
 * We would prefer to avoid this situation entirely.  The situation does
 * not occur with NFS/UDP and is supposed to only occasionally occur
 * with TCP.  Use vfs.nfsrv.realign_count and vfs.nfsrv.realign_test to
 * check this.
 */
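/*
 * Example: soreceive() on a stream socket can split incoming data at
 * arbitrary byte boundaries, so after nfsrv_getstream() trims record
 * marks and copies partial fragments, an mbuf may start at an address
 * with (mtod(m, intptr_t) & 0x3) != 0 or carry a length that is not a
 * multiple of 4.  XDR dissection reads u_int32_t's directly through
 * mtod(), which would fault or misread on strict-alignment machines.
 */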
static void
nfs_realign(struct mbuf **pm, int hsiz)	/* XXX COMMON */
{
	struct mbuf *m;
	struct mbuf *n = NULL;
	int off = 0;

	++nfs_realign_test;
	while ((m = *pm) != NULL) {
		if ((m->m_len & 0x3) || (mtod(m, intptr_t) & 0x3)) {
			MGET(n, M_TRYWAIT, MT_DATA);
			if (m->m_len >= MINCLSIZE) {
				MCLGET(n, M_TRYWAIT);
			}
			n->m_len = 0;
			break;
		}
		pm = &m->m_next;
	}

	/*
	 * If n is non-NULL, loop on m copying data, then replace the
	 * portion of the chain that had to be realigned.
	 */
	if (n != NULL) {
		++nfs_realign_count;
		while (m) {
			m_copyback(n, off, m->m_len, mtod(m, caddr_t));
			off += m->m_len;
			m = m->m_next;
		}
		m_freem(*pm);
		*pm = n;
	}
}

/*
 * Parse an RPC request
 * - verify it
 * - fill in the cred struct.
 */
int
nfs_getreq(struct nfsrv_descript *nd, struct nfsd *nfsd, int has_header)
{
	int len, i;
	u_int32_t *tl;
	caddr_t dpos;
	u_int32_t nfsvers, auth_type;
	int error = 0;
	struct mbuf *mrep, *md;

	NFSD_LOCK_ASSERT();

	mrep = nd->nd_mrep;
	md = nd->nd_md;
	dpos = nd->nd_dpos;
	if (has_header) {
		tl = nfsm_dissect_nonblock(u_int32_t *, 10 * NFSX_UNSIGNED);
		nd->nd_retxid = fxdr_unsigned(u_int32_t, *tl++);
		if (*tl++ != nfsrv_rpc_call) {
			m_freem(mrep);
			return (EBADRPC);
		}
	} else
		tl = nfsm_dissect_nonblock(u_int32_t *, 8 * NFSX_UNSIGNED);
	nd->nd_repstat = 0;
	nd->nd_flag = 0;
	if (*tl++ != nfsrv_rpc_vers) {
		nd->nd_repstat = ERPCMISMATCH;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	if (*tl != nfsrv_nfs_prog) {
		nd->nd_repstat = EPROGUNAVAIL;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	tl++;
	nfsvers = fxdr_unsigned(u_int32_t, *tl++);
	if (nfsvers < NFS_VER2 || nfsvers > NFS_VER3) {
		nd->nd_repstat = EPROGMISMATCH;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	nd->nd_procnum = fxdr_unsigned(u_int32_t, *tl++);
	if (nd->nd_procnum == NFSPROC_NULL)
		return (0);
	if (nfsvers == NFS_VER3) {
		nd->nd_flag = ND_NFSV3;
		if (nd->nd_procnum >= NFS_NPROCS) {
			nd->nd_repstat = EPROCUNAVAIL;
			nd->nd_procnum = NFSPROC_NOOP;
			return (0);
		}
	} else {
		if (nd->nd_procnum > NFSV2PROC_STATFS) {
			nd->nd_repstat = EPROCUNAVAIL;
			nd->nd_procnum = NFSPROC_NOOP;
			return (0);
		}
		/* Map the v2 procedure numbers into v3 ones */
		nd->nd_procnum = nfsrv_nfsv3_procid[nd->nd_procnum];
	}
	auth_type = *tl++;
	len = fxdr_unsigned(int, *tl++);
	if (len < 0 || len > RPCAUTH_MAXSIZ) {
		m_freem(mrep);
		return (EBADRPC);
	}

	/*
	 * Handle auth_unix.
	 */
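	/*
	 * The AUTH_UNIX credential body parsed below is, per RFC 1057:
	 *
	 *	stamp			(skipped by the pre-increment)
	 *	machinename<>		(length word + rounded-up opaque)
	 *	uid, gid
	 *	gids<>			(count word + the group ids)
	 *
	 * followed by the verifier's flavor and opaque length, which is
	 * why the gid dissect below asks for (len + 2) words.
	 */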
	if (auth_type == nfsrv_rpc_auth_unix) {
		len = fxdr_unsigned(int, *++tl);
		if (len < 0 || len > NFS_MAXNAMLEN) {
			m_freem(mrep);
			return (EBADRPC);
		}
		nfsm_adv(nfsm_rndup(len));
		tl = nfsm_dissect_nonblock(u_int32_t *, 3 * NFSX_UNSIGNED);
		nd->nd_cr->cr_uid = nd->nd_cr->cr_ruid =
		    nd->nd_cr->cr_svuid = fxdr_unsigned(uid_t, *tl++);
		nd->nd_cr->cr_groups[0] = nd->nd_cr->cr_rgid =
		    nd->nd_cr->cr_svgid = fxdr_unsigned(gid_t, *tl++);
#ifdef MAC
		mac_associate_nfsd_label(nd->nd_cr);
#endif
		len = fxdr_unsigned(int, *tl);
		if (len < 0 || len > RPCAUTH_UNIXGIDS) {
			m_freem(mrep);
			return (EBADRPC);
		}
		tl = nfsm_dissect_nonblock(u_int32_t *,
		    (len + 2) * NFSX_UNSIGNED);
		for (i = 1; i <= len; i++)
			if (i < NGROUPS)
				nd->nd_cr->cr_groups[i] =
				    fxdr_unsigned(gid_t, *tl++);
			else
				tl++;
		nd->nd_cr->cr_ngroups = (len >= NGROUPS) ? NGROUPS : (len + 1);
		if (nd->nd_cr->cr_ngroups > 1)
			nfsrvw_sort(nd->nd_cr->cr_groups, nd->nd_cr->cr_ngroups);
		len = fxdr_unsigned(int, *++tl);
		if (len < 0 || len > RPCAUTH_MAXSIZ) {
			m_freem(mrep);
			return (EBADRPC);
		}
		if (len > 0)
			nfsm_adv(nfsm_rndup(len));
	} else {
		nd->nd_repstat = (NFSERR_AUTHERR | AUTH_REJECTCRED);
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}

	nd->nd_md = md;
	nd->nd_dpos = dpos;
	return (0);
nfsmout:
	return (error);
}

/*
 * Socket upcall routine for the nfsd sockets.
 * The caddr_t arg is a pointer to the "struct nfssvc_sock".
 * Essentially do as much as possible non-blocking, else punt and it will
 * be called with M_TRYWAIT from an nfsd.
 */
void
nfsrv_rcv(struct socket *so, void *arg, int waitflag)
{
	struct nfssvc_sock *slp = (struct nfssvc_sock *)arg;
	struct mbuf *m;
	struct mbuf *mp;
	struct sockaddr *nam;
	struct uio auio;
	int flags, error;

	NFSD_UNLOCK_ASSERT();

	/* XXXRW: Unlocked read. */
	if ((slp->ns_flag & SLP_VALID) == 0)
		return;

	/*
	 * We can't do this in the context of a socket callback
	 * because we're called with locks held.
	 * XXX: SMP
	 */
	if (waitflag == M_DONTWAIT) {
		NFSD_LOCK();
		slp->ns_flag |= SLP_NEEDQ;
		goto dorecs;
	}

	NFSD_LOCK();
	auio.uio_td = NULL;
	if (so->so_type == SOCK_STREAM) {
		/*
		 * If there are already records on the queue, defer soreceive()
		 * to an nfsd so that there is feedback to the TCP layer that
		 * the nfs servers are heavily loaded.
		 */
		if (STAILQ_FIRST(&slp->ns_rec) != NULL &&
		    waitflag == M_DONTWAIT) {
			slp->ns_flag |= SLP_NEEDQ;
			goto dorecs;
		}

		/*
		 * Do soreceive().
		 */
		auio.uio_resid = 1000000000;
		flags = MSG_DONTWAIT;
		NFSD_UNLOCK();
		error = soreceive(so, &nam, &auio, &mp, NULL, &flags);
		NFSD_LOCK();
		if (error || mp == NULL) {
			if (error == EWOULDBLOCK)
				slp->ns_flag |= SLP_NEEDQ;
			else
				slp->ns_flag |= SLP_DISCONN;
			goto dorecs;
		}
		m = mp;
		if (slp->ns_rawend) {
			slp->ns_rawend->m_next = m;
			slp->ns_cc += 1000000000 - auio.uio_resid;
		} else {
			slp->ns_raw = m;
			slp->ns_cc = 1000000000 - auio.uio_resid;
		}
		while (m->m_next)
			m = m->m_next;
		slp->ns_rawend = m;

		/*
		 * Now try and parse record(s) out of the raw stream data.
		 */
		error = nfsrv_getstream(slp, waitflag);
		if (error) {
			if (error == EPERM)
				slp->ns_flag |= SLP_DISCONN;
			else
				slp->ns_flag |= SLP_NEEDQ;
		}
	} else {
		do {
			auio.uio_resid = 1000000000;
			flags = MSG_DONTWAIT;
			NFSD_UNLOCK();
			error = soreceive(so, &nam, &auio, &mp, NULL, &flags);
			if (mp) {
				struct nfsrv_rec *rec;
				rec = malloc(sizeof(struct nfsrv_rec),
				    M_NFSRVDESC,
				    waitflag == M_DONTWAIT ? M_NOWAIT : M_WAITOK);
				if (!rec) {
					if (nam)
						FREE(nam, M_SONAME);
					m_freem(mp);
					NFSD_LOCK();
					continue;
				}
				nfs_realign(&mp, 10 * NFSX_UNSIGNED);
				NFSD_LOCK();
				rec->nr_address = nam;
				rec->nr_packet = mp;
				STAILQ_INSERT_TAIL(&slp->ns_rec, rec, nr_link);
			} else
				NFSD_LOCK();
			if (error) {
				if ((so->so_proto->pr_flags & PR_CONNREQUIRED)
				    && error != EWOULDBLOCK) {
					slp->ns_flag |= SLP_DISCONN;
					goto dorecs;
				}
			}
		} while (mp);
	}

	/*
	 * Now try and process the request records, non-blocking.
	 */
dorecs:
	if (waitflag == M_DONTWAIT &&
	    (STAILQ_FIRST(&slp->ns_rec) != NULL ||
	     (slp->ns_flag & (SLP_NEEDQ | SLP_DISCONN))))
		nfsrv_wakenfsd(slp);
	NFSD_UNLOCK();
}

/*
 * Try and extract an RPC request from the mbuf data list received on a
 * stream socket.  The "waitflag" argument indicates whether or not it
 * can sleep.
 */
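/*
 * Stream records use standard RPC record marking: each fragment is
 * preceded by a 4-byte big-endian mark whose low 31 bits give the
 * fragment length and whose high bit, when set, flags the last
 * fragment of the record, i.e. in the code below:
 *
 *	recmark = ntohl(recmark);
 *	slp->ns_reclen = recmark & ~0x80000000;	(fragment length)
 *	recmark & 0x80000000			(last fragment?)
 */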
static int
nfsrv_getstream(struct nfssvc_sock *slp, int waitflag)
{
	struct mbuf *m, **mpp;
	char *cp1, *cp2;
	int len;
	struct mbuf *om, *m2, *recm;
	u_int32_t recmark;

	NFSD_LOCK_ASSERT();

	if (slp->ns_flag & SLP_GETSTREAM)
		panic("nfs getstream");
	slp->ns_flag |= SLP_GETSTREAM;
	for (;;) {
		if (slp->ns_reclen == 0) {
			if (slp->ns_cc < NFSX_UNSIGNED) {
				slp->ns_flag &= ~SLP_GETSTREAM;
				return (0);
			}
			m = slp->ns_raw;
			if (m->m_len >= NFSX_UNSIGNED) {
				bcopy(mtod(m, caddr_t), (caddr_t)&recmark,
				    NFSX_UNSIGNED);
				m->m_data += NFSX_UNSIGNED;
				m->m_len -= NFSX_UNSIGNED;
			} else {
				cp1 = (caddr_t)&recmark;
				cp2 = mtod(m, caddr_t);
				while (cp1 < ((caddr_t)&recmark) + NFSX_UNSIGNED) {
					while (m->m_len == 0) {
						m = m->m_next;
						cp2 = mtod(m, caddr_t);
					}
					*cp1++ = *cp2++;
					m->m_data++;
					m->m_len--;
				}
			}
			slp->ns_cc -= NFSX_UNSIGNED;
			recmark = ntohl(recmark);
			slp->ns_reclen = recmark & ~0x80000000;
			if (recmark & 0x80000000)
				slp->ns_flag |= SLP_LASTFRAG;
			else
				slp->ns_flag &= ~SLP_LASTFRAG;
			if (slp->ns_reclen > NFS_MAXPACKET || slp->ns_reclen <= 0) {
				slp->ns_flag &= ~SLP_GETSTREAM;
				return (EPERM);
			}
		}

		/*
		 * Now get the record part.
		 *
		 * Note that slp->ns_reclen may be 0.  Linux sometimes
		 * generates 0-length RPCs.
		 */
		recm = NULL;
		if (slp->ns_cc == slp->ns_reclen) {
			recm = slp->ns_raw;
			slp->ns_raw = slp->ns_rawend = NULL;
			slp->ns_cc = slp->ns_reclen = 0;
		} else if (slp->ns_cc > slp->ns_reclen) {
			len = 0;
			m = slp->ns_raw;
			om = NULL;

			while (len < slp->ns_reclen) {
				if ((len + m->m_len) > slp->ns_reclen) {
					NFSD_UNLOCK();
					m2 = m_copym(m, 0, slp->ns_reclen - len,
					    waitflag);
					NFSD_LOCK();
					if (m2) {
						if (om) {
							om->m_next = m2;
							recm = slp->ns_raw;
						} else
							recm = m2;
						m->m_data += slp->ns_reclen - len;
						m->m_len -= slp->ns_reclen - len;
						len = slp->ns_reclen;
					} else {
						slp->ns_flag &= ~SLP_GETSTREAM;
						return (EWOULDBLOCK);
					}
				} else if ((len + m->m_len) == slp->ns_reclen) {
					om = m;
					len += m->m_len;
					m = m->m_next;
					recm = slp->ns_raw;
					om->m_next = NULL;
				} else {
					om = m;
					len += m->m_len;
					m = m->m_next;
				}
			}
			slp->ns_raw = m;
			slp->ns_cc -= len;
			slp->ns_reclen = 0;
		} else {
			slp->ns_flag &= ~SLP_GETSTREAM;
			return (0);
		}

		/*
		 * Accumulate the fragments into a record.
		 */
		mpp = &slp->ns_frag;
		while (*mpp)
			mpp = &((*mpp)->m_next);
		*mpp = recm;
		if (slp->ns_flag & SLP_LASTFRAG) {
			struct nfsrv_rec *rec;
			NFSD_UNLOCK();
			rec = malloc(sizeof(struct nfsrv_rec), M_NFSRVDESC,
			    waitflag == M_DONTWAIT ? M_NOWAIT : M_WAITOK);
			NFSD_LOCK();
			if (!rec) {
				m_freem(slp->ns_frag);
			} else {
				nfs_realign(&slp->ns_frag, 10 * NFSX_UNSIGNED);
				rec->nr_address = NULL;
				rec->nr_packet = slp->ns_frag;
				STAILQ_INSERT_TAIL(&slp->ns_rec, rec, nr_link);
			}
			slp->ns_frag = NULL;
		}
	}
}

/*
 * Parse an RPC header.
 */
int
nfsrv_dorec(struct nfssvc_sock *slp, struct nfsd *nfsd,
    struct nfsrv_descript **ndp)
{
	struct nfsrv_rec *rec;
	struct mbuf *m;
	struct sockaddr *nam;
	struct nfsrv_descript *nd;
	int error;

	NFSD_LOCK_ASSERT();

	*ndp = NULL;
	if ((slp->ns_flag & SLP_VALID) == 0 ||
	    STAILQ_FIRST(&slp->ns_rec) == NULL)
		return (ENOBUFS);
	rec = STAILQ_FIRST(&slp->ns_rec);
	KASSERT(rec->nr_packet != NULL, ("nfsrv_dorec: missing mbuf"));
	STAILQ_REMOVE_HEAD(&slp->ns_rec, nr_link);
	nam = rec->nr_address;
	m = rec->nr_packet;
	free(rec, M_NFSRVDESC);
	NFSD_UNLOCK();
	MALLOC(nd, struct nfsrv_descript *, sizeof (struct nfsrv_descript),
	    M_NFSRVDESC, M_WAITOK);
	nd->nd_cr = crget();
	NFSD_LOCK();
	nd->nd_md = nd->nd_mrep = m;
	nd->nd_nam2 = nam;
	nd->nd_dpos = mtod(m, caddr_t);
	error = nfs_getreq(nd, nfsd, TRUE);
	if (error) {
		if (nam) {
			FREE(nam, M_SONAME);
		}
		if (nd->nd_cr != NULL)
			crfree(nd->nd_cr);
		free((caddr_t)nd, M_NFSRVDESC);
		return (error);
	}
	*ndp = nd;
	nfsd->nfsd_nd = nd;
	return (0);
}

/*
 * Search for a sleeping nfsd and wake it up.
 * SIDE EFFECT: If none found, set NFSD_CHECKSLP flag, so that one of the
 * running nfsds will go look for the work in the nfssvc_sock list.
 */
void
nfsrv_wakenfsd(struct nfssvc_sock *slp)
{
	struct nfsd *nd;

	NFSD_LOCK_ASSERT();

	if ((slp->ns_flag & SLP_VALID) == 0)
		return;
	TAILQ_FOREACH(nd, &nfsd_head, nfsd_chain) {
		if (nd->nfsd_flag & NFSD_WAITING) {
			nd->nfsd_flag &= ~NFSD_WAITING;
			if (nd->nfsd_slp)
				panic("nfsd wakeup");
			slp->ns_sref++;
			nd->nfsd_slp = slp;
			wakeup(nd);
			return;
		}
	}
	slp->ns_flag |= SLP_DOREC;
	nfsd_head_flag |= NFSD_CHECKSLP;
}

/*
 * This is the nfs send routine.
 * For the server side:
 * - return EINTR or ERESTART if interrupted by a signal
 * - return EPIPE if a connection is lost for connection based sockets (TCP...)
 * - do any cleanup required by recoverable socket errors (?)
 */
int
nfsrv_send(struct socket *so, struct sockaddr *nam, struct mbuf *top)
{
	struct sockaddr *sendnam;
	int error, soflags, flags;

	NFSD_UNLOCK_ASSERT();

	soflags = so->so_proto->pr_flags;
	if ((soflags & PR_CONNREQUIRED) || (so->so_state & SS_ISCONNECTED))
		sendnam = NULL;
	else
		sendnam = nam;
	if (so->so_type == SOCK_SEQPACKET)
		flags = MSG_EOR;
	else
		flags = 0;

	error = sosend(so, sendnam, 0, top, 0, flags, curthread/*XXX*/);
	if (error == ENOBUFS && so->so_type == SOCK_DGRAM)
		error = 0;

	if (error) {
		log(LOG_INFO, "nfsd send error %d\n", error);

		/*
		 * Handle any recoverable (soft) socket errors here. (?)
		 */
		if (error != EINTR && error != ERESTART &&
		    error != EWOULDBLOCK && error != EPIPE)
			error = 0;
	}
	return (error);
}

/*
 * NFS server timer routine.
 */
void
nfsrv_timer(void *arg)
{
	struct nfssvc_sock *slp;
	u_quad_t cur_usec;

	NFSD_LOCK();
	/*
	 * Scan the write gathering queues for writes that need to be
	 * completed now.
	 */
	cur_usec = nfs_curusec();
	TAILQ_FOREACH(slp, &nfssvc_sockhead, ns_chain) {
		if (LIST_FIRST(&slp->ns_tq) &&
		    LIST_FIRST(&slp->ns_tq)->nd_time <= cur_usec)
			nfsrv_wakenfsd(slp);
	}
	NFSD_UNLOCK();
	callout_reset(&nfsrv_callout, nfsrv_ticks, nfsrv_timer, NULL);
}